373 files changed, 2361 insertions, 1863 deletions
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf9679..b3599743093 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void); | |||
60 | static int sharpsl_fatal_check(void); | 60 | static int sharpsl_fatal_check(void); |
61 | static int sharpsl_average_value(int ad); | 61 | static int sharpsl_average_value(int ad); |
62 | static void sharpsl_average_clear(void); | 62 | static void sharpsl_average_clear(void); |
63 | static void sharpsl_charge_toggle(void *private_); | 63 | static void sharpsl_charge_toggle(struct work_struct *private_); |
64 | static void sharpsl_battery_thread(void *private_); | 64 | static void sharpsl_battery_thread(struct work_struct *private_); |
65 | 65 | ||
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Variables | 68 | * Variables |
69 | */ | 69 | */ |
70 | struct sharpsl_pm_status sharpsl_pm; | 70 | struct sharpsl_pm_status sharpsl_pm; |
71 | DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL); | 71 | DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle); |
72 | DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL); | 72 | DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread); |
73 | DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger); | 73 | DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger); |
74 | 74 | ||
75 | 75 | ||
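The sharpsl_pm.c hunks above show the core of the tree-wide conversion: work handlers now take a struct work_struct * instead of a caller-supplied void *, DECLARE_WORK() loses its data argument, and items that get re-queued or delayed become DECLARE_DELAYED_WORK() and are queued with schedule_delayed_work(..., 0). A minimal sketch of the before/after shape, using hypothetical names (my_work, my_handler) rather than code from this patch:

#include <linux/workqueue.h>

/* old style (before this series):
 *	static void my_handler(void *data);
 *	static DECLARE_WORK(my_work, my_handler, &my_context);
 * the data pointer was stored in the work item and handed back to the
 * handler when keventd ran it. */

static void my_handler(struct work_struct *work)
{
	/* context, if any, is now recovered from 'work' with container_of() */
}

static void my_delayed_handler(struct work_struct *work)
{
	/* delayed items use the same handler signature */
}

/* new style: no data argument; delayable items use a distinct type */
static DECLARE_WORK(my_work, my_handler);
static DECLARE_DELAYED_WORK(my_delayed_work, my_delayed_handler);

static void kick(void)
{
	schedule_work(&my_work);
	schedule_delayed_work(&my_delayed_work, 0);	/* delay 0 = run ASAP */
}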
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void) | |||
116 | EXPORT_SYMBOL(sharpsl_battery_kick); | 116 | EXPORT_SYMBOL(sharpsl_battery_kick); |
117 | 117 | ||
118 | 118 | ||
119 | static void sharpsl_battery_thread(void *private_) | 119 | static void sharpsl_battery_thread(struct work_struct *private_) |
120 | { | 120 | { |
121 | int voltage, percent, apm_status, i = 0; | 121 | int voltage, percent, apm_status, i = 0; |
122 | 122 | ||
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_) | |||
128 | /* Corgi cannot confirm when battery fully charged so periodically kick! */ | 128 | /* Corgi cannot confirm when battery fully charged so periodically kick! */ |
129 | if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) | 129 | if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON) |
130 | && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL)) | 130 | && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL)) |
131 | schedule_work(&toggle_charger); | 131 | schedule_delayed_work(&toggle_charger, 0); |
132 | 132 | ||
133 | while(1) { | 133 | while(1) { |
134 | voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT); | 134 | voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT); |
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void) | |||
212 | sharpsl_pm_led(SHARPSL_LED_OFF); | 212 | sharpsl_pm_led(SHARPSL_LED_OFF); |
213 | sharpsl_pm.charge_mode = CHRG_OFF; | 213 | sharpsl_pm.charge_mode = CHRG_OFF; |
214 | 214 | ||
215 | schedule_work(&sharpsl_bat); | 215 | schedule_delayed_work(&sharpsl_bat, 0); |
216 | } | 216 | } |
217 | 217 | ||
218 | static void sharpsl_charge_error(void) | 218 | static void sharpsl_charge_error(void) |
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void) | |||
222 | sharpsl_pm.charge_mode = CHRG_ERROR; | 222 | sharpsl_pm.charge_mode = CHRG_ERROR; |
223 | } | 223 | } |
224 | 224 | ||
225 | static void sharpsl_charge_toggle(void *private_) | 225 | static void sharpsl_charge_toggle(struct work_struct *private_) |
226 | { | 226 | { |
227 | dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies); | 227 | dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies); |
228 | 228 | ||
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data) | |||
254 | else if (sharpsl_pm.charge_mode == CHRG_ON) | 254 | else if (sharpsl_pm.charge_mode == CHRG_ON) |
255 | sharpsl_charge_off(); | 255 | sharpsl_charge_off(); |
256 | 256 | ||
257 | schedule_work(&sharpsl_bat); | 257 | schedule_delayed_work(&sharpsl_bat, 0); |
258 | } | 258 | } |
259 | 259 | ||
260 | 260 | ||
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data) | |||
279 | sharpsl_charge_off(); | 279 | sharpsl_charge_off(); |
280 | } else if (sharpsl_pm.full_count < 2) { | 280 | } else if (sharpsl_pm.full_count < 2) { |
281 | dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n"); | 281 | dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n"); |
282 | schedule_work(&toggle_charger); | 282 | schedule_delayed_work(&toggle_charger, 0); |
283 | } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) { | 283 | } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) { |
284 | dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n"); | 284 | dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n"); |
285 | schedule_work(&toggle_charger); | 285 | schedule_delayed_work(&toggle_charger, 0); |
286 | } else { | 286 | } else { |
287 | sharpsl_charge_off(); | 287 | sharpsl_charge_off(); |
288 | sharpsl_pm.charge_mode = CHRG_DONE; | 288 | sharpsl_pm.charge_mode = CHRG_DONE; |
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee..9d2346fb68f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode) | |||
323 | 323 | ||
324 | cancel_delayed_work(&irda_config->gpio_expa); | 324 | cancel_delayed_work(&irda_config->gpio_expa); |
325 | PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); | 325 | PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); |
326 | schedule_work(&irda_config->gpio_expa); | 326 | #error this is not permitted - mode is an argument variable |
327 | schedule_delayed_work(&irda_config->gpio_expa, 0); | ||
327 | 328 | ||
328 | return 0; | 329 | return 0; |
329 | } | 330 | } |
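The #error above was inserted deliberately: the old PREPARE_WORK() call stored &mode, the address of a function argument, in the work item, and the new API has nowhere to put such a pointer, so the conversion cannot be mechanical here (deferring work that dereferences a stack variable was never safe anyway). One way the call site could be restructured, sketched with hypothetical names (trans_mode_work, set_trans_mode_deferred) that are not taken from this patch, is to copy the mode into storage that outlives the call and embed the work item there:

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* hypothetical context; a real fix would put this in the board's irda
 * configuration and serialize against concurrent callers */
struct trans_mode_work {
	struct work_struct work;
	int mode;
};

static struct trans_mode_work tm_work;

static void set_trans_mode_deferred(struct work_struct *work)
{
	struct trans_mode_work *t =
		container_of(work, struct trans_mode_work, work);

	/* ... program the transceiver according to t->mode ... */
}

static int transceiver_mode(int mode)
{
	tm_work.mode = mode;		/* copy the value, never keep &mode */
	INIT_WORK(&tm_work.work, set_trans_mode_deferred);
	schedule_work(&tm_work.work);
	return 0;
}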
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209f..cbe909bad79 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = { | |||
74 | .rows = 8, | 74 | .rows = 8, |
75 | .cols = 8, | 75 | .cols = 8, |
76 | .keymap = nokia770_keymap, | 76 | .keymap = nokia770_keymap, |
77 | .keymapsize = ARRAY_SIZE(nokia770_keymap) | 77 | .keymapsize = ARRAY_SIZE(nokia770_keymap), |
78 | .delay = 4, | 78 | .delay = 4, |
79 | }; | 79 | }; |
80 | 80 | ||
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void) | |||
191 | printk("HP connected\n"); | 191 | printk("HP connected\n"); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void codec_delayed_power_down(void *arg) | 194 | static void codec_delayed_power_down(struct work_struct *work) |
195 | { | 195 | { |
196 | down(&audio_pwr_sem); | 196 | down(&audio_pwr_sem); |
197 | if (audio_pwr_state == -1) | 197 | if (audio_pwr_state == -1) |
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg) | |||
200 | up(&audio_pwr_sem); | 200 | up(&audio_pwr_sem); |
201 | } | 201 | } |
202 | 202 | ||
203 | static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL); | 203 | static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down); |
204 | 204 | ||
205 | static void nokia770_audio_pwr_down(void) | 205 | static void nokia770_audio_pwr_down(void) |
206 | { | 206 | { |
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6..0cbf1b0071f 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state; | |||
35 | 35 | ||
36 | static u8 tps_leds_change; | 36 | static u8 tps_leds_change; |
37 | 37 | ||
38 | static void tps_work(void *unused) | 38 | static void tps_work(struct work_struct *unused) |
39 | { | 39 | { |
40 | for (;;) { | 40 | for (;;) { |
41 | u8 leds; | 41 | u8 leds; |
@@ -61,7 +61,7 @@ static void tps_work(void *unused) | |||
61 | } | 61 | } |
62 | } | 62 | } |
63 | 63 | ||
64 | static DECLARE_WORK(work, tps_work, NULL); | 64 | static DECLARE_WORK(work, tps_work); |
65 | 65 | ||
66 | #ifdef CONFIG_OMAP_OSK_MISTRAL | 66 | #ifdef CONFIG_OMAP_OSK_MISTRAL |
67 | 67 | ||
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad..3b1ad1d981a 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode) | |||
206 | 206 | ||
207 | cancel_delayed_work(&irda_config->gpio_expa); | 207 | cancel_delayed_work(&irda_config->gpio_expa); |
208 | PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); | 208 | PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode); |
209 | schedule_work(&irda_config->gpio_expa); | 209 | #error this is not permitted - mode is an argument variable |
210 | schedule_delayed_work(&irda_config->gpio_expa, 0); | ||
210 | 211 | ||
211 | return 0; | 212 | return 0; |
212 | } | 213 | } |
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab5..12d2fe0ceff 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD; | |||
36 | 36 | ||
37 | static int max7310_write(struct i2c_client *client, int address, int data); | 37 | static int max7310_write(struct i2c_client *client, int address, int data); |
38 | static struct i2c_client max7310_template; | 38 | static struct i2c_client max7310_template; |
39 | static void akita_ioexp_work(void *private_); | 39 | static void akita_ioexp_work(struct work_struct *private_); |
40 | 40 | ||
41 | static struct device *akita_ioexp_device; | 41 | static struct device *akita_ioexp_device; |
42 | static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT; | 42 | static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT; |
43 | DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL); | 43 | DECLARE_WORK(akita_ioexp, akita_ioexp_work); |
44 | 44 | ||
45 | 45 | ||
46 | /* | 46 | /* |
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit) | |||
158 | EXPORT_SYMBOL(akita_set_ioexp); | 158 | EXPORT_SYMBOL(akita_set_ioexp); |
159 | EXPORT_SYMBOL(akita_reset_ioexp); | 159 | EXPORT_SYMBOL(akita_reset_ioexp); |
160 | 160 | ||
161 | static void akita_ioexp_work(void *private_) | 161 | static void akita_ioexp_work(struct work_struct *private_) |
162 | { | 162 | { |
163 | if (akita_ioexp_device) | 163 | if (akita_ioexp_device) |
164 | max7310_set_ouputs(akita_ioexp_device, ioexp_output_value); | 164 | max7310_set_ouputs(akita_ioexp_device, ioexp_output_value); |
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b0..6b5d3518a1c 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info) | |||
51 | } | 51 | } |
52 | } | 52 | } |
53 | 53 | ||
54 | static void mce_work_fn(void *data); | 54 | static void mce_work_fn(struct work_struct *work); |
55 | static DECLARE_WORK(mce_work, mce_work_fn, NULL); | 55 | static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); |
56 | 56 | ||
57 | static void mce_work_fn(void *data) | 57 | static void mce_work_fn(struct work_struct *work) |
58 | { | 58 | { |
59 | on_each_cpu(mce_checkregs, NULL, 1, 1); | 59 | on_each_cpu(mce_checkregs, NULL, 1, 1); |
60 | schedule_delayed_work(&mce_work, MCE_RATE); | 60 | schedule_delayed_work(&mce_work, MCE_RATE); |
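non-fatal.c illustrates the periodic, self-rearming case: a handler that reschedules itself with a delay must now be declared as delayed work. A small sketch of the pattern under assumed names (poll_work, poll_fn, POLL_INTERVAL):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define POLL_INTERVAL	(5 * 60 * HZ)	/* hypothetical: every five minutes */

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* ... run the periodic check ... */
	schedule_delayed_work(&poll_work, POLL_INTERVAL);	/* re-arm */
}

static void start_polling(void)
{
	schedule_delayed_work(&poll_work, POLL_INTERVAL);
}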
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65..02a9b66b6ac 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void) | |||
1049 | 1049 | ||
1050 | struct warm_boot_cpu_info { | 1050 | struct warm_boot_cpu_info { |
1051 | struct completion *complete; | 1051 | struct completion *complete; |
1052 | struct work_struct task; | ||
1052 | int apicid; | 1053 | int apicid; |
1053 | int cpu; | 1054 | int cpu; |
1054 | }; | 1055 | }; |
1055 | 1056 | ||
1056 | static void __cpuinit do_warm_boot_cpu(void *p) | 1057 | static void __cpuinit do_warm_boot_cpu(struct work_struct *work) |
1057 | { | 1058 | { |
1058 | struct warm_boot_cpu_info *info = p; | 1059 | struct warm_boot_cpu_info *info = |
1060 | container_of(work, struct warm_boot_cpu_info, task); | ||
1059 | do_boot_cpu(info->apicid, info->cpu); | 1061 | do_boot_cpu(info->apicid, info->cpu); |
1060 | complete(info->complete); | 1062 | complete(info->complete); |
1061 | } | 1063 | } |
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
1064 | { | 1066 | { |
1065 | DECLARE_COMPLETION_ONSTACK(done); | 1067 | DECLARE_COMPLETION_ONSTACK(done); |
1066 | struct warm_boot_cpu_info info; | 1068 | struct warm_boot_cpu_info info; |
1067 | struct work_struct task; | ||
1068 | int apicid, ret; | 1069 | int apicid, ret; |
1069 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); | 1070 | struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu); |
1070 | 1071 | ||
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
1089 | info.complete = &done; | 1090 | info.complete = &done; |
1090 | info.apicid = apicid; | 1091 | info.apicid = apicid; |
1091 | info.cpu = cpu; | 1092 | info.cpu = cpu; |
1092 | INIT_WORK(&task, do_warm_boot_cpu, &info); | 1093 | INIT_WORK(&info.task, do_warm_boot_cpu); |
1093 | 1094 | ||
1094 | tsc_sync_disabled = 1; | 1095 | tsc_sync_disabled = 1; |
1095 | 1096 | ||
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu) | |||
1097 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, | 1098 | clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS, |
1098 | KERNEL_PGD_PTRS); | 1099 | KERNEL_PGD_PTRS); |
1099 | flush_tlb_all(); | 1100 | flush_tlb_all(); |
1100 | schedule_work(&task); | 1101 | schedule_work(&info.task); |
1101 | wait_for_completion(&done); | 1102 | wait_for_completion(&done); |
1102 | 1103 | ||
1103 | tsc_sync_disabled = 0; | 1104 | tsc_sync_disabled = 0; |
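The smpboot.c change shows how per-call context is carried once the data pointer is gone: the work_struct is embedded in the on-stack context structure, the handler recovers it with container_of(), and wait_for_completion() keeps the frame alive until the handler finishes. Roughly, with hypothetical names (boot_ctx, boot_worker):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct boot_ctx {			/* hypothetical */
	struct completion *complete;
	struct work_struct task;
	int arg;
};

static void boot_worker(struct work_struct *work)
{
	struct boot_ctx *ctx = container_of(work, struct boot_ctx, task);

	/* ... do the deferred step using ctx->arg ... */
	complete(ctx->complete);
}

static void run_boot_step(int arg)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct boot_ctx ctx;

	ctx.complete = &done;
	ctx.arg = arg;
	INIT_WORK(&ctx.task, boot_worker);
	schedule_work(&ctx.task);
	wait_for_completion(&done);	/* ctx must outlive the handler */
}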
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd7..9810c8c9075 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0; | |||
217 | static unsigned int cpufreq_init = 0; | 217 | static unsigned int cpufreq_init = 0; |
218 | static struct work_struct cpufreq_delayed_get_work; | 218 | static struct work_struct cpufreq_delayed_get_work; |
219 | 219 | ||
220 | static void handle_cpufreq_delayed_get(void *v) | 220 | static void handle_cpufreq_delayed_get(struct work_struct *work) |
221 | { | 221 | { |
222 | unsigned int cpu; | 222 | unsigned int cpu; |
223 | 223 | ||
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void) | |||
306 | { | 306 | { |
307 | int ret; | 307 | int ret; |
308 | 308 | ||
309 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); | 309 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); |
310 | ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, | 310 | ret = cpufreq_register_notifier(&time_cpufreq_notifier_block, |
311 | CPUFREQ_TRANSITION_NOTIFIER); | 311 | CPUFREQ_TRANSITION_NOTIFIER); |
312 | if (!ret) | 312 | if (!ret) |
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70..b62f0c4d2c7 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void) | |||
209 | } | 209 | } |
210 | #endif | 210 | #endif |
211 | 211 | ||
212 | static void do_softint(void *private_) | 212 | static void do_softint(struct work_struct *private_) |
213 | { | 213 | { |
214 | printk(KERN_ERR "simserial: do_softint called\n"); | 214 | printk(KERN_ERR "simserial: do_softint called\n"); |
215 | } | 215 | } |
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info) | |||
698 | info->flags = sstate->flags; | 698 | info->flags = sstate->flags; |
699 | info->xmit_fifo_size = sstate->xmit_fifo_size; | 699 | info->xmit_fifo_size = sstate->xmit_fifo_size; |
700 | info->line = line; | 700 | info->line = line; |
701 | INIT_WORK(&info->work, do_softint, info); | 701 | INIT_WORK(&info->work, do_softint); |
702 | info->state = sstate; | 702 | info->state = sstate; |
703 | if (sstate->info) { | 703 | if (sstate->info) { |
704 | kfree(info); | 704 | kfree(info); |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb..6bedd97570c 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy) | |||
678 | * disable the cmc interrupt vector. | 678 | * disable the cmc interrupt vector. |
679 | */ | 679 | */ |
680 | static void | 680 | static void |
681 | ia64_mca_cmc_vector_disable_keventd(void *unused) | 681 | ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused) |
682 | { | 682 | { |
683 | on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); | 683 | on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0); |
684 | } | 684 | } |
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused) | |||
690 | * enable the cmc interrupt vector. | 690 | * enable the cmc interrupt vector. |
691 | */ | 691 | */ |
692 | static void | 692 | static void |
693 | ia64_mca_cmc_vector_enable_keventd(void *unused) | 693 | ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused) |
694 | { | 694 | { |
695 | on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); | 695 | on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0); |
696 | } | 696 | } |
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1247 | monarch_cpu = -1; | 1247 | monarch_cpu = -1; |
1248 | } | 1248 | } |
1249 | 1249 | ||
1250 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL); | 1250 | static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd); |
1251 | static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL); | 1251 | static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd); |
1252 | 1252 | ||
1253 | /* | 1253 | /* |
1254 | * ia64_mca_cmc_int_handler | 1254 | * ia64_mca_cmc_int_handler |
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f566814..b21ddecea94 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs) | |||
463 | } | 463 | } |
464 | 464 | ||
465 | struct create_idle { | 465 | struct create_idle { |
466 | struct work_struct work; | ||
466 | struct task_struct *idle; | 467 | struct task_struct *idle; |
467 | struct completion done; | 468 | struct completion done; |
468 | int cpu; | 469 | int cpu; |
469 | }; | 470 | }; |
470 | 471 | ||
471 | void | 472 | void |
472 | do_fork_idle(void *_c_idle) | 473 | do_fork_idle(struct work_struct *work) |
473 | { | 474 | { |
474 | struct create_idle *c_idle = _c_idle; | 475 | struct create_idle *c_idle = |
476 | container_of(work, struct create_idle, work); | ||
475 | 477 | ||
476 | c_idle->idle = fork_idle(c_idle->cpu); | 478 | c_idle->idle = fork_idle(c_idle->cpu); |
477 | complete(&c_idle->done); | 479 | complete(&c_idle->done); |
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu) | |||
482 | { | 484 | { |
483 | int timeout; | 485 | int timeout; |
484 | struct create_idle c_idle = { | 486 | struct create_idle c_idle = { |
487 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), | ||
485 | .cpu = cpu, | 488 | .cpu = cpu, |
486 | .done = COMPLETION_INITIALIZER(c_idle.done), | 489 | .done = COMPLETION_INITIALIZER(c_idle.done), |
487 | }; | 490 | }; |
488 | DECLARE_WORK(work, do_fork_idle, &c_idle); | ||
489 | 491 | ||
490 | c_idle.idle = get_idle_for_cpu(cpu); | 492 | c_idle.idle = get_idle_for_cpu(cpu); |
491 | if (c_idle.idle) { | 493 | if (c_idle.idle) { |
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu) | |||
497 | * We can't use kernel_thread since we must avoid to reschedule the child. | 499 | * We can't use kernel_thread since we must avoid to reschedule the child. |
498 | */ | 500 | */ |
499 | if (!keventd_up() || current_is_keventd()) | 501 | if (!keventd_up() || current_is_keventd()) |
500 | work.func(work.data); | 502 | c_idle.work.func(&c_idle.work); |
501 | else { | 503 | else { |
502 | schedule_work(&work); | 504 | schedule_work(&c_idle.work); |
503 | wait_for_completion(&c_idle.done); | 505 | wait_for_completion(&c_idle.done); |
504 | } | 506 | } |
505 | 507 | ||
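The ia64 variant initializes the embedded work item statically with __WORK_INITIALIZER() inside the designated initializer, and calls the handler directly through work.func when keventd is not usable. A compressed sketch, with idle_ctx and fork_idle_worker as assumed names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct idle_ctx {			/* hypothetical */
	struct work_struct work;
	struct completion done;
	int cpu;
};

static void fork_idle_worker(struct work_struct *work)
{
	struct idle_ctx *c = container_of(work, struct idle_ctx, work);

	/* ... fork the idle thread for c->cpu ... */
	complete(&c->done);
}

static void boot_one_cpu(int cpu)
{
	struct idle_ctx c = {
		.work = __WORK_INITIALIZER(c.work, fork_idle_worker),
		.done = COMPLETION_INITIALIZER(c.done),
		.cpu  = cpu,
	};

	if (!keventd_up() || current_is_keventd())
		c.work.func(&c.work);	/* run it synchronously */
	else {
		schedule_work(&c.work);
		wait_for_completion(&c.done);
	}
}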
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c788..2c82412b9ef 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@ static void sp_cleanup(void) | |||
319 | static int channel_open = 0; | 319 | static int channel_open = 0; |
320 | 320 | ||
321 | /* the work handler */ | 321 | /* the work handler */ |
322 | static void sp_work(void *data) | 322 | static void sp_work(struct work_struct *unused) |
323 | { | 323 | { |
324 | if (!channel_open) { | 324 | if (!channel_open) { |
325 | if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { | 325 | if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { |
@@ -354,7 +354,7 @@ static void startwork(int vpe) | |||
354 | return; | 354 | return; |
355 | } | 355 | } |
356 | 356 | ||
357 | INIT_WORK(&work, sp_work, NULL); | 357 | INIT_WORK(&work, sp_work); |
358 | queue_work(workqueue, &work); | 358 | queue_work(workqueue, &work); |
359 | } else | 359 | } else |
360 | queue_work(workqueue, &work); | 360 | queue_work(workqueue, &work); |
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae8482..0e837762cc5 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@ static unsigned long avr_clock; | |||
14 | 14 | ||
15 | static struct work_struct wd_work; | 15 | static struct work_struct wd_work; |
16 | 16 | ||
17 | static void wd_stop(void *unused) | 17 | static void wd_stop(struct work_struct *unused) |
18 | { | 18 | { |
19 | const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; | 19 | const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; |
20 | int i = 0, rescue = 8; | 20 | int i = 0, rescue = 8; |
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void) | |||
122 | 122 | ||
123 | ls_uart_init(); | 123 | ls_uart_init(); |
124 | 124 | ||
125 | INIT_WORK(&wd_work, wd_stop, NULL); | 125 | INIT_WORK(&wd_work, wd_stop); |
126 | schedule_work(&wd_work); | 126 | schedule_work(&wd_work); |
127 | 127 | ||
128 | return 0; | 128 | return 0; |
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544..c3a89414ddc 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@ | |||
18 | 18 | ||
19 | #define OLD_BACKLIGHT_MAX 15 | 19 | #define OLD_BACKLIGHT_MAX 15 |
20 | 20 | ||
21 | static void pmac_backlight_key_worker(void *data); | 21 | static void pmac_backlight_key_worker(struct work_struct *work); |
22 | static void pmac_backlight_set_legacy_worker(void *data); | 22 | static void pmac_backlight_set_legacy_worker(struct work_struct *work); |
23 | 23 | ||
24 | static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL); | 24 | static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker); |
25 | static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL); | 25 | static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker); |
26 | 26 | ||
27 | /* Although these variables are used in interrupt context, it makes no sense to | 27 | /* Although these variables are used in interrupt context, it makes no sense to |
28 | * protect them. No user is able to produce enough key events per second and | 28 | * protect them. No user is able to produce enough key events per second and |
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value) | |||
94 | return level; | 94 | return level; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void pmac_backlight_key_worker(void *data) | 97 | static void pmac_backlight_key_worker(struct work_struct *work) |
98 | { | 98 | { |
99 | if (atomic_read(&kernel_backlight_disabled)) | 99 | if (atomic_read(&kernel_backlight_disabled)) |
100 | return; | 100 | return; |
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness) | |||
166 | return error; | 166 | return error; |
167 | } | 167 | } |
168 | 168 | ||
169 | static void pmac_backlight_set_legacy_worker(void *data) | 169 | static void pmac_backlight_set_legacy_worker(struct work_struct *work) |
170 | { | 170 | { |
171 | if (atomic_read(&kernel_backlight_disabled)) | 171 | if (atomic_read(&kernel_backlight_disabled)) |
172 | return; | 172 | return; |
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 13707745131..49037edf7d3 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@ | |||
37 | /* EEH event workqueue setup. */ | 37 | /* EEH event workqueue setup. */ |
38 | static DEFINE_SPINLOCK(eeh_eventlist_lock); | 38 | static DEFINE_SPINLOCK(eeh_eventlist_lock); |
39 | LIST_HEAD(eeh_eventlist); | 39 | LIST_HEAD(eeh_eventlist); |
40 | static void eeh_thread_launcher(void *); | 40 | static void eeh_thread_launcher(struct work_struct *); |
41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); | 41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher); |
42 | 42 | ||
43 | /* Serialize reset sequences for a given pci device */ | 43 | /* Serialize reset sequences for a given pci device */ |
44 | DEFINE_MUTEX(eeh_event_mutex); | 44 | DEFINE_MUTEX(eeh_event_mutex); |
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy) | |||
103 | * eeh_thread_launcher | 103 | * eeh_thread_launcher |
104 | * @dummy - unused | 104 | * @dummy - unused |
105 | */ | 105 | */ |
106 | static void eeh_thread_launcher(void *dummy) | 106 | static void eeh_thread_launcher(struct work_struct *dummy) |
107 | { | 107 | { |
108 | if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) | 108 | if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) |
109 | printk(KERN_ERR "Failed to start EEH daemon\n"); | 109 | printk(KERN_ERR "Failed to start EEH daemon\n"); |
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e2781..709952c25f2 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@ struct fcc_enet_private { | |||
385 | phy_info_t *phy; | 385 | phy_info_t *phy; |
386 | struct work_struct phy_relink; | 386 | struct work_struct phy_relink; |
387 | struct work_struct phy_display_config; | 387 | struct work_struct phy_display_config; |
388 | struct net_device *dev; | ||
388 | 389 | ||
389 | uint sequence_done; | 390 | uint sequence_done; |
390 | 391 | ||
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = { | |||
1391 | NULL | 1392 | NULL |
1392 | }; | 1393 | }; |
1393 | 1394 | ||
1394 | static void mii_display_status(void *data) | 1395 | static void mii_display_status(struct work_struct *work) |
1395 | { | 1396 | { |
1396 | struct net_device *dev = data; | 1397 | volatile struct fcc_enet_private *fep = |
1397 | volatile struct fcc_enet_private *fep = dev->priv; | 1398 | container_of(work, struct fcc_enet_private, phy_relink); |
1399 | struct net_device *dev = fep->dev; | ||
1398 | uint s = fep->phy_status; | 1400 | uint s = fep->phy_status; |
1399 | 1401 | ||
1400 | if (!fep->link && !fep->old_link) { | 1402 | if (!fep->link && !fep->old_link) { |
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data) | |||
1428 | printk(".\n"); | 1430 | printk(".\n"); |
1429 | } | 1431 | } |
1430 | 1432 | ||
1431 | static void mii_display_config(void *data) | 1433 | static void mii_display_config(struct work_struct *work) |
1432 | { | 1434 | { |
1433 | struct net_device *dev = data; | 1435 | volatile struct fcc_enet_private *fep = |
1434 | volatile struct fcc_enet_private *fep = dev->priv; | 1436 | container_of(work, struct fcc_enet_private, |
1437 | phy_display_config); | ||
1438 | struct net_device *dev = fep->dev; | ||
1435 | uint s = fep->phy_status; | 1439 | uint s = fep->phy_status; |
1436 | 1440 | ||
1437 | printk("%s: config: auto-negotiation ", dev->name); | 1441 | printk("%s: config: auto-negotiation ", dev->name); |
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void) | |||
1758 | cep->phy_id_done = 0; | 1762 | cep->phy_id_done = 0; |
1759 | cep->phy_addr = fip->fc_phyaddr; | 1763 | cep->phy_addr = fip->fc_phyaddr; |
1760 | mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); | 1764 | mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); |
1761 | INIT_WORK(&cep->phy_relink, mii_display_status, dev); | 1765 | INIT_WORK(&cep->phy_relink, mii_display_status); |
1762 | INIT_WORK(&cep->phy_display_config, mii_display_config, dev); | 1766 | INIT_WORK(&cep->phy_display_config, mii_display_config); |
1767 | cep->dev = dev; | ||
1763 | #endif /* CONFIG_USE_MDIO */ | 1768 | #endif /* CONFIG_USE_MDIO */ |
1764 | 1769 | ||
1765 | fip++; | 1770 | fip++; |
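fcc_enet.c shows the other common fix-up: when the handler used to receive a net_device pointer as its data argument, the driver now stores a back-pointer in the private structure that already contains the work item. A sketch of that pattern, with my_enet_private and my_display_status as assumed names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct my_enet_private {		/* hypothetical */
	struct work_struct phy_relink;
	struct net_device *dev;		/* back-pointer added by the conversion */
};

static void my_display_status(struct work_struct *work)
{
	struct my_enet_private *fep =
		container_of(work, struct my_enet_private, phy_relink);
	struct net_device *dev = fep->dev;

	printk(KERN_INFO "%s: link status changed\n", dev->name);
}

static void my_phy_init(struct net_device *dev, struct my_enet_private *fep)
{
	fep->dev = dev;			/* must be set before the work can run */
	INIT_WORK(&fep->phy_relink, my_display_status);
}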
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e3d33..e6c28fb423b 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@ struct fec_enet_private { | |||
173 | uint phy_speed; | 173 | uint phy_speed; |
174 | phy_info_t *phy; | 174 | phy_info_t *phy; |
175 | struct work_struct phy_task; | 175 | struct work_struct phy_task; |
176 | struct net_device *dev; | ||
176 | 177 | ||
177 | uint sequence_done; | 178 | uint sequence_done; |
178 | 179 | ||
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev) | |||
1263 | printk(".\n"); | 1264 | printk(".\n"); |
1264 | } | 1265 | } |
1265 | 1266 | ||
1266 | static void mii_display_config(void *priv) | 1267 | static void mii_display_config(struct work_struct *work) |
1267 | { | 1268 | { |
1268 | struct net_device *dev = (struct net_device *)priv; | 1269 | struct fec_enet_private *fep = |
1269 | struct fec_enet_private *fep = dev->priv; | 1270 | container_of(work, struct fec_enet_private, phy_task); |
1271 | struct net_device *dev = fep->dev; | ||
1270 | volatile uint *s = &(fep->phy_status); | 1272 | volatile uint *s = &(fep->phy_status); |
1271 | 1273 | ||
1272 | printk("%s: config: auto-negotiation ", dev->name); | 1274 | printk("%s: config: auto-negotiation ", dev->name); |
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv) | |||
1295 | fep->sequence_done = 1; | 1297 | fep->sequence_done = 1; |
1296 | } | 1298 | } |
1297 | 1299 | ||
1298 | static void mii_relink(void *priv) | 1300 | static void mii_relink(struct work_struct *work) |
1299 | { | 1301 | { |
1300 | struct net_device *dev = (struct net_device *)priv; | 1302 | struct fec_enet_private *fep = |
1301 | struct fec_enet_private *fep = dev->priv; | 1303 | container_of(work, struct fec_enet_private, phy_task); |
1304 | struct net_device *dev = fep->dev; | ||
1302 | int duplex; | 1305 | int duplex; |
1303 | 1306 | ||
1304 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; | 1307 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; |
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) | |||
1325 | { | 1328 | { |
1326 | struct fec_enet_private *fep = dev->priv; | 1329 | struct fec_enet_private *fep = dev->priv; |
1327 | 1330 | ||
1328 | INIT_WORK(&fep->phy_task, mii_relink, (void *)dev); | 1331 | fep->dev = dev; |
1332 | INIT_WORK(&fep->phy_task, mii_relink); | ||
1329 | schedule_work(&fep->phy_task); | 1333 | schedule_work(&fep->phy_task); |
1330 | } | 1334 | } |
1331 | 1335 | ||
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev) | |||
1333 | { | 1337 | { |
1334 | struct fec_enet_private *fep = dev->priv; | 1338 | struct fec_enet_private *fep = dev->priv; |
1335 | 1339 | ||
1336 | INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev); | 1340 | fep->dev = dev; |
1341 | INIT_WORK(&fep->phy_task, mii_display_config); | ||
1337 | schedule_work(&fep->phy_task); | 1342 | schedule_work(&fep->phy_task); |
1338 | } | 1343 | } |
1339 | 1344 | ||
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc7d98..67d5cf9cba8 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@ static int appldata_timer_active; | |||
92 | * Work queue | 92 | * Work queue |
93 | */ | 93 | */ |
94 | static struct workqueue_struct *appldata_wq; | 94 | static struct workqueue_struct *appldata_wq; |
95 | static void appldata_work_fn(void *data); | 95 | static void appldata_work_fn(struct work_struct *work); |
96 | static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); | 96 | static DECLARE_WORK(appldata_work, appldata_work_fn); |
97 | 97 | ||
98 | 98 | ||
99 | /* | 99 | /* |
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data) | |||
125 | * | 125 | * |
126 | * call data gathering function for each (active) module | 126 | * call data gathering function for each (active) module |
127 | */ | 127 | */ |
128 | static void appldata_work_fn(void *data) | 128 | static void appldata_work_fn(struct work_struct *work) |
129 | { | 129 | { |
130 | struct list_head *lh; | 130 | struct list_head *lh; |
131 | struct appldata_ops *ops; | 131 | struct appldata_ops *ops; |
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3cc505..7d4190e5565 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans) | |||
638 | return -1; | 638 | return -1; |
639 | } | 639 | } |
640 | 640 | ||
641 | void chan_interrupt(struct list_head *chans, struct work_struct *task, | 641 | void chan_interrupt(struct list_head *chans, struct delayed_work *task, |
642 | struct tty_struct *tty, int irq) | 642 | struct tty_struct *tty, int irq) |
643 | { | 643 | { |
644 | struct list_head *ele, *next; | 644 | struct list_head *ele, *next; |
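chan_interrupt()'s prototype changes because the work it manages may be queued with a delay, and delayed work is now a distinct type that wraps a work_struct together with a timer. When the handler of a delayed item needs its container, container_of() goes through the embedded .work member. A sketch with assumed names (chan_ctx, chan_handler):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct chan_ctx {			/* hypothetical */
	struct delayed_work dwork;
};

static void chan_handler(struct work_struct *work)
{
	/* a delayed item's handler still receives the embedded work_struct,
	 * so container_of() goes through the .work member */
	struct chan_ctx *ctx =
		container_of(work, struct chan_ctx, dwork.work);

	/* ... drain the channel owned by ctx ... */
	(void)ctx;
}

static void chan_setup(struct chan_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dwork, chan_handler);
	schedule_delayed_work(&ctx->dwork, HZ / 10);	/* arbitrary delay */
}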
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b172160fe0..96f0189327a 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = { | |||
56 | 56 | ||
57 | static LIST_HEAD(mc_requests); | 57 | static LIST_HEAD(mc_requests); |
58 | 58 | ||
59 | static void mc_work_proc(void *unused) | 59 | static void mc_work_proc(struct work_struct *unused) |
60 | { | 60 | { |
61 | struct mconsole_entry *req; | 61 | struct mconsole_entry *req; |
62 | unsigned long flags; | 62 | unsigned long flags; |
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused) | |||
72 | } | 72 | } |
73 | } | 73 | } |
74 | 74 | ||
75 | static DECLARE_WORK(mconsole_work, mc_work_proc, NULL); | 75 | static DECLARE_WORK(mconsole_work, mc_work_proc); |
76 | 76 | ||
77 | static irqreturn_t mconsole_interrupt(int irq, void *dev_id) | 77 | static irqreturn_t mconsole_interrupt(int irq, void *dev_id) |
78 | { | 78 | { |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8bd943..286bc0b3207 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id) | |||
99 | * same device, since it tests for (dev->flags & IFF_UP). So | 99 | * same device, since it tests for (dev->flags & IFF_UP). So |
100 | * there's no harm in delaying the device shutdown. */ | 100 | * there's no harm in delaying the device shutdown. */ |
101 | schedule_work(&close_work); | 101 | schedule_work(&close_work); |
102 | #error this is not permitted - close_work will go out of scope | ||
102 | goto out; | 103 | goto out; |
103 | } | 104 | } |
104 | reactivate_fd(lp->fd, UM_ETH_IRQ); | 105 | reactivate_fd(lp->fd, UM_ETH_IRQ); |
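As with the transceiver-mode case above, this #error flags a lifetime problem rather than a syntax one: with no data pointer, close_work must identify the device from the work item itself, so the item has to live in state that is still valid when keventd runs it; queuing a work item that then goes out of scope is a use-after-free. One plausible shape for the fix, with uml_net_ctx and uml_net_close_worker as assumed names (not the code this patch ends up with):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct uml_net_ctx {			/* hypothetical per-device state */
	struct work_struct close_work;
	struct net_device *dev;
};

static void uml_net_close_worker(struct work_struct *work)
{
	struct uml_net_ctx *lp =
		container_of(work, struct uml_net_ctx, close_work);

	printk(KERN_ERR "%s: fatal error, shutting the interface down\n",
	       lp->dev->name);
	/* ... shut lp->dev down in process context ... */
}

static void uml_net_ctx_init(struct uml_net_ctx *lp, struct net_device *dev)
{
	lp->dev = dev;
	INIT_WORK(&lp->close_work, uml_net_close_worker);
}

/* interrupt handler: the work item lives in per-device state, so it is
 * still valid when keventd eventually runs it */
static void on_fatal_error(struct uml_net_ctx *lp)
{
	schedule_work(&lp->close_work);
}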
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f3733f73..6dfe632f1c1 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port) | |||
132 | DECLARE_MUTEX(ports_sem); | 132 | DECLARE_MUTEX(ports_sem); |
133 | struct list_head ports = LIST_HEAD_INIT(ports); | 133 | struct list_head ports = LIST_HEAD_INIT(ports); |
134 | 134 | ||
135 | void port_work_proc(void *unused) | 135 | void port_work_proc(struct work_struct *unused) |
136 | { | 136 | { |
137 | struct port_list *port; | 137 | struct port_list *port; |
138 | struct list_head *ele; | 138 | struct list_head *ele; |
@@ -150,7 +150,7 @@ void port_work_proc(void *unused) | |||
150 | local_irq_restore(flags); | 150 | local_irq_restore(flags); |
151 | } | 151 | } |
152 | 152 | ||
153 | DECLARE_WORK(port_work, port_work_proc, NULL); | 153 | DECLARE_WORK(port_work, port_work_proc); |
154 | 154 | ||
155 | static irqreturn_t port_interrupt(int irq, void *data) | 155 | static irqreturn_t port_interrupt(int irq, void *data) |
156 | { | 156 | { |
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d8..c7587fc3901 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | |||
306 | */ | 306 | */ |
307 | 307 | ||
308 | static int check_interval = 5 * 60; /* 5 minutes */ | 308 | static int check_interval = 5 * 60; /* 5 minutes */ |
309 | static void mcheck_timer(void *data); | 309 | static void mcheck_timer(struct work_struct *work); |
310 | static DECLARE_WORK(mcheck_work, mcheck_timer, NULL); | 310 | static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); |
311 | 311 | ||
312 | static void mcheck_check_cpu(void *info) | 312 | static void mcheck_check_cpu(void *info) |
313 | { | 313 | { |
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info) | |||
315 | do_machine_check(NULL, 0); | 315 | do_machine_check(NULL, 0); |
316 | } | 316 | } |
317 | 317 | ||
318 | static void mcheck_timer(void *data) | 318 | static void mcheck_timer(struct work_struct *work) |
319 | { | 319 | { |
320 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); | 320 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); |
321 | schedule_delayed_work(&mcheck_work, check_interval * HZ); | 321 | schedule_delayed_work(&mcheck_work, check_interval * HZ); |
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af5..9800147c4c6 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta | |||
753 | } | 753 | } |
754 | 754 | ||
755 | struct create_idle { | 755 | struct create_idle { |
756 | struct work_struct work; | ||
756 | struct task_struct *idle; | 757 | struct task_struct *idle; |
757 | struct completion done; | 758 | struct completion done; |
758 | int cpu; | 759 | int cpu; |
759 | }; | 760 | }; |
760 | 761 | ||
761 | void do_fork_idle(void *_c_idle) | 762 | void do_fork_idle(struct work_struct *work) |
762 | { | 763 | { |
763 | struct create_idle *c_idle = _c_idle; | 764 | struct create_idle *c_idle = |
765 | container_of(work, struct create_idle, work); | ||
764 | 766 | ||
765 | c_idle->idle = fork_idle(c_idle->cpu); | 767 | c_idle->idle = fork_idle(c_idle->cpu); |
766 | complete(&c_idle->done); | 768 | complete(&c_idle->done); |
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) | |||
775 | int timeout; | 777 | int timeout; |
776 | unsigned long start_rip; | 778 | unsigned long start_rip; |
777 | struct create_idle c_idle = { | 779 | struct create_idle c_idle = { |
780 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), | ||
778 | .cpu = cpu, | 781 | .cpu = cpu, |
779 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | 782 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
780 | }; | 783 | }; |
781 | DECLARE_WORK(work, do_fork_idle, &c_idle); | ||
782 | 784 | ||
783 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ | 785 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ |
784 | if (!cpu_gdt_descr[cpu].address && | 786 | if (!cpu_gdt_descr[cpu].address && |
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) | |||
825 | * thread. | 827 | * thread. |
826 | */ | 828 | */ |
827 | if (!keventd_up() || current_is_keventd()) | 829 | if (!keventd_up() || current_is_keventd()) |
828 | work.func(work.data); | 830 | c_idle.work.func(&c_idle.work); |
829 | else { | 831 | else { |
830 | schedule_work(&work); | 832 | schedule_work(&c_idle.work); |
831 | wait_for_completion(&c_idle.done); | 833 | wait_for_completion(&c_idle.done); |
832 | } | 834 | } |
833 | 835 | ||
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cf..9f05bc9b2da 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0; | |||
563 | static unsigned int cpufreq_init = 0; | 563 | static unsigned int cpufreq_init = 0; |
564 | static struct work_struct cpufreq_delayed_get_work; | 564 | static struct work_struct cpufreq_delayed_get_work; |
565 | 565 | ||
566 | static void handle_cpufreq_delayed_get(void *v) | 566 | static void handle_cpufreq_delayed_get(struct work_struct *v) |
567 | { | 567 | { |
568 | unsigned int cpu; | 568 | unsigned int cpu; |
569 | for_each_online_cpu(cpu) { | 569 | for_each_online_cpu(cpu) { |
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = { | |||
639 | 639 | ||
640 | static int __init cpufreq_tsc(void) | 640 | static int __init cpufreq_tsc(void) |
641 | { | 641 | { |
642 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); | 642 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); |
643 | if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, | 643 | if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, |
644 | CPUFREQ_TRANSITION_NOTIFIER)) | 644 | CPUFREQ_TRANSITION_NOTIFIER)) |
645 | cpufreq_init = 1; | 645 | cpufreq_init = 1; |
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 00242111a45..5934c4bfd52 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req, | |||
1274 | * | 1274 | * |
1275 | * FIXME! dispatch queue is not a queue at all! | 1275 | * FIXME! dispatch queue is not a queue at all! |
1276 | */ | 1276 | */ |
1277 | static void as_work_handler(void *data) | 1277 | static void as_work_handler(struct work_struct *work) |
1278 | { | 1278 | { |
1279 | struct request_queue *q = data; | 1279 | struct as_data *ad = container_of(work, struct as_data, antic_work); |
1280 | struct request_queue *q = ad->q; | ||
1280 | unsigned long flags; | 1281 | unsigned long flags; |
1281 | 1282 | ||
1282 | spin_lock_irqsave(q->queue_lock, flags); | 1283 | spin_lock_irqsave(q->queue_lock, flags); |
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q) | |||
1332 | ad->antic_timer.function = as_antic_timeout; | 1333 | ad->antic_timer.function = as_antic_timeout; |
1333 | ad->antic_timer.data = (unsigned long)q; | 1334 | ad->antic_timer.data = (unsigned long)q; |
1334 | init_timer(&ad->antic_timer); | 1335 | init_timer(&ad->antic_timer); |
1335 | INIT_WORK(&ad->antic_work, as_work_handler, q); | 1336 | INIT_WORK(&ad->antic_work, as_work_handler); |
1336 | 1337 | ||
1337 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); | 1338 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); |
1338 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 1339 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e9019ed39b7..84e9be07318 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1840,9 +1840,11 @@ queue_fail: | |||
1840 | return 1; | 1840 | return 1; |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | static void cfq_kick_queue(void *data) | 1843 | static void cfq_kick_queue(struct work_struct *work) |
1844 | { | 1844 | { |
1845 | request_queue_t *q = data; | 1845 | struct cfq_data *cfqd = |
1846 | container_of(work, struct cfq_data, unplug_work); | ||
1847 | request_queue_t *q = cfqd->queue; | ||
1846 | unsigned long flags; | 1848 | unsigned long flags; |
1847 | 1849 | ||
1848 | spin_lock_irqsave(q->queue_lock, flags); | 1850 | spin_lock_irqsave(q->queue_lock, flags); |
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q) | |||
1986 | cfqd->idle_class_timer.function = cfq_idle_class_timer; | 1988 | cfqd->idle_class_timer.function = cfq_idle_class_timer; |
1987 | cfqd->idle_class_timer.data = (unsigned long) cfqd; | 1989 | cfqd->idle_class_timer.data = (unsigned long) cfqd; |
1988 | 1990 | ||
1989 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | 1991 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); |
1990 | 1992 | ||
1991 | cfqd->cfq_quantum = cfq_quantum; | 1993 | cfqd->cfq_quantum = cfq_quantum; |
1992 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 1994 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0f82e12f7b6..cc6e95f8e5d 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | #include <scsi/scsi_cmnd.h> | 35 | #include <scsi/scsi_cmnd.h> |
36 | 36 | ||
37 | static void blk_unplug_work(void *data); | 37 | static void blk_unplug_work(struct work_struct *work); |
38 | static void blk_unplug_timeout(unsigned long data); | 38 | static void blk_unplug_timeout(unsigned long data); |
39 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | 39 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); |
40 | static void init_request_from_bio(struct request *req, struct bio *bio); | 40 | static void init_request_from_bio(struct request *req, struct bio *bio); |
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | |||
227 | if (q->unplug_delay == 0) | 227 | if (q->unplug_delay == 0) |
228 | q->unplug_delay = 1; | 228 | q->unplug_delay = 1; |
229 | 229 | ||
230 | INIT_WORK(&q->unplug_work, blk_unplug_work, q); | 230 | INIT_WORK(&q->unplug_work, blk_unplug_work); |
231 | 231 | ||
232 | q->unplug_timer.function = blk_unplug_timeout; | 232 | q->unplug_timer.function = blk_unplug_timeout; |
233 | q->unplug_timer.data = (unsigned long)q; | 233 | q->unplug_timer.data = (unsigned long)q; |
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | |||
1631 | } | 1631 | } |
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | static void blk_unplug_work(void *data) | 1634 | static void blk_unplug_work(struct work_struct *work) |
1635 | { | 1635 | { |
1636 | request_queue_t *q = data; | 1636 | request_queue_t *q = container_of(work, request_queue_t, unplug_work); |
1637 | 1637 | ||
1638 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, | 1638 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, |
1639 | q->rq.count[READ] + q->rq.count[WRITE]); | 1639 | q->rq.count[READ] + q->rq.count[WRITE]); |
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c
index 9b5b1560106..2ebffb84f1d 100644
--- a/crypto/cryptomgr.c
+++ b/crypto/cryptomgr.c
@@ -40,9 +40,10 @@ struct cryptomgr_param { | |||
40 | char template[CRYPTO_MAX_ALG_NAME]; | 40 | char template[CRYPTO_MAX_ALG_NAME]; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static void cryptomgr_probe(void *data) | 43 | static void cryptomgr_probe(struct work_struct *work) |
44 | { | 44 | { |
45 | struct cryptomgr_param *param = data; | 45 | struct cryptomgr_param *param = |
46 | container_of(work, struct cryptomgr_param, work); | ||
46 | struct crypto_template *tmpl; | 47 | struct crypto_template *tmpl; |
47 | struct crypto_instance *inst; | 48 | struct crypto_instance *inst; |
48 | int err; | 49 | int err; |
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) | |||
112 | param->larval.type = larval->alg.cra_flags; | 113 | param->larval.type = larval->alg.cra_flags; |
113 | param->larval.mask = larval->mask; | 114 | param->larval.mask = larval->mask; |
114 | 115 | ||
115 | INIT_WORK(¶m->work, cryptomgr_probe, param); | 116 | INIT_WORK(¶m->work, cryptomgr_probe); |
116 | schedule_work(¶m->work); | 117 | schedule_work(¶m->work); |
117 | 118 | ||
118 | return NOTIFY_STOP; | 119 | return NOTIFY_STOP; |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 068fe4f100b..02b30ae6a68 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl") | |||
50 | struct acpi_os_dpc { | 50 | struct acpi_os_dpc { |
51 | acpi_osd_exec_callback function; | 51 | acpi_osd_exec_callback function; |
52 | void *context; | 52 | void *context; |
53 | struct work_struct work; | ||
53 | }; | 54 | }; |
54 | 55 | ||
55 | #ifdef CONFIG_ACPI_CUSTOM_DSDT | 56 | #ifdef CONFIG_ACPI_CUSTOM_DSDT |
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */ | |||
564 | acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); | 565 | acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); |
565 | } | 566 | } |
566 | 567 | ||
567 | static void acpi_os_execute_deferred(void *context) | 568 | static void acpi_os_execute_deferred(struct work_struct *work) |
568 | { | 569 | { |
569 | struct acpi_os_dpc *dpc = NULL; | 570 | struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); |
570 | |||
571 | |||
572 | dpc = (struct acpi_os_dpc *)context; | ||
573 | if (!dpc) { | 571 | if (!dpc) { |
574 | printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); | 572 | printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); |
575 | return; | 573 | return; |
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type, | |||
602 | { | 600 | { |
603 | acpi_status status = AE_OK; | 601 | acpi_status status = AE_OK; |
604 | struct acpi_os_dpc *dpc; | 602 | struct acpi_os_dpc *dpc; |
605 | struct work_struct *task; | ||
606 | 603 | ||
607 | ACPI_FUNCTION_TRACE("os_queue_for_execution"); | 604 | ACPI_FUNCTION_TRACE("os_queue_for_execution"); |
608 | 605 | ||
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type, | |||
615 | 612 | ||
616 | /* | 613 | /* |
617 | * Allocate/initialize DPC structure. Note that this memory will be | 614 | * Allocate/initialize DPC structure. Note that this memory will be |
618 | * freed by the callee. The kernel handles the tq_struct list in a | 615 | * freed by the callee. The kernel handles the work_struct list in a |
619 | * way that allows us to also free its memory inside the callee. | 616 | * way that allows us to also free its memory inside the callee. |
620 | * Because we may want to schedule several tasks with different | 617 | * Because we may want to schedule several tasks with different |
621 | * parameters we can't use the approach some kernel code uses of | 618 | * parameters we can't use the approach some kernel code uses of |
622 | * having a static tq_struct. | 619 | * having a static work_struct. |
623 | * We can save time and code by allocating the DPC and tq_structs | ||
624 | * from the same memory. | ||
625 | */ | 620 | */ |
626 | 621 | ||
627 | dpc = | 622 | dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); |
628 | kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct), | ||
629 | GFP_ATOMIC); | ||
630 | if (!dpc) | 623 | if (!dpc) |
631 | return_ACPI_STATUS(AE_NO_MEMORY); | 624 | return_ACPI_STATUS(AE_NO_MEMORY); |
632 | 625 | ||
633 | dpc->function = function; | 626 | dpc->function = function; |
634 | dpc->context = context; | 627 | dpc->context = context; |
635 | 628 | ||
636 | task = (void *)(dpc + 1); | 629 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
637 | INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc); | 630 | if (!queue_work(kacpid_wq, &dpc->work)) { |
638 | |||
639 | if (!queue_work(kacpid_wq, task)) { | ||
640 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | 631 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, |
641 | "Call to queue_work() failed.\n")); | 632 | "Call to queue_work() failed.\n")); |
642 | kfree(dpc); | 633 | kfree(dpc); |
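The ACPI change drops the trick of allocating a work item immediately after the DPC block and instead embeds the work_struct in the allocation, which the handler then frees. The same pattern in isolation, with dpc_ctx as an assumed name and schedule_work() standing in for the driver's private kacpid_wq queue:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct dpc_ctx {			/* hypothetical, mirrors acpi_os_dpc */
	void (*function)(void *context);
	void *context;
	struct work_struct work;
};

static void dpc_execute(struct work_struct *work)
{
	struct dpc_ctx *dpc = container_of(work, struct dpc_ctx, work);

	dpc->function(dpc->context);
	kfree(dpc);			/* the handler owns the allocation */
}

static int dpc_queue(void (*fn)(void *), void *context)
{
	struct dpc_ctx *dpc = kmalloc(sizeof(*dpc), GFP_ATOMIC);

	if (!dpc)
		return -ENOMEM;
	dpc->function = fn;
	dpc->context = context;
	INIT_WORK(&dpc->work, dpc_execute);
	if (!schedule_work(&dpc->work)) {	/* not queued: give the memory back */
		kfree(dpc);
		return -EBUSY;
	}
	return 0;
}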
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f8ec3896b79..8816e30fb7a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
1081 | * ata_port_queue_task - Queue port_task | 1081 | * ata_port_queue_task - Queue port_task |
1082 | * @ap: The ata_port to queue port_task for | 1082 | * @ap: The ata_port to queue port_task for |
1083 | * @fn: workqueue function to be scheduled | 1083 | * @fn: workqueue function to be scheduled |
1084 | * @data: data value to pass to workqueue function | 1084 | * @data: data for @fn to use |
1085 | * @delay: delay time for workqueue function | 1085 | * @delay: delay time for workqueue function |
1086 | * | 1086 | * |
1087 | * Schedule @fn(@data) for execution after @delay jiffies using | 1087 | * Schedule @fn(@data) for execution after @delay jiffies using |
@@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
1096 | * LOCKING: | 1096 | * LOCKING: |
1097 | * Inherited from caller. | 1097 | * Inherited from caller. |
1098 | */ | 1098 | */ |
1099 | void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, | 1099 | void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, |
1100 | unsigned long delay) | 1100 | unsigned long delay) |
1101 | { | 1101 | { |
1102 | int rc; | 1102 | int rc; |
@@ -1104,12 +1104,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, | |||
1104 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) | 1104 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) |
1105 | return; | 1105 | return; |
1106 | 1106 | ||
1107 | PREPARE_WORK(&ap->port_task, fn, data); | 1107 | PREPARE_DELAYED_WORK(&ap->port_task, fn); |
1108 | ap->port_task_data = data; | ||
1108 | 1109 | ||
1109 | if (!delay) | 1110 | rc = queue_delayed_work(ata_wq, &ap->port_task, delay); |
1110 | rc = queue_work(ata_wq, &ap->port_task); | ||
1111 | else | ||
1112 | rc = queue_delayed_work(ata_wq, &ap->port_task, delay); | ||
1113 | 1111 | ||
1114 | /* rc == 0 means that another user is using port task */ | 1112 | /* rc == 0 means that another user is using port task */ |
1115 | WARN_ON(rc == 0); | 1113 | WARN_ON(rc == 0); |
@@ -4588,10 +4586,11 @@ fsm_start: | |||
4588 | return poll_next; | 4586 | return poll_next; |
4589 | } | 4587 | } |
4590 | 4588 | ||
4591 | static void ata_pio_task(void *_data) | 4589 | static void ata_pio_task(struct work_struct *work) |
4592 | { | 4590 | { |
4593 | struct ata_queued_cmd *qc = _data; | 4591 | struct ata_port *ap = |
4594 | struct ata_port *ap = qc->ap; | 4592 | container_of(work, struct ata_port, port_task.work); |
4593 | struct ata_queued_cmd *qc = ap->port_task_data; | ||
4595 | u8 status; | 4594 | u8 status; |
4596 | int poll_next; | 4595 | int poll_next; |
4597 | 4596 | ||
@@ -5635,9 +5634,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host, | |||
5635 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; | 5634 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
5636 | #endif | 5635 | #endif |
5637 | 5636 | ||
5638 | INIT_WORK(&ap->port_task, NULL, NULL); | 5637 | INIT_DELAYED_WORK(&ap->port_task, NULL); |
5639 | INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); | 5638 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
5640 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); | 5639 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
5641 | INIT_LIST_HEAD(&ap->eh_done_q); | 5640 | INIT_LIST_HEAD(&ap->eh_done_q); |
5642 | init_waitqueue_head(&ap->eh_wait_q); | 5641 | init_waitqueue_head(&ap->eh_wait_q); |
5643 | 5642 | ||
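In libata the per-port task becomes a struct delayed_work, the qc pointer that used to ride in the work item moves into a new ap->port_task_data field, and handlers find the port with container_of() on port_task.work. A reduced sketch, with my_port standing in for struct ata_port:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_port {			/* hypothetical stand-in for struct ata_port */
	struct delayed_work port_task;
	void *port_task_data;
};

static void my_pio_task(struct work_struct *work)
{
	struct my_port *ap =
		container_of(work, struct my_port, port_task.work);
	void *qc = ap->port_task_data;	/* stashed when the task was queued */

	/* ... drive the PIO state machine using qc ... */
	(void)qc;
}

static void my_queue_task(struct my_port *ap, work_func_t fn, void *data,
			  unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;
	schedule_delayed_work(&ap->port_task, delay);
}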
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 76a85dfb730..08ad44b3e48 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
332 | if (ap->pflags & ATA_PFLAG_LOADING) | 332 | if (ap->pflags & ATA_PFLAG_LOADING) |
333 | ap->pflags &= ~ATA_PFLAG_LOADING; | 333 | ap->pflags &= ~ATA_PFLAG_LOADING; |
334 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) | 334 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) |
335 | queue_work(ata_aux_wq, &ap->hotplug_task); | 335 | queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); |
336 | 336 | ||
337 | if (ap->pflags & ATA_PFLAG_RECOVERED) | 337 | if (ap->pflags & ATA_PFLAG_RECOVERED) |
338 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); | 338 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 8eaace94d96..664e1377b54 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev) | |||
2963 | 2963 | ||
2964 | /** | 2964 | /** |
2965 | * ata_scsi_hotplug - SCSI part of hotplug | 2965 | * ata_scsi_hotplug - SCSI part of hotplug |
2966 | * @data: Pointer to ATA port to perform SCSI hotplug on | 2966 | * @work: Pointer to ATA port to perform SCSI hotplug on |
2967 | * | 2967 | * |
2968 | * Perform SCSI part of hotplug. It's executed from a separate | 2968 | * Perform SCSI part of hotplug. It's executed from a separate |
2969 | * workqueue after EH completes. This is necessary because SCSI | 2969 | * workqueue after EH completes. This is necessary because SCSI |
@@ -2973,9 +2973,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev) | |||
2973 | * LOCKING: | 2973 | * LOCKING: |
2974 | * Kernel thread context (may sleep). | 2974 | * Kernel thread context (may sleep). |
2975 | */ | 2975 | */ |
2976 | void ata_scsi_hotplug(void *data) | 2976 | void ata_scsi_hotplug(struct work_struct *work) |
2977 | { | 2977 | { |
2978 | struct ata_port *ap = data; | 2978 | struct ata_port *ap = |
2979 | container_of(work, struct ata_port, hotplug_task.work); | ||
2979 | int i; | 2980 | int i; |
2980 | 2981 | ||
2981 | if (ap->pflags & ATA_PFLAG_UNLOADING) { | 2982 | if (ap->pflags & ATA_PFLAG_UNLOADING) { |
@@ -3076,7 +3077,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3076 | 3077 | ||
3077 | /** | 3078 | /** |
3078 | * ata_scsi_dev_rescan - initiate scsi_rescan_device() | 3079 | * ata_scsi_dev_rescan - initiate scsi_rescan_device() |
3079 | * @data: Pointer to ATA port to perform scsi_rescan_device() | 3080 | * @work: Pointer to ATA port to perform scsi_rescan_device() |
3080 | * | 3081 | * |
3081 | * After ATA pass thru (SAT) commands are executed successfully, | 3082 | * After ATA pass thru (SAT) commands are executed successfully, |
3082 | * libata need to propagate the changes to SCSI layer. This | 3083 | * libata need to propagate the changes to SCSI layer. This |
@@ -3086,9 +3087,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3086 | * LOCKING: | 3087 | * LOCKING: |
3087 | * Kernel thread context (may sleep). | 3088 | * Kernel thread context (may sleep). |
3088 | */ | 3089 | */ |
3089 | void ata_scsi_dev_rescan(void *data) | 3090 | void ata_scsi_dev_rescan(struct work_struct *work) |
3090 | { | 3091 | { |
3091 | struct ata_port *ap = data; | 3092 | struct ata_port *ap = |
3093 | container_of(work, struct ata_port, scsi_rescan_task); | ||
3092 | unsigned long flags; | 3094 | unsigned long flags; |
3093 | unsigned int i; | 3095 | unsigned int i; |
3094 | 3096 | ||
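
Note the asymmetry between the two handlers above: hotplug_task is now a struct delayed_work, so ata_scsi_hotplug() resolves its port via hotplug_task.work, while scsi_rescan_task stays a plain struct work_struct and is resolved without the trailing .work. A short sketch of the two forms, using invented names:

#include <linux/workqueue.h>

struct bar {
    struct work_struct plain;    /* only ever queued immediately */
    struct delayed_work delayed; /* sometimes queued with a delay */
    int plain_runs, delayed_runs;
};

static void bar_plain_handler(struct work_struct *work)
{
    struct bar *b = container_of(work, struct bar, plain);        /* no .work */

    b->plain_runs++;
}

static void bar_delayed_handler(struct work_struct *work)
{
    struct bar *b = container_of(work, struct bar, delayed.work); /* via .work */

    b->delayed_runs++;
}
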
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 107b2b56522..81ae41d5f23 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template; | |||
94 | 94 | ||
95 | extern void ata_scsi_scan_host(struct ata_port *ap); | 95 | extern void ata_scsi_scan_host(struct ata_port *ap); |
96 | extern int ata_scsi_offline_dev(struct ata_device *dev); | 96 | extern int ata_scsi_offline_dev(struct ata_device *dev); |
97 | extern void ata_scsi_hotplug(void *data); | 97 | extern void ata_scsi_hotplug(struct work_struct *work); |
98 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, | 98 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, |
99 | unsigned int buflen); | 99 | unsigned int buflen); |
100 | 100 | ||
@@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
124 | unsigned int (*actor) (struct ata_scsi_args *args, | 124 | unsigned int (*actor) (struct ata_scsi_args *args, |
125 | u8 *rbuf, unsigned int buflen)); | 125 | u8 *rbuf, unsigned int buflen)); |
126 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); | 126 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); |
127 | extern void ata_scsi_dev_rescan(void *data); | 127 | extern void ata_scsi_dev_rescan(struct work_struct *work); |
128 | extern int ata_bus_probe(struct ata_port *ap); | 128 | extern int ata_bus_probe(struct ata_port *ap); |
129 | 129 | ||
130 | /* libata-eh.c */ | 130 | /* libata-eh.c */ |
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 87b17c33b3f..f4078612194 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c | |||
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, | |||
135 | int flags); | 135 | int flags); |
136 | static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, | 136 | static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, |
137 | char *page); | 137 | char *page); |
138 | static void idt77252_softint(void *dev_id); | 138 | static void idt77252_softint(struct work_struct *work); |
139 | 139 | ||
140 | 140 | ||
141 | static struct atmdev_ops idt77252_ops = | 141 | static struct atmdev_ops idt77252_ops = |
@@ -2866,9 +2866,10 @@ out: | |||
2866 | } | 2866 | } |
2867 | 2867 | ||
2868 | static void | 2868 | static void |
2869 | idt77252_softint(void *dev_id) | 2869 | idt77252_softint(struct work_struct *work) |
2870 | { | 2870 | { |
2871 | struct idt77252_dev *card = dev_id; | 2871 | struct idt77252_dev *card = |
2872 | container_of(work, struct idt77252_dev, tqueue); | ||
2872 | u32 stat; | 2873 | u32 stat; |
2873 | int done; | 2874 | int done; |
2874 | 2875 | ||
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
3697 | card->pcidev = pcidev; | 3698 | card->pcidev = pcidev; |
3698 | sprintf(card->name, "idt77252-%d", card->index); | 3699 | sprintf(card->name, "idt77252-%d", card->index); |
3699 | 3700 | ||
3700 | INIT_WORK(&card->tqueue, idt77252_softint, (void *)card); | 3701 | INIT_WORK(&card->tqueue, idt77252_softint); |
3701 | 3702 | ||
3702 | membase = pci_resource_start(pcidev, 1); | 3703 | membase = pci_resource_start(pcidev, 1); |
3703 | srambase = pci_resource_start(pcidev, 2); | 3704 | srambase = pci_resource_start(pcidev, 2); |
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 6d111228cfa..2308e83e5f3 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h | |||
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d); | |||
159 | void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); | 159 | void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); |
160 | void aoecmd_ata_rsp(struct sk_buff *); | 160 | void aoecmd_ata_rsp(struct sk_buff *); |
161 | void aoecmd_cfg_rsp(struct sk_buff *); | 161 | void aoecmd_cfg_rsp(struct sk_buff *); |
162 | void aoecmd_sleepwork(void *vp); | 162 | void aoecmd_sleepwork(struct work_struct *); |
163 | struct sk_buff *new_skb(ulong); | 163 | struct sk_buff *new_skb(ulong); |
164 | 164 | ||
165 | int aoedev_init(void); | 165 | int aoedev_init(void); |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 8a13b1af8ba..97f7f535f41 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp) | |||
408 | /* this function performs work that has been deferred until sleeping is OK | 408 | /* this function performs work that has been deferred until sleeping is OK |
409 | */ | 409 | */ |
410 | void | 410 | void |
411 | aoecmd_sleepwork(void *vp) | 411 | aoecmd_sleepwork(struct work_struct *work) |
412 | { | 412 | { |
413 | struct aoedev *d = (struct aoedev *) vp; | 413 | struct aoedev *d = container_of(work, struct aoedev, work); |
414 | 414 | ||
415 | if (d->flags & DEVFL_GDALLOC) | 415 | if (d->flags & DEVFL_GDALLOC) |
416 | aoeblk_gdalloc(d); | 416 | aoeblk_gdalloc(d); |
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 6125921bbec..05a97197c91 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes) | |||
88 | kfree(d); | 88 | kfree(d); |
89 | return NULL; | 89 | return NULL; |
90 | } | 90 | } |
91 | INIT_WORK(&d->work, aoecmd_sleepwork, d); | 91 | INIT_WORK(&d->work, aoecmd_sleepwork); |
92 | spin_lock_init(&d->lock); | 92 | spin_lock_init(&d->lock); |
93 | init_timer(&d->timer); | 93 | init_timer(&d->timer); |
94 | d->timer.data = (ulong) d; | 94 | d->timer.data = (ulong) d; |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9e6d3a87cbe..3f1b38276e9 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -992,11 +992,11 @@ static void empty(void) | |||
992 | { | 992 | { |
993 | } | 993 | } |
994 | 994 | ||
995 | static DECLARE_WORK(floppy_work, NULL, NULL); | 995 | static DECLARE_WORK(floppy_work, NULL); |
996 | 996 | ||
997 | static void schedule_bh(void (*handler) (void)) | 997 | static void schedule_bh(void (*handler) (void)) |
998 | { | 998 | { |
999 | PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL); | 999 | PREPARE_WORK(&floppy_work, (work_func_t)handler); |
1000 | schedule_work(&floppy_work); | 1000 | schedule_work(&floppy_work); |
1001 | } | 1001 | } |
1002 | 1002 | ||
@@ -1008,7 +1008,7 @@ static void cancel_activity(void) | |||
1008 | 1008 | ||
1009 | spin_lock_irqsave(&floppy_lock, flags); | 1009 | spin_lock_irqsave(&floppy_lock, flags); |
1010 | do_floppy = NULL; | 1010 | do_floppy = NULL; |
1011 | PREPARE_WORK(&floppy_work, (void *)empty, NULL); | 1011 | PREPARE_WORK(&floppy_work, (work_func_t)empty); |
1012 | del_timer(&fd_timer); | 1012 | del_timer(&fd_timer); |
1013 | spin_unlock_irqrestore(&floppy_lock, flags); | 1013 | spin_unlock_irqrestore(&floppy_lock, flags); |
1014 | } | 1014 | } |
@@ -1868,7 +1868,7 @@ static void show_floppy(void) | |||
1868 | printk("fdc_busy=%lu\n", fdc_busy); | 1868 | printk("fdc_busy=%lu\n", fdc_busy); |
1869 | if (do_floppy) | 1869 | if (do_floppy) |
1870 | printk("do_floppy=%p\n", do_floppy); | 1870 | printk("do_floppy=%p\n", do_floppy); |
1871 | if (floppy_work.pending) | 1871 | if (work_pending(&floppy_work)) |
1872 | printk("floppy_work.func=%p\n", floppy_work.func); | 1872 | printk("floppy_work.func=%p\n", floppy_work.func); |
1873 | if (timer_pending(&fd_timer)) | 1873 | if (timer_pending(&fd_timer)) |
1874 | printk("fd_timer.function=%p\n", fd_timer.function); | 1874 | printk("fd_timer.function=%p\n", fd_timer.function); |
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void) | |||
4498 | printk("floppy timer still active:%s\n", timeout_message); | 4498 | printk("floppy timer still active:%s\n", timeout_message); |
4499 | if (timer_pending(&fd_timer)) | 4499 | if (timer_pending(&fd_timer)) |
4500 | printk("auxiliary floppy timer still active\n"); | 4500 | printk("auxiliary floppy timer still active\n"); |
4501 | if (floppy_work.pending) | 4501 | if (work_pending(&floppy_work)) |
4502 | printk("work still pending\n"); | 4502 | printk("work still pending\n"); |
4503 | #endif | 4503 | #endif |
4504 | old_fdc = fdc; | 4504 | old_fdc = fdc; |
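
floppy.c is the odd one out: it reuses a single static work item for different handlers through PREPARE_WORK(), so its void (*)(void) helpers are cast to work_func_t rather than rewritten, and the driver stops peeking at floppy_work.pending in favour of the work_pending() accessor. A small sketch of the accessor half of the change (the cast trick is specific to floppy and is not reproduced here):

#include <linux/workqueue.h>

static void quux_handler(struct work_struct *unused)
{
    /* handler body; this sketch needs no per-instance context */
}

static DECLARE_WORK(quux_work, quux_handler);

static void quux_kick(void)
{
    if (!work_pending(&quux_work))  /* replaces reading .pending directly */
        schedule_work(&quux_work);
}
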
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 40a11e56797..9d9bff23f42 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -352,19 +352,19 @@ static enum action (*phase)(void); | |||
352 | 352 | ||
353 | static void run_fsm(void); | 353 | static void run_fsm(void); |
354 | 354 | ||
355 | static void ps_tq_int( void *data); | 355 | static void ps_tq_int(struct work_struct *work); |
356 | 356 | ||
357 | static DECLARE_WORK(fsm_tq, ps_tq_int, NULL); | 357 | static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int); |
358 | 358 | ||
359 | static void schedule_fsm(void) | 359 | static void schedule_fsm(void) |
360 | { | 360 | { |
361 | if (!nice) | 361 | if (!nice) |
362 | schedule_work(&fsm_tq); | 362 | schedule_delayed_work(&fsm_tq, 0); |
363 | else | 363 | else |
364 | schedule_delayed_work(&fsm_tq, nice-1); | 364 | schedule_delayed_work(&fsm_tq, nice-1); |
365 | } | 365 | } |
366 | 366 | ||
367 | static void ps_tq_int(void *data) | 367 | static void ps_tq_int(struct work_struct *work) |
368 | { | 368 | { |
369 | run_fsm(); | 369 | run_fsm(); |
370 | } | 370 | } |
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h index 932342d7a8e..bc370329414 100644 --- a/drivers/block/paride/pseudo.h +++ b/drivers/block/paride/pseudo.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | 37 | ||
38 | static void ps_tq_int( void *data); | 38 | static void ps_tq_int(struct work_struct *work); |
39 | 39 | ||
40 | static void (* ps_continuation)(void); | 40 | static void (* ps_continuation)(void); |
41 | static int (* ps_ready)(void); | 41 | static int (* ps_ready)(void); |
@@ -45,7 +45,7 @@ static int ps_nice = 0; | |||
45 | 45 | ||
46 | static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); | 46 | static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); |
47 | 47 | ||
48 | static DECLARE_WORK(ps_tq, ps_tq_int, NULL); | 48 | static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); |
49 | 49 | ||
50 | static void ps_set_intr(void (*continuation)(void), | 50 | static void ps_set_intr(void (*continuation)(void), |
51 | int (*ready)(void), | 51 | int (*ready)(void), |
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void), | |||
63 | if (!ps_tq_active) { | 63 | if (!ps_tq_active) { |
64 | ps_tq_active = 1; | 64 | ps_tq_active = 1; |
65 | if (!ps_nice) | 65 | if (!ps_nice) |
66 | schedule_work(&ps_tq); | 66 | schedule_delayed_work(&ps_tq, 0); |
67 | else | 67 | else |
68 | schedule_delayed_work(&ps_tq, ps_nice-1); | 68 | schedule_delayed_work(&ps_tq, ps_nice-1); |
69 | } | 69 | } |
70 | spin_unlock_irqrestore(&ps_spinlock,flags); | 70 | spin_unlock_irqrestore(&ps_spinlock,flags); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void ps_tq_int(void *data) | 73 | static void ps_tq_int(struct work_struct *work) |
74 | { | 74 | { |
75 | void (*con)(void); | 75 | void (*con)(void); |
76 | unsigned long flags; | 76 | unsigned long flags; |
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data) | |||
92 | } | 92 | } |
93 | ps_tq_active = 1; | 93 | ps_tq_active = 1; |
94 | if (!ps_nice) | 94 | if (!ps_nice) |
95 | schedule_work(&ps_tq); | 95 | schedule_delayed_work(&ps_tq, 0); |
96 | else | 96 | else |
97 | schedule_delayed_work(&ps_tq, ps_nice-1); | 97 | schedule_delayed_work(&ps_tq, ps_nice-1); |
98 | spin_unlock_irqrestore(&ps_spinlock,flags); | 98 | spin_unlock_irqrestore(&ps_spinlock,flags); |
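
The two paride files above show the other recurring conversion: a static work item that is queued sometimes immediately and sometimes with a delay must become a delayed_work throughout, and the immediate case turns into schedule_delayed_work(&w, 0) instead of schedule_work(&w). A minimal sketch, with the nice/ps_nice knob renamed for illustration:

#include <linux/workqueue.h>

static void tq_handler(struct work_struct *work);

static DECLARE_DELAYED_WORK(tq, tq_handler);
static int niceness;  /* stands in for nice / ps_nice in the hunks above */

static void tq_kick(void)
{
    if (!niceness)
        schedule_delayed_work(&tq, 0);  /* immediate: 0 jiffies of delay */
    else
        schedule_delayed_work(&tq, niceness - 1);
}

static void tq_handler(struct work_struct *work)
{
    /* run the state machine, possibly calling tq_kick() again */
}
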
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 47d6975268f..54509eb3391 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -1244,9 +1244,10 @@ out: | |||
1244 | return IRQ_RETVAL(handled); | 1244 | return IRQ_RETVAL(handled); |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | static void carm_fsm_task (void *_data) | 1247 | static void carm_fsm_task (struct work_struct *work) |
1248 | { | 1248 | { |
1249 | struct carm_host *host = _data; | 1249 | struct carm_host *host = |
1250 | container_of(work, struct carm_host, fsm_task); | ||
1250 | unsigned long flags; | 1251 | unsigned long flags; |
1251 | unsigned int state; | 1252 | unsigned int state; |
1252 | int rc, i, next_dev; | 1253 | int rc, i, next_dev; |
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1619 | host->pdev = pdev; | 1620 | host->pdev = pdev; |
1620 | host->flags = pci_dac ? FL_DAC : 0; | 1621 | host->flags = pci_dac ? FL_DAC : 0; |
1621 | spin_lock_init(&host->lock); | 1622 | spin_lock_init(&host->lock); |
1622 | INIT_WORK(&host->fsm_task, carm_fsm_task, host); | 1623 | INIT_WORK(&host->fsm_task, carm_fsm_task); |
1623 | init_completion(&host->probe_comp); | 1624 | init_completion(&host->probe_comp); |
1624 | 1625 | ||
1625 | for (i = 0; i < ARRAY_SIZE(host->req); i++) | 1626 | for (i = 0; i < ARRAY_SIZE(host->req); i++) |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 0d5c73f0726..2098eff91e1 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
376 | int stalled_pipe); | 376 | int stalled_pipe); |
377 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); | 377 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); |
378 | static void ub_reset_enter(struct ub_dev *sc, int try); | 378 | static void ub_reset_enter(struct ub_dev *sc, int try); |
379 | static void ub_reset_task(void *arg); | 379 | static void ub_reset_task(struct work_struct *work); |
380 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); | 380 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); |
381 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | 381 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
382 | struct ub_capacity *ret); | 382 | struct ub_capacity *ret); |
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try) | |||
1558 | schedule_work(&sc->reset_work); | 1558 | schedule_work(&sc->reset_work); |
1559 | } | 1559 | } |
1560 | 1560 | ||
1561 | static void ub_reset_task(void *arg) | 1561 | static void ub_reset_task(struct work_struct *work) |
1562 | { | 1562 | { |
1563 | struct ub_dev *sc = arg; | 1563 | struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); |
1564 | unsigned long flags; | 1564 | unsigned long flags; |
1565 | struct list_head *p; | 1565 | struct list_head *p; |
1566 | struct ub_lun *lun; | 1566 | struct ub_lun *lun; |
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf, | |||
2179 | usb_init_urb(&sc->work_urb); | 2179 | usb_init_urb(&sc->work_urb); |
2180 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | 2180 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); |
2181 | atomic_set(&sc->poison, 0); | 2181 | atomic_set(&sc->poison, 0); |
2182 | INIT_WORK(&sc->reset_work, ub_reset_task, sc); | 2182 | INIT_WORK(&sc->reset_work, ub_reset_task); |
2183 | init_waitqueue_head(&sc->reset_wait); | 2183 | init_waitqueue_head(&sc->reset_wait); |
2184 | 2184 | ||
2185 | init_timer(&sc->work_timer); | 2185 | init_timer(&sc->work_timer); |
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index 516751754aa..9256985cbe3 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c | |||
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb) | |||
157 | } | 157 | } |
158 | } | 158 | } |
159 | 159 | ||
160 | static void bcm203x_work(void *user_data) | 160 | static void bcm203x_work(struct work_struct *work) |
161 | { | 161 | { |
162 | struct bcm203x_data *data = user_data; | 162 | struct bcm203x_data *data = |
163 | container_of(work, struct bcm203x_data, work); | ||
163 | 164 | ||
164 | if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) | 165 | if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) |
165 | BT_ERR("Can't submit URB"); | 166 | BT_ERR("Can't submit URB"); |
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id | |||
246 | 247 | ||
247 | release_firmware(firmware); | 248 | release_firmware(firmware); |
248 | 249 | ||
249 | INIT_WORK(&data->work, bcm203x_work, (void *) data); | 250 | INIT_WORK(&data->work, bcm203x_work); |
250 | 251 | ||
251 | usb_set_intfdata(intf, data); | 252 | usb_set_intfdata(intf, data); |
252 | 253 | ||
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index e608dadece2..acb2de5e3a9 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event) | |||
926 | * had to poll every port to see if that port needed servicing. | 926 | * had to poll every port to see if that port needed servicing. |
927 | */ | 927 | */ |
928 | static void | 928 | static void |
929 | do_softint(void *private_) | 929 | do_softint(struct work_struct *work) |
930 | { | 930 | { |
931 | struct cyclades_port *info = (struct cyclades_port *) private_; | 931 | struct cyclades_port *info = |
932 | container_of(work, struct cyclades_port, tqueue); | ||
932 | struct tty_struct *tty; | 933 | struct tty_struct *tty; |
933 | 934 | ||
934 | tty = info->tty; | 935 | tty = info->tty; |
@@ -5328,7 +5329,7 @@ cy_init(void) | |||
5328 | info->blocked_open = 0; | 5329 | info->blocked_open = 0; |
5329 | info->default_threshold = 0; | 5330 | info->default_threshold = 0; |
5330 | info->default_timeout = 0; | 5331 | info->default_timeout = 0; |
5331 | INIT_WORK(&info->tqueue, do_softint, info); | 5332 | INIT_WORK(&info->tqueue, do_softint); |
5332 | init_waitqueue_head(&info->open_wait); | 5333 | init_waitqueue_head(&info->open_wait); |
5333 | init_waitqueue_head(&info->close_wait); | 5334 | init_waitqueue_head(&info->close_wait); |
5334 | init_waitqueue_head(&info->shutdown_wait); | 5335 | init_waitqueue_head(&info->shutdown_wait); |
@@ -5403,7 +5404,7 @@ cy_init(void) | |||
5403 | info->blocked_open = 0; | 5404 | info->blocked_open = 0; |
5404 | info->default_threshold = 0; | 5405 | info->default_threshold = 0; |
5405 | info->default_timeout = 0; | 5406 | info->default_timeout = 0; |
5406 | INIT_WORK(&info->tqueue, do_softint, info); | 5407 | INIT_WORK(&info->tqueue, do_softint); |
5407 | init_waitqueue_head(&info->open_wait); | 5408 | init_waitqueue_head(&info->open_wait); |
5408 | init_waitqueue_head(&info->close_wait); | 5409 | init_waitqueue_head(&info->close_wait); |
5409 | init_waitqueue_head(&info->shutdown_wait); | 5410 | init_waitqueue_head(&info->shutdown_wait); |
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c index 60c1695db30..806f9ce5f47 100644 --- a/drivers/char/drm/via_dmablit.c +++ b/drivers/char/drm/via_dmablit.c | |||
@@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data) | |||
500 | 500 | ||
501 | 501 | ||
502 | static void | 502 | static void |
503 | via_dmablit_workqueue(void *data) | 503 | via_dmablit_workqueue(struct work_struct *work) |
504 | { | 504 | { |
505 | drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; | 505 | drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); |
506 | drm_device_t *dev = blitq->dev; | 506 | drm_device_t *dev = blitq->dev; |
507 | unsigned long irqsave; | 507 | unsigned long irqsave; |
508 | drm_via_sg_info_t *cur_sg; | 508 | drm_via_sg_info_t *cur_sg; |
@@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev) | |||
571 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); | 571 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); |
572 | } | 572 | } |
573 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); | 573 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); |
574 | INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); | 574 | INIT_WORK(&blitq->wq, via_dmablit_workqueue); |
575 | init_timer(&blitq->poll_timer); | 575 | init_timer(&blitq->poll_timer); |
576 | blitq->poll_timer.function = &via_dmablit_timer; | 576 | blitq->poll_timer.function = &via_dmablit_timer; |
577 | blitq->poll_timer.data = (unsigned long) blitq; | 577 | blitq->poll_timer.data = (unsigned long) blitq; |
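
via_dmablit (like the synclink and MGSLPC drivers further down) hands a typedef name rather than a struct tag to container_of(); that is fine, since container_of() only needs a type and a member name. A hedged sketch with an invented typedef:

#include <linux/workqueue.h>

typedef struct blit_queue {
    struct work_struct wq;
    int blits;
} blit_queue_t;

static void blit_worker(struct work_struct *work)
{
    blit_queue_t *q = container_of(work, blit_queue_t, wq);  /* typedef works too */

    q->blits++;
}
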
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 706733c0b36..7c71eb77980 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *, | |||
200 | static int info_ioctl(struct tty_struct *, struct file *, | 200 | static int info_ioctl(struct tty_struct *, struct file *, |
201 | unsigned int, unsigned long); | 201 | unsigned int, unsigned long); |
202 | static void pc_set_termios(struct tty_struct *, struct termios *); | 202 | static void pc_set_termios(struct tty_struct *, struct termios *); |
203 | static void do_softint(void *); | 203 | static void do_softint(struct work_struct *work); |
204 | static void pc_stop(struct tty_struct *); | 204 | static void pc_stop(struct tty_struct *); |
205 | static void pc_start(struct tty_struct *); | 205 | static void pc_start(struct tty_struct *); |
206 | static void pc_throttle(struct tty_struct * tty); | 206 | static void pc_throttle(struct tty_struct * tty); |
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd) | |||
1505 | 1505 | ||
1506 | ch->brdchan = bc; | 1506 | ch->brdchan = bc; |
1507 | ch->mailbox = gd; | 1507 | ch->mailbox = gd; |
1508 | INIT_WORK(&ch->tqueue, do_softint, ch); | 1508 | INIT_WORK(&ch->tqueue, do_softint); |
1509 | ch->board = &boards[crd]; | 1509 | ch->board = &boards[crd]; |
1510 | 1510 | ||
1511 | spin_lock_irqsave(&epca_lock, flags); | 1511 | spin_lock_irqsave(&epca_lock, flags); |
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios) | |||
2566 | 2566 | ||
2567 | /* --------------------- Begin do_softint ----------------------- */ | 2567 | /* --------------------- Begin do_softint ----------------------- */ |
2568 | 2568 | ||
2569 | static void do_softint(void *private_) | 2569 | static void do_softint(struct work_struct *work) |
2570 | { /* Begin do_softint */ | 2570 | { /* Begin do_softint */ |
2571 | struct channel *ch = (struct channel *) private_; | 2571 | struct channel *ch = container_of(work, struct channel, tqueue); |
2572 | /* Called in response to a modem change event */ | 2572 | /* Called in response to a modem change event */ |
2573 | if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ | 2573 | if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ |
2574 | struct tty_struct *tty = ch->tty; | 2574 | struct tty_struct *tty = ch->tty; |
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 15a4ea89632..93b55196251 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
723 | * ------------------------------------------------------------------- | 723 | * ------------------------------------------------------------------- |
724 | */ | 724 | */ |
725 | 725 | ||
726 | static void do_softint(void *private_) | 726 | static void do_softint(struct work_struct *work) |
727 | { | 727 | { |
728 | struct esp_struct *info = (struct esp_struct *) private_; | 728 | struct esp_struct *info = |
729 | container_of(work, struct esp_struct, tqueue); | ||
729 | struct tty_struct *tty; | 730 | struct tty_struct *tty; |
730 | 731 | ||
731 | tty = info->tty; | 732 | tty = info->tty; |
@@ -746,9 +747,10 @@ static void do_softint(void *private_) | |||
746 | * do_serial_hangup() -> tty->hangup() -> esp_hangup() | 747 | * do_serial_hangup() -> tty->hangup() -> esp_hangup() |
747 | * | 748 | * |
748 | */ | 749 | */ |
749 | static void do_serial_hangup(void *private_) | 750 | static void do_serial_hangup(struct work_struct *work) |
750 | { | 751 | { |
751 | struct esp_struct *info = (struct esp_struct *) private_; | 752 | struct esp_struct *info = |
753 | container_of(work, struct esp_struct, tqueue_hangup); | ||
752 | struct tty_struct *tty; | 754 | struct tty_struct *tty; |
753 | 755 | ||
754 | tty = info->tty; | 756 | tty = info->tty; |
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void) | |||
2501 | info->magic = ESP_MAGIC; | 2503 | info->magic = ESP_MAGIC; |
2502 | info->close_delay = 5*HZ/10; | 2504 | info->close_delay = 5*HZ/10; |
2503 | info->closing_wait = 30*HZ; | 2505 | info->closing_wait = 30*HZ; |
2504 | INIT_WORK(&info->tqueue, do_softint, info); | 2506 | INIT_WORK(&info->tqueue, do_softint); |
2505 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); | 2507 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup); |
2506 | info->config.rx_timeout = rx_timeout; | 2508 | info->config.rx_timeout = rx_timeout; |
2507 | info->config.flow_on = flow_on; | 2509 | info->config.flow_on = flow_on; |
2508 | info->config.flow_off = flow_off; | 2510 | info->config.flow_off = flow_off; |
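
esp.c, like cyclades.c above and specialix.c further down, hangs two work items off the same per-port structure; because the handler now sees only the work pointer, each handler must name its own member in container_of(), which is what keeps do_softint() and do_serial_hangup() apart. A sketch with invented names:

#include <linux/workqueue.h>

struct port {
    struct work_struct softint;
    struct work_struct hangup;
    int softints, hangups;
};

static void port_softint(struct work_struct *work)
{
    struct port *p = container_of(work, struct port, softint);

    p->softints++;
}

static void port_hangup(struct work_struct *work)
{
    struct port *p = container_of(work, struct port, hangup);

    p->hangups++;
}

static void port_init(struct port *p)
{
    INIT_WORK(&p->softint, port_softint);
    INIT_WORK(&p->hangup, port_hangup);
}
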
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index 817dc409ac2..23b25ada65e 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c | |||
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg); | |||
102 | * Routine to poll RTC seconds field for change as often as possible, | 102 | * Routine to poll RTC seconds field for change as often as possible, |
103 | * after first RTC_UIE use timer to reduce polling | 103 | * after first RTC_UIE use timer to reduce polling |
104 | */ | 104 | */ |
105 | static void genrtc_troutine(void *data) | 105 | static void genrtc_troutine(struct work_struct *work) |
106 | { | 106 | { |
107 | unsigned int tmp = get_rtc_ss(); | 107 | unsigned int tmp = get_rtc_ss(); |
108 | 108 | ||
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit) | |||
255 | irq_active = 1; | 255 | irq_active = 1; |
256 | stop_rtc_timers = 0; | 256 | stop_rtc_timers = 0; |
257 | lostint = 0; | 257 | lostint = 0; |
258 | INIT_WORK(&genrtc_task, genrtc_troutine, NULL); | 258 | INIT_WORK(&genrtc_task, genrtc_troutine); |
259 | oldsecs = get_rtc_ss(); | 259 | oldsecs = get_rtc_ss(); |
260 | init_timer(&timer_task); | 260 | init_timer(&timer_task); |
261 | 261 | ||
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 2cf63e7305a..82a41d5b4ed 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) | 69 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) |
70 | 70 | ||
71 | struct hvsi_struct { | 71 | struct hvsi_struct { |
72 | struct work_struct writer; | 72 | struct delayed_work writer; |
73 | struct work_struct handshaker; | 73 | struct work_struct handshaker; |
74 | wait_queue_head_t emptyq; /* woken when outbuf is emptied */ | 74 | wait_queue_head_t emptyq; /* woken when outbuf is emptied */ |
75 | wait_queue_head_t stateq; /* woken when HVSI state changes */ | 75 | wait_queue_head_t stateq; /* woken when HVSI state changes */ |
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp) | |||
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | 746 | ||
747 | static void hvsi_handshaker(void *arg) | 747 | static void hvsi_handshaker(struct work_struct *work) |
748 | { | 748 | { |
749 | struct hvsi_struct *hp = (struct hvsi_struct *)arg; | 749 | struct hvsi_struct *hp = |
750 | container_of(work, struct hvsi_struct, handshaker); | ||
750 | 751 | ||
751 | if (hvsi_handshake(hp) >= 0) | 752 | if (hvsi_handshake(hp) >= 0) |
752 | return; | 753 | return; |
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp) | |||
951 | } | 952 | } |
952 | 953 | ||
953 | /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ | 954 | /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ |
954 | static void hvsi_write_worker(void *arg) | 955 | static void hvsi_write_worker(struct work_struct *work) |
955 | { | 956 | { |
956 | struct hvsi_struct *hp = (struct hvsi_struct *)arg; | 957 | struct hvsi_struct *hp = |
958 | container_of(work, struct hvsi_struct, writer.work); | ||
957 | unsigned long flags; | 959 | unsigned long flags; |
958 | #ifdef DEBUG | 960 | #ifdef DEBUG |
959 | static long start_j = 0; | 961 | static long start_j = 0; |
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void) | |||
1287 | } | 1289 | } |
1288 | 1290 | ||
1289 | hp = &hvsi_ports[hvsi_count]; | 1291 | hp = &hvsi_ports[hvsi_count]; |
1290 | INIT_WORK(&hp->writer, hvsi_write_worker, hp); | 1292 | INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); |
1291 | INIT_WORK(&hp->handshaker, hvsi_handshaker, hp); | 1293 | INIT_WORK(&hp->handshaker, hvsi_handshaker); |
1292 | init_waitqueue_head(&hp->emptyq); | 1294 | init_waitqueue_head(&hp->emptyq); |
1293 | init_waitqueue_head(&hp->stateq); | 1295 | init_waitqueue_head(&hp->stateq); |
1294 | spin_lock_init(&hp->lock); | 1296 | spin_lock_init(&hp->lock); |
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c index 54d93f0345e..c213fdbdb2b 100644 --- a/drivers/char/ip2/i2lib.c +++ b/drivers/char/ip2/i2lib.c | |||
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr); | |||
84 | static void serviceOutgoingFifo(i2eBordStrPtr); | 84 | static void serviceOutgoingFifo(i2eBordStrPtr); |
85 | 85 | ||
86 | // Functions defined in ip2.c as part of interrupt handling | 86 | // Functions defined in ip2.c as part of interrupt handling |
87 | static void do_input(void *); | 87 | static void do_input(struct work_struct *); |
88 | static void do_status(void *); | 88 | static void do_status(struct work_struct *); |
89 | 89 | ||
90 | //*************** | 90 | //*************** |
91 | //* Debug Data * | 91 | //* Debug Data * |
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) | |||
331 | pCh->ClosingWaitTime = 30*HZ; | 331 | pCh->ClosingWaitTime = 30*HZ; |
332 | 332 | ||
333 | // Initialize task queue objects | 333 | // Initialize task queue objects |
334 | INIT_WORK(&pCh->tqueue_input, do_input, pCh); | 334 | INIT_WORK(&pCh->tqueue_input, do_input); |
335 | INIT_WORK(&pCh->tqueue_status, do_status, pCh); | 335 | INIT_WORK(&pCh->tqueue_status, do_status); |
336 | 336 | ||
337 | #ifdef IP2DEBUG_TRACE | 337 | #ifdef IP2DEBUG_TRACE |
338 | pCh->trace = ip2trace; | 338 | pCh->trace = ip2trace; |
@@ -1573,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB) | |||
1573 | #ifdef USE_IQ | 1573 | #ifdef USE_IQ |
1574 | schedule_work(&pCh->tqueue_input); | 1574 | schedule_work(&pCh->tqueue_input); |
1575 | #else | 1575 | #else |
1576 | do_input(pCh); | 1576 | do_input(&pCh->tqueue_input); |
1577 | #endif | 1577 | #endif |
1578 | 1578 | ||
1579 | // Note we do not need to maintain any flow-control credits at this | 1579 | // Note we do not need to maintain any flow-control credits at this |
@@ -1810,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB) | |||
1810 | #ifdef USE_IQ | 1810 | #ifdef USE_IQ |
1811 | schedule_work(&pCh->tqueue_status); | 1811 | schedule_work(&pCh->tqueue_status); |
1812 | #else | 1812 | #else |
1813 | do_status(pCh); | 1813 | do_status(&pCh->tqueue_status); |
1814 | #endif | 1814 | #endif |
1815 | } | 1815 | } |
1816 | } | 1816 | } |
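
i2lib.c calls its bottom halves synchronously when USE_IQ is not defined, and synclink_gt.c does the same from a timer; since the handlers now derive their context from the work pointer, those direct calls must pass the address of the embedded work item instead of the old context pointer. A minimal sketch:

#include <linux/workqueue.h>

struct chan {
    struct work_struct input_work;
    int polls;
};

static void chan_do_input(struct work_struct *work)
{
    struct chan *ch = container_of(work, struct chan, input_work);

    ch->polls++;
}

static void chan_poll(struct chan *ch)
{
    /* direct call: pass the work item, not the channel pointer,
     * so container_of() in the handler still resolves correctly */
    chan_do_input(&ch->input_work);
}
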
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index a3f32d46d2f..cda2459c1d6 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
@@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file, | |||
189 | unsigned int set, unsigned int clear); | 189 | unsigned int set, unsigned int clear); |
190 | 190 | ||
191 | static void set_irq(int, int); | 191 | static void set_irq(int, int); |
192 | static void ip2_interrupt_bh(i2eBordStrPtr pB); | 192 | static void ip2_interrupt_bh(struct work_struct *work); |
193 | static irqreturn_t ip2_interrupt(int irq, void *dev_id); | 193 | static irqreturn_t ip2_interrupt(int irq, void *dev_id); |
194 | static void ip2_poll(unsigned long arg); | 194 | static void ip2_poll(unsigned long arg); |
195 | static inline void service_all_boards(void); | 195 | static inline void service_all_boards(void); |
196 | static void do_input(void *p); | 196 | static void do_input(struct work_struct *); |
197 | static void do_status(void *p); | 197 | static void do_status(struct work_struct *); |
198 | 198 | ||
199 | static void ip2_wait_until_sent(PTTY,int); | 199 | static void ip2_wait_until_sent(PTTY,int); |
200 | 200 | ||
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum ) | |||
918 | pCh++; | 918 | pCh++; |
919 | } | 919 | } |
920 | ex_exit: | 920 | ex_exit: |
921 | INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB); | 921 | INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh); |
922 | return; | 922 | return; |
923 | 923 | ||
924 | err_release_region: | 924 | err_release_region: |
@@ -1125,8 +1125,8 @@ service_all_boards(void) | |||
1125 | 1125 | ||
1126 | 1126 | ||
1127 | /******************************************************************************/ | 1127 | /******************************************************************************/ |
1128 | /* Function: ip2_interrupt_bh(pB) */ | 1128 | /* Function: ip2_interrupt_bh(work) */ |
1129 | /* Parameters: pB - pointer to the board structure */ | 1129 | /* Parameters: work - pointer to the board structure */ |
1130 | /* Returns: Nothing */ | 1130 | /* Returns: Nothing */ |
1131 | /* */ | 1131 | /* */ |
1132 | /* Description: */ | 1132 | /* Description: */ |
@@ -1135,8 +1135,9 @@ service_all_boards(void) | |||
1135 | /* */ | 1135 | /* */ |
1136 | /******************************************************************************/ | 1136 | /******************************************************************************/ |
1137 | static void | 1137 | static void |
1138 | ip2_interrupt_bh(i2eBordStrPtr pB) | 1138 | ip2_interrupt_bh(struct work_struct *work) |
1139 | { | 1139 | { |
1140 | i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt); | ||
1140 | // pB better well be set or we have a problem! We can only get | 1141 | // pB better well be set or we have a problem! We can only get |
1141 | // here from the IMMEDIATE queue. Here, we process the boards. | 1142 | // here from the IMMEDIATE queue. Here, we process the boards. |
1142 | // Checking pB doesn't cost much and it saves us from the sanity checkers. | 1143 | // Checking pB doesn't cost much and it saves us from the sanity checkers. |
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg) | |||
1245 | ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); | 1246 | ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); |
1246 | } | 1247 | } |
1247 | 1248 | ||
1248 | static void do_input(void *p) | 1249 | static void do_input(struct work_struct *work) |
1249 | { | 1250 | { |
1250 | i2ChanStrPtr pCh = p; | 1251 | i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input); |
1251 | unsigned long flags; | 1252 | unsigned long flags; |
1252 | 1253 | ||
1253 | ip2trace(CHANN, ITRC_INPUT, 21, 0 ); | 1254 | ip2trace(CHANN, ITRC_INPUT, 21, 0 ); |
@@ -1279,9 +1280,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush) | |||
1279 | } | 1280 | } |
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | static void do_status(void *p) | 1283 | static void do_status(struct work_struct *work) |
1283 | { | 1284 | { |
1284 | i2ChanStrPtr pCh = p; | 1285 | i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status); |
1285 | int status; | 1286 | int status; |
1286 | 1287 | ||
1287 | status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); | 1288 | status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); |
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 58c955e390b..1637c1d9a4b 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -530,9 +530,9 @@ sched_again: | |||
530 | /* Interrupt handlers */ | 530 | /* Interrupt handlers */ |
531 | 531 | ||
532 | 532 | ||
533 | static void isicom_bottomhalf(void *data) | 533 | static void isicom_bottomhalf(struct work_struct *work) |
534 | { | 534 | { |
535 | struct isi_port *port = (struct isi_port *) data; | 535 | struct isi_port *port = container_of(work, struct isi_port, bh_tqueue); |
536 | struct tty_struct *tty = port->tty; | 536 | struct tty_struct *tty = port->tty; |
537 | 537 | ||
538 | if (!tty) | 538 | if (!tty) |
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty) | |||
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | /* hangup et all */ | 1476 | /* hangup et all */ |
1477 | static void do_isicom_hangup(void *data) | 1477 | static void do_isicom_hangup(struct work_struct *work) |
1478 | { | 1478 | { |
1479 | struct isi_port *port = data; | 1479 | struct isi_port *port = container_of(work, struct isi_port, hangup_tq); |
1480 | struct tty_struct *tty; | 1480 | struct tty_struct *tty; |
1481 | 1481 | ||
1482 | tty = port->tty; | 1482 | tty = port->tty; |
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void) | |||
1966 | port->channel = channel; | 1966 | port->channel = channel; |
1967 | port->close_delay = 50 * HZ/100; | 1967 | port->close_delay = 50 * HZ/100; |
1968 | port->closing_wait = 3000 * HZ/100; | 1968 | port->closing_wait = 3000 * HZ/100; |
1969 | INIT_WORK(&port->hangup_tq, do_isicom_hangup, port); | 1969 | INIT_WORK(&port->hangup_tq, do_isicom_hangup); |
1970 | INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port); | 1970 | INIT_WORK(&port->bh_tqueue, isicom_bottomhalf); |
1971 | port->status = 0; | 1971 | port->status = 0; |
1972 | init_waitqueue_head(&port->open_wait); | 1972 | init_waitqueue_head(&port->open_wait); |
1973 | init_waitqueue_head(&port->close_wait); | 1973 | init_waitqueue_head(&port->close_wait); |
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 96cb1f07332..2d025a9fd14 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem; | |||
222 | /* | 222 | /* |
223 | * static functions: | 223 | * static functions: |
224 | */ | 224 | */ |
225 | static void do_moxa_softint(void *); | 225 | static void do_moxa_softint(struct work_struct *); |
226 | static int moxa_open(struct tty_struct *, struct file *); | 226 | static int moxa_open(struct tty_struct *, struct file *); |
227 | static void moxa_close(struct tty_struct *, struct file *); | 227 | static void moxa_close(struct tty_struct *, struct file *); |
228 | static int moxa_write(struct tty_struct *, const unsigned char *, int); | 228 | static int moxa_write(struct tty_struct *, const unsigned char *, int); |
@@ -363,7 +363,7 @@ static int __init moxa_init(void) | |||
363 | for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { | 363 | for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { |
364 | ch->type = PORT_16550A; | 364 | ch->type = PORT_16550A; |
365 | ch->port = i; | 365 | ch->port = i; |
366 | INIT_WORK(&ch->tqueue, do_moxa_softint, ch); | 366 | INIT_WORK(&ch->tqueue, do_moxa_softint); |
367 | ch->tty = NULL; | 367 | ch->tty = NULL; |
368 | ch->close_delay = 5 * HZ / 10; | 368 | ch->close_delay = 5 * HZ / 10; |
369 | ch->closing_wait = 30 * HZ; | 369 | ch->closing_wait = 30 * HZ; |
@@ -509,9 +509,9 @@ static void __exit moxa_exit(void) | |||
509 | module_init(moxa_init); | 509 | module_init(moxa_init); |
510 | module_exit(moxa_exit); | 510 | module_exit(moxa_exit); |
511 | 511 | ||
512 | static void do_moxa_softint(void *private_) | 512 | static void do_moxa_softint(struct work_struct *work) |
513 | { | 513 | { |
514 | struct moxa_str *ch = (struct moxa_str *) private_; | 514 | struct moxa_str *ch = container_of(work, struct moxa_str, tqueue); |
515 | struct tty_struct *tty; | 515 | struct tty_struct *tty; |
516 | 516 | ||
517 | if (ch && (tty = ch->tty)) { | 517 | if (ch && (tty = ch->tty)) { |
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 048d91142c1..5ed2486b758 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -389,7 +389,7 @@ static int mxser_init(void); | |||
389 | /* static void mxser_poll(unsigned long); */ | 389 | /* static void mxser_poll(unsigned long); */ |
390 | static int mxser_get_ISA_conf(int, struct mxser_hwconf *); | 390 | static int mxser_get_ISA_conf(int, struct mxser_hwconf *); |
391 | static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); | 391 | static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); |
392 | static void mxser_do_softint(void *); | 392 | static void mxser_do_softint(struct work_struct *); |
393 | static int mxser_open(struct tty_struct *, struct file *); | 393 | static int mxser_open(struct tty_struct *, struct file *); |
394 | static void mxser_close(struct tty_struct *, struct file *); | 394 | static void mxser_close(struct tty_struct *, struct file *); |
395 | static int mxser_write(struct tty_struct *, const unsigned char *, int); | 395 | static int mxser_write(struct tty_struct *, const unsigned char *, int); |
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf) | |||
590 | info->custom_divisor = hwconf->baud_base[i] * 16; | 590 | info->custom_divisor = hwconf->baud_base[i] * 16; |
591 | info->close_delay = 5 * HZ / 10; | 591 | info->close_delay = 5 * HZ / 10; |
592 | info->closing_wait = 30 * HZ; | 592 | info->closing_wait = 30 * HZ; |
593 | INIT_WORK(&info->tqueue, mxser_do_softint, info); | 593 | INIT_WORK(&info->tqueue, mxser_do_softint); |
594 | info->normal_termios = mxvar_sdriver->init_termios; | 594 | info->normal_termios = mxvar_sdriver->init_termios; |
595 | init_waitqueue_head(&info->open_wait); | 595 | init_waitqueue_head(&info->open_wait); |
596 | init_waitqueue_head(&info->close_wait); | 596 | init_waitqueue_head(&info->close_wait); |
@@ -917,9 +917,10 @@ static int mxser_init(void) | |||
917 | return 0; | 917 | return 0; |
918 | } | 918 | } |
919 | 919 | ||
920 | static void mxser_do_softint(void *private_) | 920 | static void mxser_do_softint(struct work_struct *work) |
921 | { | 921 | { |
922 | struct mxser_struct *info = private_; | 922 | struct mxser_struct *info = |
923 | container_of(work, struct mxser_struct, tqueue); | ||
923 | struct tty_struct *tty; | 924 | struct tty_struct *tty; |
924 | 925 | ||
925 | tty = info->tty; | 926 | tty = info->tty; |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index f9f72500ea5..1bd12296dca 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -421,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id); | |||
421 | /* | 421 | /* |
422 | * Bottom half interrupt handlers | 422 | * Bottom half interrupt handlers |
423 | */ | 423 | */ |
424 | static void bh_handler(void* Context); | 424 | static void bh_handler(struct work_struct *work); |
425 | static void bh_transmit(MGSLPC_INFO *info); | 425 | static void bh_transmit(MGSLPC_INFO *info); |
426 | static void bh_status(MGSLPC_INFO *info); | 426 | static void bh_status(MGSLPC_INFO *info); |
427 | 427 | ||
@@ -547,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
547 | 547 | ||
548 | memset(info, 0, sizeof(MGSLPC_INFO)); | 548 | memset(info, 0, sizeof(MGSLPC_INFO)); |
549 | info->magic = MGSLPC_MAGIC; | 549 | info->magic = MGSLPC_MAGIC; |
550 | INIT_WORK(&info->task, bh_handler, info); | 550 | INIT_WORK(&info->task, bh_handler); |
551 | info->max_frame_size = 4096; | 551 | info->max_frame_size = 4096; |
552 | info->close_delay = 5*HZ/10; | 552 | info->close_delay = 5*HZ/10; |
553 | info->closing_wait = 30*HZ; | 553 | info->closing_wait = 30*HZ; |
@@ -835,9 +835,9 @@ static int bh_action(MGSLPC_INFO *info) | |||
835 | return rc; | 835 | return rc; |
836 | } | 836 | } |
837 | 837 | ||
838 | static void bh_handler(void* Context) | 838 | static void bh_handler(struct work_struct *work) |
839 | { | 839 | { |
840 | MGSLPC_INFO *info = (MGSLPC_INFO*)Context; | 840 | MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); |
841 | int action; | 841 | int action; |
842 | 842 | ||
843 | if (!info) | 843 | if (!info) |
diff --git a/drivers/char/random.c b/drivers/char/random.c index d40df30c2b1..4c6782a1ecd 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1422,9 +1422,9 @@ static struct keydata { | |||
1422 | 1422 | ||
1423 | static unsigned int ip_cnt; | 1423 | static unsigned int ip_cnt; |
1424 | 1424 | ||
1425 | static void rekey_seq_generator(void *private_); | 1425 | static void rekey_seq_generator(struct work_struct *work); |
1426 | 1426 | ||
1427 | static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); | 1427 | static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); |
1428 | 1428 | ||
1429 | /* | 1429 | /* |
1430 | * Lock avoidance: | 1430 | * Lock avoidance: |
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); | |||
1438 | * happen, and even if that happens only a not perfectly compliant | 1438 | * happen, and even if that happens only a not perfectly compliant |
1439 | * ISN is generated, nothing fatal. | 1439 | * ISN is generated, nothing fatal. |
1440 | */ | 1440 | */ |
1441 | static void rekey_seq_generator(void *private_) | 1441 | static void rekey_seq_generator(struct work_struct *work) |
1442 | { | 1442 | { |
1443 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; | 1443 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; |
1444 | 1444 | ||
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index c084149153d..fc87070f186 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c | |||
@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state) | |||
765 | sonypi_device.bluetooth_power = state; | 765 | sonypi_device.bluetooth_power = state; |
766 | } | 766 | } |
767 | 767 | ||
768 | static void input_keyrelease(void *data) | 768 | static void input_keyrelease(struct work_struct *work) |
769 | { | 769 | { |
770 | struct sonypi_keypress kp; | 770 | struct sonypi_keypress kp; |
771 | 771 | ||
@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev) | |||
1412 | goto err_inpdev_unregister; | 1412 | goto err_inpdev_unregister; |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL); | 1415 | INIT_WORK(&sonypi_device.input_work, input_keyrelease); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | sonypi_enable(0); | 1418 | sonypi_enable(0); |
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 7e1bd9562c2..99137ab66b6 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty) | |||
2261 | * do_sx_hangup() -> tty->hangup() -> sx_hangup() | 2261 | * do_sx_hangup() -> tty->hangup() -> sx_hangup() |
2262 | * | 2262 | * |
2263 | */ | 2263 | */ |
2264 | static void do_sx_hangup(void *private_) | 2264 | static void do_sx_hangup(struct work_struct *work) |
2265 | { | 2265 | { |
2266 | struct specialix_port *port = (struct specialix_port *) private_; | 2266 | struct specialix_port *port = |
2267 | container_of(work, struct specialix_port, tqueue_hangup); | ||
2267 | struct tty_struct *tty; | 2268 | struct tty_struct *tty; |
2268 | 2269 | ||
2269 | func_enter(); | 2270 | func_enter(); |
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios | |||
2336 | } | 2337 | } |
2337 | 2338 | ||
2338 | 2339 | ||
2339 | static void do_softint(void *private_) | 2340 | static void do_softint(struct work_struct *work) |
2340 | { | 2341 | { |
2341 | struct specialix_port *port = (struct specialix_port *) private_; | 2342 | struct specialix_port *port = |
2343 | container_of(work, struct specialix_port, tqueue); | ||
2342 | struct tty_struct *tty; | 2344 | struct tty_struct *tty; |
2343 | 2345 | ||
2344 | func_enter(); | 2346 | func_enter(); |
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void) | |||
2411 | memset(sx_port, 0, sizeof(sx_port)); | 2413 | memset(sx_port, 0, sizeof(sx_port)); |
2412 | for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { | 2414 | for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { |
2413 | sx_port[i].magic = SPECIALIX_MAGIC; | 2415 | sx_port[i].magic = SPECIALIX_MAGIC; |
2414 | INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]); | 2416 | INIT_WORK(&sx_port[i].tqueue, do_softint); |
2415 | INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]); | 2417 | INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup); |
2416 | sx_port[i].close_delay = 50 * HZ/100; | 2418 | sx_port[i].close_delay = 50 * HZ/100; |
2417 | sx_port[i].closing_wait = 3000 * HZ/100; | 2419 | sx_port[i].closing_wait = 3000 * HZ/100; |
2418 | init_waitqueue_head(&sx_port[i].open_wait); | 2420 | init_waitqueue_head(&sx_port[i].open_wait); |
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 06784adcc35..147c30da81e 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
@@ -802,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u | |||
802 | /* | 802 | /* |
803 | * Bottom half interrupt handlers | 803 | * Bottom half interrupt handlers |
804 | */ | 804 | */ |
805 | static void mgsl_bh_handler(void* Context); | 805 | static void mgsl_bh_handler(struct work_struct *work); |
806 | static void mgsl_bh_receive(struct mgsl_struct *info); | 806 | static void mgsl_bh_receive(struct mgsl_struct *info); |
807 | static void mgsl_bh_transmit(struct mgsl_struct *info); | 807 | static void mgsl_bh_transmit(struct mgsl_struct *info); |
808 | static void mgsl_bh_status(struct mgsl_struct *info); | 808 | static void mgsl_bh_status(struct mgsl_struct *info); |
@@ -1071,9 +1071,10 @@ static int mgsl_bh_action(struct mgsl_struct *info) | |||
1071 | /* | 1071 | /* |
1072 | * Perform bottom half processing of work items queued by ISR. | 1072 | * Perform bottom half processing of work items queued by ISR. |
1073 | */ | 1073 | */ |
1074 | static void mgsl_bh_handler(void* Context) | 1074 | static void mgsl_bh_handler(struct work_struct *work) |
1075 | { | 1075 | { |
1076 | struct mgsl_struct *info = (struct mgsl_struct*)Context; | 1076 | struct mgsl_struct *info = |
1077 | container_of(work, struct mgsl_struct, task); | ||
1077 | int action; | 1078 | int action; |
1078 | 1079 | ||
1079 | if (!info) | 1080 | if (!info) |
@@ -4337,7 +4338,7 @@ static struct mgsl_struct* mgsl_allocate_device(void) | |||
4337 | } else { | 4338 | } else { |
4338 | memset(info, 0, sizeof(struct mgsl_struct)); | 4339 | memset(info, 0, sizeof(struct mgsl_struct)); |
4339 | info->magic = MGSL_MAGIC; | 4340 | info->magic = MGSL_MAGIC; |
4340 | INIT_WORK(&info->task, mgsl_bh_handler, info); | 4341 | INIT_WORK(&info->task, mgsl_bh_handler); |
4341 | info->max_frame_size = 4096; | 4342 | info->max_frame_size = 4096; |
4342 | info->close_delay = 5*HZ/10; | 4343 | info->close_delay = 5*HZ/10; |
4343 | info->closing_wait = 30*HZ; | 4344 | info->closing_wait = 30*HZ; |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index d4334c79f8d..07f34d43dc7 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -485,7 +485,7 @@ static void enable_loopback(struct slgt_info *info); | |||
485 | static void set_rate(struct slgt_info *info, u32 data_rate); | 485 | static void set_rate(struct slgt_info *info, u32 data_rate); |
486 | 486 | ||
487 | static int bh_action(struct slgt_info *info); | 487 | static int bh_action(struct slgt_info *info); |
488 | static void bh_handler(void* context); | 488 | static void bh_handler(struct work_struct *work); |
489 | static void bh_transmit(struct slgt_info *info); | 489 | static void bh_transmit(struct slgt_info *info); |
490 | static void isr_serial(struct slgt_info *info); | 490 | static void isr_serial(struct slgt_info *info); |
491 | static void isr_rdma(struct slgt_info *info); | 491 | static void isr_rdma(struct slgt_info *info); |
@@ -1878,9 +1878,9 @@ static int bh_action(struct slgt_info *info) | |||
1878 | /* | 1878 | /* |
1879 | * perform bottom half processing | 1879 | * perform bottom half processing |
1880 | */ | 1880 | */ |
1881 | static void bh_handler(void* context) | 1881 | static void bh_handler(struct work_struct *work) |
1882 | { | 1882 | { |
1883 | struct slgt_info *info = context; | 1883 | struct slgt_info *info = container_of(work, struct slgt_info, task); |
1884 | int action; | 1884 | int action; |
1885 | 1885 | ||
1886 | if (!info) | 1886 | if (!info) |
@@ -3326,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev | |||
3326 | } else { | 3326 | } else { |
3327 | memset(info, 0, sizeof(struct slgt_info)); | 3327 | memset(info, 0, sizeof(struct slgt_info)); |
3328 | info->magic = MGSL_MAGIC; | 3328 | info->magic = MGSL_MAGIC; |
3329 | INIT_WORK(&info->task, bh_handler, info); | 3329 | INIT_WORK(&info->task, bh_handler); |
3330 | info->max_frame_size = 4096; | 3330 | info->max_frame_size = 4096; |
3331 | info->raw_rx_size = DMABUFSIZE; | 3331 | info->raw_rx_size = DMABUFSIZE; |
3332 | info->close_delay = 5*HZ/10; | 3332 | info->close_delay = 5*HZ/10; |
@@ -4799,6 +4799,6 @@ static void rx_timeout(unsigned long context) | |||
4799 | spin_lock_irqsave(&info->lock, flags); | 4799 | spin_lock_irqsave(&info->lock, flags); |
4800 | info->pending_bh |= BH_RECEIVE; | 4800 | info->pending_bh |= BH_RECEIVE; |
4801 | spin_unlock_irqrestore(&info->lock, flags); | 4801 | spin_unlock_irqrestore(&info->lock, flags); |
4802 | bh_handler(info); | 4802 | bh_handler(&info->task); |
4803 | } | 4803 | } |
4804 | 4804 | ||
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 3e932b68137..13a57245cf2 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
@@ -602,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable); | |||
602 | static void set_rate(SLMP_INFO *info, u32 data_rate); | 602 | static void set_rate(SLMP_INFO *info, u32 data_rate); |
603 | 603 | ||
604 | static int bh_action(SLMP_INFO *info); | 604 | static int bh_action(SLMP_INFO *info); |
605 | static void bh_handler(void* Context); | 605 | static void bh_handler(struct work_struct *work); |
606 | static void bh_receive(SLMP_INFO *info); | 606 | static void bh_receive(SLMP_INFO *info); |
607 | static void bh_transmit(SLMP_INFO *info); | 607 | static void bh_transmit(SLMP_INFO *info); |
608 | static void bh_status(SLMP_INFO *info); | 608 | static void bh_status(SLMP_INFO *info); |
@@ -2063,9 +2063,9 @@ int bh_action(SLMP_INFO *info) | |||
2063 | 2063 | ||
2064 | /* Perform bottom half processing of work items queued by ISR. | 2064 | /* Perform bottom half processing of work items queued by ISR. |
2065 | */ | 2065 | */ |
2066 | void bh_handler(void* Context) | 2066 | void bh_handler(struct work_struct *work) |
2067 | { | 2067 | { |
2068 | SLMP_INFO *info = (SLMP_INFO*)Context; | 2068 | SLMP_INFO *info = container_of(work, SLMP_INFO, task); |
2069 | int action; | 2069 | int action; |
2070 | 2070 | ||
2071 | if (!info) | 2071 | if (!info) |
@@ -3805,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) | |||
3805 | } else { | 3805 | } else { |
3806 | memset(info, 0, sizeof(SLMP_INFO)); | 3806 | memset(info, 0, sizeof(SLMP_INFO)); |
3807 | info->magic = MGSL_MAGIC; | 3807 | info->magic = MGSL_MAGIC; |
3808 | INIT_WORK(&info->task, bh_handler, info); | 3808 | INIT_WORK(&info->task, bh_handler); |
3809 | info->max_frame_size = 4096; | 3809 | info->max_frame_size = 4096; |
3810 | info->close_delay = 5*HZ/10; | 3810 | info->close_delay = 5*HZ/10; |
3811 | info->closing_wait = 30*HZ; | 3811 | info->closing_wait = 30*HZ; |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 5f49280779f..c64f5bcff94 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = { | |||
219 | .enable_mask = SYSRQ_ENABLE_SIGNAL, | 219 | .enable_mask = SYSRQ_ENABLE_SIGNAL, |
220 | }; | 220 | }; |
221 | 221 | ||
222 | static void moom_callback(void *ignored) | 222 | static void moom_callback(struct work_struct *ignored) |
223 | { | 223 | { |
224 | out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], | 224 | out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], |
225 | GFP_KERNEL, 0); | 225 | GFP_KERNEL, 0); |
226 | } | 226 | } |
227 | 227 | ||
228 | static DECLARE_WORK(moom_work, moom_callback, NULL); | 228 | static DECLARE_WORK(moom_work, moom_callback); |
229 | 229 | ||
230 | static void sysrq_handle_moom(int key, struct tty_struct *tty) | 230 | static void sysrq_handle_moom(int key, struct tty_struct *tty) |
231 | { | 231 | { |
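For the static work items in sysrq.c (and console_work in vt.c below), the callback needs no per-invocation context, so the unused void *data simply becomes an ignored struct work_struct * and DECLARE_WORK() drops its third argument. A self-contained sketch with hypothetical names, shaped as a tiny module:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void example_callback(struct work_struct *ignored)
{
	printk(KERN_INFO "example work ran\n");
}

static DECLARE_WORK(example_work, example_callback);

static int __init example_init(void)
{
	schedule_work(&example_work);	/* queued exactly as before */
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");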
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 6e1329d404d..774fa861169 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr) | |||
325 | schedule_work(&chip->work); | 325 | schedule_work(&chip->work); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void timeout_work(void *ptr) | 328 | static void timeout_work(struct work_struct *work) |
329 | { | 329 | { |
330 | struct tpm_chip *chip = ptr; | 330 | struct tpm_chip *chip = container_of(work, struct tpm_chip, work); |
331 | 331 | ||
332 | down(&chip->buffer_mutex); | 332 | down(&chip->buffer_mutex); |
333 | atomic_set(&chip->data_pending, 0); | 333 | atomic_set(&chip->data_pending, 0); |
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend | |||
1105 | init_MUTEX(&chip->tpm_mutex); | 1105 | init_MUTEX(&chip->tpm_mutex); |
1106 | INIT_LIST_HEAD(&chip->list); | 1106 | INIT_LIST_HEAD(&chip->list); |
1107 | 1107 | ||
1108 | INIT_WORK(&chip->work, timeout_work, chip); | 1108 | INIT_WORK(&chip->work, timeout_work); |
1109 | 1109 | ||
1110 | init_timer(&chip->user_read_timer); | 1110 | init_timer(&chip->user_read_timer); |
1111 | chip->user_read_timer.function = user_reader_timeout; | 1111 | chip->user_read_timer.function = user_reader_timeout; |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 50dc49205a2..b3cfc8bc613 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); | |||
1254 | 1254 | ||
1255 | /** | 1255 | /** |
1256 | * do_tty_hangup - actual handler for hangup events | 1256 | * do_tty_hangup - actual handler for hangup events |
1257 | * @data: tty device | 1257 | * @work: tty device |
1258 | * | 1258 | * |
1259 | * This can be called by the "eventd" kernel thread. That is process | 1259 | * This can be called by the "eventd" kernel thread. That is process |
1260 | * synchronous but doesn't hold any locks, so we need to make sure we | 1260 | * synchronous but doesn't hold any locks, so we need to make sure we |
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); | |||
1274 | * tasklist_lock to walk task list for hangup event | 1274 | * tasklist_lock to walk task list for hangup event |
1275 | * | 1275 | * |
1276 | */ | 1276 | */ |
1277 | static void do_tty_hangup(void *data) | 1277 | static void do_tty_hangup(struct work_struct *work) |
1278 | { | 1278 | { |
1279 | struct tty_struct *tty = (struct tty_struct *) data; | 1279 | struct tty_struct *tty = |
1280 | container_of(work, struct tty_struct, hangup_work); | ||
1280 | struct file * cons_filp = NULL; | 1281 | struct file * cons_filp = NULL; |
1281 | struct file *filp, *f = NULL; | 1282 | struct file *filp, *f = NULL; |
1282 | struct task_struct *p; | 1283 | struct task_struct *p; |
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty) | |||
1433 | 1434 | ||
1434 | printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); | 1435 | printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); |
1435 | #endif | 1436 | #endif |
1436 | do_tty_hangup((void *) tty); | 1437 | do_tty_hangup(&tty->hangup_work); |
1437 | } | 1438 | } |
1438 | EXPORT_SYMBOL(tty_vhangup); | 1439 | EXPORT_SYMBOL(tty_vhangup); |
1439 | 1440 | ||
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3304 | * Nasty bug: do_SAK is being called in interrupt context. This can | 3305 | * Nasty bug: do_SAK is being called in interrupt context. This can |
3305 | * deadlock. We punt it up to process context. AKPM - 16Mar2001 | 3306 | * deadlock. We punt it up to process context. AKPM - 16Mar2001 |
3306 | */ | 3307 | */ |
3307 | static void __do_SAK(void *arg) | 3308 | static void __do_SAK(struct work_struct *work) |
3308 | { | 3309 | { |
3310 | struct tty_struct *tty = | ||
3311 | container_of(work, struct tty_struct, SAK_work); | ||
3309 | #ifdef TTY_SOFT_SAK | 3312 | #ifdef TTY_SOFT_SAK |
3310 | tty_hangup(tty); | 3313 | tty_hangup(tty); |
3311 | #else | 3314 | #else |
3312 | struct tty_struct *tty = arg; | ||
3313 | struct task_struct *g, *p; | 3315 | struct task_struct *g, *p; |
3314 | int session; | 3316 | int session; |
3315 | int i; | 3317 | int i; |
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty) | |||
3388 | { | 3390 | { |
3389 | if (!tty) | 3391 | if (!tty) |
3390 | return; | 3392 | return; |
3391 | PREPARE_WORK(&tty->SAK_work, __do_SAK, tty); | 3393 | PREPARE_WORK(&tty->SAK_work, __do_SAK); |
3392 | schedule_work(&tty->SAK_work); | 3394 | schedule_work(&tty->SAK_work); |
3393 | } | 3395 | } |
3394 | 3396 | ||
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK); | |||
3396 | 3398 | ||
3397 | /** | 3399 | /** |
3398 | * flush_to_ldisc | 3400 | * flush_to_ldisc |
3399 | * @private_: tty structure passed from work queue. | 3401 | * @work: tty structure passed from work queue. |
3400 | * | 3402 | * |
3401 | * This routine is called out of the software interrupt to flush data | 3403 | * This routine is called out of the software interrupt to flush data |
3402 | * from the buffer chain to the line discipline. | 3404 | * from the buffer chain to the line discipline. |
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK); | |||
3406 | * receive_buf method is single threaded for each tty instance. | 3408 | * receive_buf method is single threaded for each tty instance. |
3407 | */ | 3409 | */ |
3408 | 3410 | ||
3409 | static void flush_to_ldisc(void *private_) | 3411 | static void flush_to_ldisc(struct work_struct *work) |
3410 | { | 3412 | { |
3411 | struct tty_struct *tty = (struct tty_struct *) private_; | 3413 | struct tty_struct *tty = |
3414 | container_of(work, struct tty_struct, buf.work.work); | ||
3412 | unsigned long flags; | 3415 | unsigned long flags; |
3413 | struct tty_ldisc *disc; | 3416 | struct tty_ldisc *disc; |
3414 | struct tty_buffer *tbuf, *head; | 3417 | struct tty_buffer *tbuf, *head; |
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty) | |||
3553 | spin_unlock_irqrestore(&tty->buf.lock, flags); | 3556 | spin_unlock_irqrestore(&tty->buf.lock, flags); |
3554 | 3557 | ||
3555 | if (tty->low_latency) | 3558 | if (tty->low_latency) |
3556 | flush_to_ldisc((void *) tty); | 3559 | flush_to_ldisc(&tty->buf.work.work); |
3557 | else | 3560 | else |
3558 | schedule_delayed_work(&tty->buf.work, 1); | 3561 | schedule_delayed_work(&tty->buf.work, 1); |
3559 | } | 3562 | } |
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty) | |||
3580 | tty->overrun_time = jiffies; | 3583 | tty->overrun_time = jiffies; |
3581 | tty->buf.head = tty->buf.tail = NULL; | 3584 | tty->buf.head = tty->buf.tail = NULL; |
3582 | tty_buffer_init(tty); | 3585 | tty_buffer_init(tty); |
3583 | INIT_WORK(&tty->buf.work, flush_to_ldisc, tty); | 3586 | INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); |
3584 | init_MUTEX(&tty->buf.pty_sem); | 3587 | init_MUTEX(&tty->buf.pty_sem); |
3585 | mutex_init(&tty->termios_mutex); | 3588 | mutex_init(&tty->termios_mutex); |
3586 | init_waitqueue_head(&tty->write_wait); | 3589 | init_waitqueue_head(&tty->write_wait); |
3587 | init_waitqueue_head(&tty->read_wait); | 3590 | init_waitqueue_head(&tty->read_wait); |
3588 | INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); | 3591 | INIT_WORK(&tty->hangup_work, do_tty_hangup); |
3589 | mutex_init(&tty->atomic_read_lock); | 3592 | mutex_init(&tty->atomic_read_lock); |
3590 | mutex_init(&tty->atomic_write_lock); | 3593 | mutex_init(&tty->atomic_write_lock); |
3591 | spin_lock_init(&tty->read_lock); | 3594 | spin_lock_init(&tty->read_lock); |
3592 | INIT_LIST_HEAD(&tty->tty_files); | 3595 | INIT_LIST_HEAD(&tty->tty_files); |
3593 | INIT_WORK(&tty->SAK_work, NULL, NULL); | 3596 | INIT_WORK(&tty->SAK_work, NULL); |
3594 | } | 3597 | } |
3595 | 3598 | ||
3596 | /* | 3599 | /* |
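The tty changes above also show the delayed-work half of the conversion: tty->buf.work was scheduled with schedule_delayed_work(), so it becomes a struct delayed_work, and the handler reaches the tty through the embedded .work member. A sketch with a hypothetical struct my_port standing in for the tty buffering state:

#include <linux/workqueue.h>

struct my_port {
	int bytes_pending;
	struct delayed_work flush_work;		/* was a plain work_struct */
};

static void my_port_flush(struct work_struct *work)
{
	/* Note the extra ".work": the handler receives the inner member. */
	struct my_port *port =
		container_of(work, struct my_port, flush_work.work);

	port->bytes_pending = 0;
}

static void my_port_init(struct my_port *port)
{
	port->bytes_pending = 0;
	INIT_DELAYED_WORK(&port->flush_work, my_port_flush);
}

static void my_port_push(struct my_port *port, int low_latency)
{
	if (low_latency)
		my_port_flush(&port->flush_work.work);	/* direct call, as in tty_flip_buffer_push() */
	else
		schedule_delayed_work(&port->flush_work, 1);
}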
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 87587b4385a..75ff0286e1a 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty); | |||
155 | static void set_vesa_blanking(char __user *p); | 155 | static void set_vesa_blanking(char __user *p); |
156 | static void set_cursor(struct vc_data *vc); | 156 | static void set_cursor(struct vc_data *vc); |
157 | static void hide_cursor(struct vc_data *vc); | 157 | static void hide_cursor(struct vc_data *vc); |
158 | static void console_callback(void *ignored); | 158 | static void console_callback(struct work_struct *ignored); |
159 | static void blank_screen_t(unsigned long dummy); | 159 | static void blank_screen_t(unsigned long dummy); |
160 | static void set_palette(struct vc_data *vc); | 160 | static void set_palette(struct vc_data *vc); |
161 | 161 | ||
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */ | |||
174 | static int blankinterval = 10*60*HZ; | 174 | static int blankinterval = 10*60*HZ; |
175 | static int vesa_off_interval; | 175 | static int vesa_off_interval; |
176 | 176 | ||
177 | static DECLARE_WORK(console_work, console_callback, NULL); | 177 | static DECLARE_WORK(console_work, console_callback); |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * fg_console is the current virtual console, | 180 | * fg_console is the current virtual console, |
@@ -2154,7 +2154,7 @@ out: | |||
2154 | * with other console code and prevention of re-entrancy is | 2154 | * with other console code and prevention of re-entrancy is |
2155 | * ensured with console_sem. | 2155 | * ensured with console_sem. |
2156 | */ | 2156 | */ |
2157 | static void console_callback(void *ignored) | 2157 | static void console_callback(struct work_struct *ignored) |
2158 | { | 2158 | { |
2159 | acquire_console_sem(); | 2159 | acquire_console_sem(); |
2160 | 2160 | ||
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 05f8ce2cfb4..b418b16e910 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c | |||
@@ -31,9 +31,11 @@ | |||
31 | #include <linux/connector.h> | 31 | #include <linux/connector.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | 33 | ||
34 | void cn_queue_wrapper(void *data) | 34 | void cn_queue_wrapper(struct work_struct *work) |
35 | { | 35 | { |
36 | struct cn_callback_data *d = data; | 36 | struct cn_callback_entry *cbq = |
37 | container_of(work, struct cn_callback_entry, work.work); | ||
38 | struct cn_callback_data *d = &cbq->data; | ||
37 | 39 | ||
38 | d->callback(d->callback_priv); | 40 | d->callback(d->callback_priv); |
39 | 41 | ||
@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc | |||
57 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); | 59 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); |
58 | cbq->data.callback = callback; | 60 | cbq->data.callback = callback; |
59 | 61 | ||
60 | INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); | 62 | INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper); |
61 | return cbq; | 63 | return cbq; |
62 | } | 64 | } |
63 | 65 | ||
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index b49bacfd8de..5e7cd45d10e 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v | |||
135 | spin_lock_bh(&dev->cbdev->queue_lock); | 135 | spin_lock_bh(&dev->cbdev->queue_lock); |
136 | list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { | 136 | list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { |
137 | if (cn_cb_equal(&__cbq->id.id, &msg->id)) { | 137 | if (cn_cb_equal(&__cbq->id.id, &msg->id)) { |
138 | if (likely(!test_bit(0, &__cbq->work.pending) && | 138 | if (likely(!test_bit(WORK_STRUCT_PENDING, |
139 | &__cbq->work.work.management) && | ||
139 | __cbq->data.ddata == NULL)) { | 140 | __cbq->data.ddata == NULL)) { |
140 | __cbq->data.callback_priv = msg; | 141 | __cbq->data.callback_priv = msg; |
141 | 142 | ||
142 | __cbq->data.ddata = data; | 143 | __cbq->data.ddata = data; |
143 | __cbq->data.destruct_data = destruct_data; | 144 | __cbq->data.destruct_data = destruct_data; |
144 | 145 | ||
145 | if (queue_work(dev->cbdev->cn_queue, | 146 | if (queue_delayed_work( |
146 | &__cbq->work)) | 147 | dev->cbdev->cn_queue, |
148 | &__cbq->work, 0)) | ||
147 | err = 0; | 149 | err = 0; |
148 | } else { | 150 | } else { |
149 | struct work_struct *w; | ||
150 | struct cn_callback_data *d; | 151 | struct cn_callback_data *d; |
151 | 152 | ||
152 | w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); | 153 | __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC); |
153 | if (w) { | 154 | if (__cbq) { |
154 | d = (struct cn_callback_data *)(w+1); | 155 | d = &__cbq->data; |
155 | |||
156 | d->callback_priv = msg; | 156 | d->callback_priv = msg; |
157 | d->callback = __cbq->data.callback; | 157 | d->callback = __cbq->data.callback; |
158 | d->ddata = data; | 158 | d->ddata = data; |
159 | d->destruct_data = destruct_data; | 159 | d->destruct_data = destruct_data; |
160 | d->free = w; | 160 | d->free = __cbq; |
161 | 161 | ||
162 | INIT_LIST_HEAD(&w->entry); | 162 | INIT_DELAYED_WORK(&__cbq->work, |
163 | w->pending = 0; | 163 | &cn_queue_wrapper); |
164 | w->func = &cn_queue_wrapper; | ||
165 | w->data = d; | ||
166 | init_timer(&w->timer); | ||
167 | 164 | ||
168 | if (queue_work(dev->cbdev->cn_queue, w)) | 165 | if (queue_delayed_work( |
166 | dev->cbdev->cn_queue, | ||
167 | &__cbq->work, 0)) | ||
169 | err = 0; | 168 | err = 0; |
170 | else { | 169 | else { |
171 | kfree(w); | 170 | kfree(__cbq); |
172 | err = -EINVAL; | 171 | err = -EINVAL; |
173 | } | 172 | } |
174 | } else | 173 | } else |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index dd0c2623e27..7a7c6e6dfe4 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock); | |||
42 | 42 | ||
43 | /* internal prototypes */ | 43 | /* internal prototypes */ |
44 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 44 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
45 | static void handle_update(void *data); | 45 | static void handle_update(struct work_struct *work); |
46 | 46 | ||
47 | /** | 47 | /** |
48 | * Two notifier lists: the "policy" list is involved in the | 48 | * Two notifier lists: the "policy" list is involved in the |
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
665 | mutex_init(&policy->lock); | 665 | mutex_init(&policy->lock); |
666 | mutex_lock(&policy->lock); | 666 | mutex_lock(&policy->lock); |
667 | init_completion(&policy->kobj_unregister); | 667 | init_completion(&policy->kobj_unregister); |
668 | INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); | 668 | INIT_WORK(&policy->update, handle_update); |
669 | 669 | ||
670 | /* call driver. From then on the cpufreq must be able | 670 | /* call driver. From then on the cpufreq must be able |
671 | * to accept all calls to ->verify and ->setpolicy for this CPU | 671 | * to accept all calls to ->verify and ->setpolicy for this CPU |
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
895 | } | 895 | } |
896 | 896 | ||
897 | 897 | ||
898 | static void handle_update(void *data) | 898 | static void handle_update(struct work_struct *work) |
899 | { | 899 | { |
900 | unsigned int cpu = (unsigned int)(long)data; | 900 | struct cpufreq_policy *policy = |
901 | container_of(work, struct cpufreq_policy, update); | ||
902 | unsigned int cpu = policy->cpu; | ||
901 | dprintk("handle_update for cpu %u called\n", cpu); | 903 | dprintk("handle_update for cpu %u called\n", cpu); |
902 | cpufreq_update_policy(cpu); | 904 | cpufreq_update_policy(cpu); |
903 | } | 905 | } |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c4c578defab..5ef5ede5b88 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate; | |||
59 | #define MAX_SAMPLING_DOWN_FACTOR (10) | 59 | #define MAX_SAMPLING_DOWN_FACTOR (10) |
60 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) | 60 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) |
61 | 61 | ||
62 | static void do_dbs_timer(void *data); | 62 | static void do_dbs_timer(struct work_struct *work); |
63 | 63 | ||
64 | struct cpu_dbs_info_s { | 64 | struct cpu_dbs_info_s { |
65 | struct cpufreq_policy *cur_policy; | 65 | struct cpufreq_policy *cur_policy; |
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ | |||
82 | * is recursive for the same process. -Venki | 82 | * is recursive for the same process. -Venki |
83 | */ | 83 | */ |
84 | static DEFINE_MUTEX (dbs_mutex); | 84 | static DEFINE_MUTEX (dbs_mutex); |
85 | static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); | 85 | static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); |
86 | 86 | ||
87 | struct dbs_tuners { | 87 | struct dbs_tuners { |
88 | unsigned int sampling_rate; | 88 | unsigned int sampling_rate; |
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu) | |||
420 | } | 420 | } |
421 | } | 421 | } |
422 | 422 | ||
423 | static void do_dbs_timer(void *data) | 423 | static void do_dbs_timer(struct work_struct *work) |
424 | { | 424 | { |
425 | int i; | 425 | int i; |
426 | lock_cpu_hotplug(); | 426 | lock_cpu_hotplug(); |
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data) | |||
435 | 435 | ||
436 | static inline void dbs_timer_init(void) | 436 | static inline void dbs_timer_init(void) |
437 | { | 437 | { |
438 | INIT_WORK(&dbs_work, do_dbs_timer, NULL); | ||
439 | schedule_delayed_work(&dbs_work, | 438 | schedule_delayed_work(&dbs_work, |
440 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); | 439 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); |
441 | return; | 440 | return; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index bf8aa45d4f0..e1cc5113c2a 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate; | |||
47 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) | 47 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) |
48 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) | 48 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) |
49 | 49 | ||
50 | static void do_dbs_timer(void *data); | 50 | static void do_dbs_timer(struct work_struct *work); |
51 | |||
52 | /* Sampling types */ | ||
53 | enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
51 | 54 | ||
52 | struct cpu_dbs_info_s { | 55 | struct cpu_dbs_info_s { |
53 | cputime64_t prev_cpu_idle; | 56 | cputime64_t prev_cpu_idle; |
54 | cputime64_t prev_cpu_wall; | 57 | cputime64_t prev_cpu_wall; |
55 | struct cpufreq_policy *cur_policy; | 58 | struct cpufreq_policy *cur_policy; |
56 | struct work_struct work; | 59 | struct delayed_work work; |
60 | enum dbs_sample sample_type; | ||
57 | unsigned int enable; | 61 | unsigned int enable; |
58 | struct cpufreq_frequency_table *freq_table; | 62 | struct cpufreq_frequency_table *freq_table; |
59 | unsigned int freq_lo; | 63 | unsigned int freq_lo; |
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
407 | } | 411 | } |
408 | } | 412 | } |
409 | 413 | ||
410 | /* Sampling types */ | 414 | static void do_dbs_timer(struct work_struct *work) |
411 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
412 | |||
413 | static void do_dbs_timer(void *data) | ||
414 | { | 415 | { |
415 | unsigned int cpu = smp_processor_id(); | 416 | unsigned int cpu = smp_processor_id(); |
416 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | 417 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); |
418 | enum dbs_sample sample_type = dbs_info->sample_type; | ||
417 | /* We want all CPUs to do sampling nearly on same jiffy */ | 419 | /* We want all CPUs to do sampling nearly on same jiffy */ |
418 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 420 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
421 | |||
422 | /* Permit rescheduling of this work item */ | ||
423 | work_release(work); | ||
424 | |||
419 | delay -= jiffies % delay; | 425 | delay -= jiffies % delay; |
420 | 426 | ||
421 | if (!dbs_info->enable) | 427 | if (!dbs_info->enable) |
422 | return; | 428 | return; |
423 | /* Common NORMAL_SAMPLE setup */ | 429 | /* Common NORMAL_SAMPLE setup */ |
424 | INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE); | 430 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
425 | if (!dbs_tuners_ins.powersave_bias || | 431 | if (!dbs_tuners_ins.powersave_bias || |
426 | (unsigned long) data == DBS_NORMAL_SAMPLE) { | 432 | sample_type == DBS_NORMAL_SAMPLE) { |
427 | lock_cpu_hotplug(); | 433 | lock_cpu_hotplug(); |
428 | dbs_check_cpu(dbs_info); | 434 | dbs_check_cpu(dbs_info); |
429 | unlock_cpu_hotplug(); | 435 | unlock_cpu_hotplug(); |
430 | if (dbs_info->freq_lo) { | 436 | if (dbs_info->freq_lo) { |
431 | /* Setup timer for SUB_SAMPLE */ | 437 | /* Setup timer for SUB_SAMPLE */ |
432 | INIT_WORK(&dbs_info->work, do_dbs_timer, | 438 | dbs_info->sample_type = DBS_SUB_SAMPLE; |
433 | (void *)DBS_SUB_SAMPLE); | ||
434 | delay = dbs_info->freq_hi_jiffies; | 439 | delay = dbs_info->freq_hi_jiffies; |
435 | } | 440 | } |
436 | } else { | 441 | } else { |
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu) | |||
449 | delay -= jiffies % delay; | 454 | delay -= jiffies % delay; |
450 | 455 | ||
451 | ondemand_powersave_bias_init(); | 456 | ondemand_powersave_bias_init(); |
452 | INIT_WORK(&dbs_info->work, do_dbs_timer, NULL); | 457 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); |
458 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | ||
453 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 459 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); |
454 | } | 460 | } |
455 | 461 | ||
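The ondemand governor is one place where real data travelled through the old void * argument: the sample type. It now lives in the per-CPU structure and is set before the work is requeued. A compressed sketch of that idea (illustrative names and fields, not the driver's):

#include <linux/workqueue.h>

enum sample_kind { SAMPLE_NORMAL, SAMPLE_SUB };

struct sampler {
	enum sample_kind kind;		/* replaces the old void *data */
	int need_sub_sample;
	struct delayed_work work;
};

static void sampler_fn(struct work_struct *work)
{
	struct sampler *s = container_of(work, struct sampler, work.work);
	enum sample_kind kind = s->kind;

	/* Default the next run to a normal sample... */
	s->kind = SAMPLE_NORMAL;

	/* ...and request a sub-sample only when one is actually needed. */
	if (kind == SAMPLE_NORMAL && s->need_sub_sample)
		s->kind = SAMPLE_SUB;

	schedule_delayed_work(&s->work, 1);
}

static void sampler_start(struct sampler *s)
{
	s->kind = SAMPLE_NORMAL;
	s->need_sub_sample = 0;
	INIT_DELAYED_WORK(&s->work, sampler_fn);
	schedule_delayed_work(&s->work, 1);
}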
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c index 4630f1969a0..15edf40828b 100644 --- a/drivers/i2c/chips/ds1374.c +++ b/drivers/i2c/chips/ds1374.c | |||
@@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void) | |||
140 | return t1; | 140 | return t1; |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ds1374_set_work(void *arg) | 143 | static ulong new_time; |
144 | |||
145 | static void ds1374_set_work(struct work_struct *work) | ||
144 | { | 146 | { |
145 | ulong t1, t2; | 147 | ulong t1, t2; |
146 | int limit = 10; /* arbitrary retry limit */ | 148 | int limit = 10; /* arbitrary retry limit */ |
147 | 149 | ||
148 | t1 = *(ulong *) arg; | 150 | t1 = new_time; |
149 | 151 | ||
150 | mutex_lock(&ds1374_mutex); | 152 | mutex_lock(&ds1374_mutex); |
151 | 153 | ||
@@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg) | |||
167 | "can't confirm time set from rtc chip\n"); | 169 | "can't confirm time set from rtc chip\n"); |
168 | } | 170 | } |
169 | 171 | ||
170 | static ulong new_time; | ||
171 | |||
172 | static struct workqueue_struct *ds1374_workqueue; | 172 | static struct workqueue_struct *ds1374_workqueue; |
173 | 173 | ||
174 | static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time); | 174 | static DECLARE_WORK(ds1374_work, ds1374_set_work); |
175 | 175 | ||
176 | int ds1374_set_rtc_time(ulong nowtime) | 176 | int ds1374_set_rtc_time(ulong nowtime) |
177 | { | 177 | { |
@@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime) | |||
180 | if (in_interrupt()) | 180 | if (in_interrupt()) |
181 | queue_work(ds1374_workqueue, &ds1374_work); | 181 | queue_work(ds1374_workqueue, &ds1374_work); |
182 | else | 182 | else |
183 | ds1374_set_work(&new_time); | 183 | ds1374_set_work(NULL); |
184 | 184 | ||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index d90a3a1898c..8f4378a1631 100644 --- a/drivers/ieee1394/hosts.c +++ b/drivers/ieee1394/hosts.c | |||
@@ -31,9 +31,10 @@ | |||
31 | #include "config_roms.h" | 31 | #include "config_roms.h" |
32 | 32 | ||
33 | 33 | ||
34 | static void delayed_reset_bus(void * __reset_info) | 34 | static void delayed_reset_bus(struct work_struct *work) |
35 | { | 35 | { |
36 | struct hpsb_host *host = (struct hpsb_host*)__reset_info; | 36 | struct hpsb_host *host = |
37 | container_of(work, struct hpsb_host, delayed_reset.work); | ||
37 | int generation = host->csr.generation + 1; | 38 | int generation = host->csr.generation + 1; |
38 | 39 | ||
39 | /* The generation field rolls over to 2 rather than 0 per IEEE | 40 | /* The generation field rolls over to 2 rather than 0 per IEEE |
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, | |||
145 | 146 | ||
146 | atomic_set(&h->generation, 0); | 147 | atomic_set(&h->generation, 0); |
147 | 148 | ||
148 | INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); | 149 | INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); |
149 | 150 | ||
150 | init_timer(&h->timeout); | 151 | init_timer(&h->timeout); |
151 | h->timeout.data = (unsigned long) h; | 152 | h->timeout.data = (unsigned long) h; |
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host) | |||
234 | * Config ROM in the near future. */ | 235 | * Config ROM in the near future. */ |
235 | reset_delay = HZ; | 236 | reset_delay = HZ; |
236 | 237 | ||
237 | PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); | 238 | PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); |
238 | schedule_delayed_work(&host->delayed_reset, reset_delay); | 239 | schedule_delayed_work(&host->delayed_reset, reset_delay); |
239 | 240 | ||
240 | return 0; | 241 | return 0; |
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h index bc6dbfadb89..d553e38c954 100644 --- a/drivers/ieee1394/hosts.h +++ b/drivers/ieee1394/hosts.h | |||
@@ -62,7 +62,7 @@ struct hpsb_host { | |||
62 | struct class_device class_dev; | 62 | struct class_device class_dev; |
63 | 63 | ||
64 | int update_config_rom; | 64 | int update_config_rom; |
65 | struct work_struct delayed_reset; | 65 | struct delayed_work delayed_reset; |
66 | unsigned int config_roms; | 66 | unsigned int config_roms; |
67 | 67 | ||
68 | struct list_head addr_space; | 68 | struct list_head addr_space; |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 6986ac18828..cd156d4e779 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, | |||
493 | scsi_unblock_requests(scsi_id->scsi_host); | 493 | scsi_unblock_requests(scsi_id->scsi_host); |
494 | } | 494 | } |
495 | 495 | ||
496 | static void sbp2util_write_orb_pointer(void *p) | 496 | static void sbp2util_write_orb_pointer(struct work_struct *work) |
497 | { | 497 | { |
498 | struct scsi_id_instance_data *scsi_id = | ||
499 | container_of(work, struct scsi_id_instance_data, | ||
500 | protocol_work.work); | ||
498 | quadlet_t data[2]; | 501 | quadlet_t data[2]; |
499 | 502 | ||
500 | data[0] = ORB_SET_NODE_ID( | 503 | data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id); |
501 | ((struct scsi_id_instance_data *)p)->hi->host->node_id); | 504 | data[1] = scsi_id->last_orb_dma; |
502 | data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma; | ||
503 | sbp2util_cpu_to_be32_buffer(data, 8); | 505 | sbp2util_cpu_to_be32_buffer(data, 8); |
504 | sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); | 506 | sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8); |
505 | } | 507 | } |
506 | 508 | ||
507 | static void sbp2util_write_doorbell(void *p) | 509 | static void sbp2util_write_doorbell(struct work_struct *work) |
508 | { | 510 | { |
509 | sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); | 511 | struct scsi_id_instance_data *scsi_id = |
512 | container_of(work, struct scsi_id_instance_data, | ||
513 | protocol_work.work); | ||
514 | sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4); | ||
510 | } | 515 | } |
511 | 516 | ||
512 | /* | 517 | /* |
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud | |||
843 | INIT_LIST_HEAD(&scsi_id->scsi_list); | 848 | INIT_LIST_HEAD(&scsi_id->scsi_list); |
844 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); | 849 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); |
845 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); | 850 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); |
846 | INIT_WORK(&scsi_id->protocol_work, NULL, NULL); | 851 | INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL); |
847 | 852 | ||
848 | ud->device.driver_data = scsi_id; | 853 | ud->device.driver_data = scsi_id; |
849 | 854 | ||
@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, | |||
2047 | * We do not accept new commands until the job is over. | 2052 | * We do not accept new commands until the job is over. |
2048 | */ | 2053 | */ |
2049 | scsi_block_requests(scsi_id->scsi_host); | 2054 | scsi_block_requests(scsi_id->scsi_host); |
2050 | PREPARE_WORK(&scsi_id->protocol_work, | 2055 | PREPARE_DELAYED_WORK(&scsi_id->protocol_work, |
2051 | last_orb ? sbp2util_write_doorbell: | 2056 | last_orb ? sbp2util_write_doorbell: |
2052 | sbp2util_write_orb_pointer, | 2057 | sbp2util_write_orb_pointer); |
2053 | scsi_id); | 2058 | schedule_delayed_work(&scsi_id->protocol_work, 0); |
2054 | schedule_work(&scsi_id->protocol_work); | ||
2055 | } | 2059 | } |
2056 | } | 2060 | } |
2057 | 2061 | ||
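sbp2 keeps a single embedded work item but points it at one of two handlers before each submission; with the converted API that re-targeting uses PREPARE_DELAYED_WORK(), and both handlers find their logical unit via container_of(). A rough sketch under those assumptions (the struct name and register offsets below are illustrative, not the driver's):

#include <linux/workqueue.h>

struct lu {
	int last_orb_valid;
	struct delayed_work protocol_work;
};

static void notify_fetch_agent(struct lu *lu, int reg_offset)
{
	/* ... write to the target's fetch agent register block ... */
}

static void write_orb_pointer(struct work_struct *work)
{
	struct lu *lu = container_of(work, struct lu, protocol_work.work);

	notify_fetch_agent(lu, 0x08);	/* ORB_POINTER, illustrative offset */
}

static void write_doorbell(struct work_struct *work)
{
	struct lu *lu = container_of(work, struct lu, protocol_work.work);

	notify_fetch_agent(lu, 0x10);	/* DOORBELL, illustrative offset */
}

static void kick_fetch_agent(struct lu *lu)
{
	PREPARE_DELAYED_WORK(&lu->protocol_work,
			     lu->last_orb_valid ? write_doorbell
						: write_orb_pointer);
	schedule_delayed_work(&lu->protocol_work, 0);
}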
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index abbe48e646c..1b16d6b9cf1 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -348,7 +348,7 @@ struct scsi_id_instance_data { | |||
348 | unsigned workarounds; | 348 | unsigned workarounds; |
349 | 349 | ||
350 | atomic_t state; | 350 | atomic_t state; |
351 | struct work_struct protocol_work; | 351 | struct delayed_work protocol_work; |
352 | }; | 352 | }; |
353 | 353 | ||
354 | /* For use in scsi_id_instance_data.state */ | 354 | /* For use in scsi_id_instance_data.state */ |
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 7767a11b689..af939796750 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -55,11 +55,11 @@ struct addr_req { | |||
55 | int status; | 55 | int status; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static void process_req(void *data); | 58 | static void process_req(struct work_struct *work); |
59 | 59 | ||
60 | static DEFINE_MUTEX(lock); | 60 | static DEFINE_MUTEX(lock); |
61 | static LIST_HEAD(req_list); | 61 | static LIST_HEAD(req_list); |
62 | static DECLARE_WORK(work, process_req, NULL); | 62 | static DECLARE_DELAYED_WORK(work, process_req); |
63 | static struct workqueue_struct *addr_wq; | 63 | static struct workqueue_struct *addr_wq; |
64 | 64 | ||
65 | void rdma_addr_register_client(struct rdma_addr_client *client) | 65 | void rdma_addr_register_client(struct rdma_addr_client *client) |
@@ -215,7 +215,7 @@ out: | |||
215 | return ret; | 215 | return ret; |
216 | } | 216 | } |
217 | 217 | ||
218 | static void process_req(void *data) | 218 | static void process_req(struct work_struct *work) |
219 | { | 219 | { |
220 | struct addr_req *req, *temp_req; | 220 | struct addr_req *req, *temp_req; |
221 | struct sockaddr_in *src_in, *dst_in; | 221 | struct sockaddr_in *src_in, *dst_in; |
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 20e9f64e67a..98272fbbfb3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -285,9 +285,10 @@ err: | |||
285 | kfree(tprops); | 285 | kfree(tprops); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void ib_cache_task(void *work_ptr) | 288 | static void ib_cache_task(struct work_struct *_work) |
289 | { | 289 | { |
290 | struct ib_update_work *work = work_ptr; | 290 | struct ib_update_work *work = |
291 | container_of(_work, struct ib_update_work, work); | ||
291 | 292 | ||
292 | ib_cache_update(work->device, work->port_num); | 293 | ib_cache_update(work->device, work->port_num); |
293 | kfree(work); | 294 | kfree(work); |
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler, | |||
306 | event->event == IB_EVENT_CLIENT_REREGISTER) { | 307 | event->event == IB_EVENT_CLIENT_REREGISTER) { |
307 | work = kmalloc(sizeof *work, GFP_ATOMIC); | 308 | work = kmalloc(sizeof *work, GFP_ATOMIC); |
308 | if (work) { | 309 | if (work) { |
309 | INIT_WORK(&work->work, ib_cache_task, work); | 310 | INIT_WORK(&work->work, ib_cache_task); |
310 | work->device = event->device; | 311 | work->device = event->device; |
311 | work->port_num = event->element.port_num; | 312 | work->port_num = event->element.port_num; |
312 | schedule_work(&work->work); | 313 | schedule_work(&work->work); |
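ib_cache_event() allocates a one-shot work item whose payload lives in the same allocation as the work_struct; under the new API the handler recovers that allocation with container_of() and frees it when done. A minimal sketch of the same shape, with a hypothetical, simplified struct update_work:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct update_work {
	struct work_struct work;
	int port_num;			/* payload carried with the work */
};

static void update_task(struct work_struct *_work)
{
	struct update_work *w =
		container_of(_work, struct update_work, work);

	/* ... refresh the cache for w->port_num ... */
	kfree(w);			/* one-shot item frees itself */
}

static int queue_update(int port_num)
{
	struct update_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return -ENOMEM;
	INIT_WORK(&w->work, update_task);
	w->port_num = port_num;
	schedule_work(&w->work);
	return 0;
}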
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index e5dc4530808..79c937bf696 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -101,7 +101,7 @@ struct cm_av { | |||
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct cm_work { | 103 | struct cm_work { |
104 | struct work_struct work; | 104 | struct delayed_work work; |
105 | struct list_head list; | 105 | struct list_head list; |
106 | struct cm_port *port; | 106 | struct cm_port *port; |
107 | struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ | 107 | struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ |
@@ -161,7 +161,7 @@ struct cm_id_private { | |||
161 | atomic_t work_count; | 161 | atomic_t work_count; |
162 | }; | 162 | }; |
163 | 163 | ||
164 | static void cm_work_handler(void *data); | 164 | static void cm_work_handler(struct work_struct *work); |
165 | 165 | ||
166 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) | 166 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) |
167 | { | 167 | { |
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) | |||
668 | return ERR_PTR(-ENOMEM); | 668 | return ERR_PTR(-ENOMEM); |
669 | 669 | ||
670 | timewait_info->work.local_id = local_id; | 670 | timewait_info->work.local_id = local_id; |
671 | INIT_WORK(&timewait_info->work.work, cm_work_handler, | 671 | INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); |
672 | &timewait_info->work); | ||
673 | timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; | 672 | timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; |
674 | return timewait_info; | 673 | return timewait_info; |
675 | } | 674 | } |
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, | |||
2995 | } | 2994 | } |
2996 | } | 2995 | } |
2997 | 2996 | ||
2998 | static void cm_work_handler(void *data) | 2997 | static void cm_work_handler(struct work_struct *_work) |
2999 | { | 2998 | { |
3000 | struct cm_work *work = data; | 2999 | struct cm_work *work = container_of(_work, struct cm_work, work.work); |
3001 | int ret; | 3000 | int ret; |
3002 | 3001 | ||
3003 | switch (work->cm_event.event) { | 3002 | switch (work->cm_event.event) { |
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
3087 | * we need to find the cm_id once we're in the context of the | 3086 | * we need to find the cm_id once we're in the context of the |
3088 | * worker thread, rather than holding a reference on it. | 3087 | * worker thread, rather than holding a reference on it. |
3089 | */ | 3088 | */ |
3090 | INIT_WORK(&work->work, cm_work_handler, work); | 3089 | INIT_DELAYED_WORK(&work->work, cm_work_handler); |
3091 | work->local_id = cm_id->local_id; | 3090 | work->local_id = cm_id->local_id; |
3092 | work->remote_id = cm_id->remote_id; | 3091 | work->remote_id = cm_id->remote_id; |
3093 | work->mad_recv_wc = NULL; | 3092 | work->mad_recv_wc = NULL; |
3094 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3093 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
3095 | queue_work(cm.wq, &work->work); | 3094 | queue_delayed_work(cm.wq, &work->work, 0); |
3096 | out: | 3095 | out: |
3097 | return ret; | 3096 | return ret; |
3098 | } | 3097 | } |
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, | |||
3191 | return; | 3190 | return; |
3192 | } | 3191 | } |
3193 | 3192 | ||
3194 | INIT_WORK(&work->work, cm_work_handler, work); | 3193 | INIT_DELAYED_WORK(&work->work, cm_work_handler); |
3195 | work->cm_event.event = event; | 3194 | work->cm_event.event = event; |
3196 | work->mad_recv_wc = mad_recv_wc; | 3195 | work->mad_recv_wc = mad_recv_wc; |
3197 | work->port = (struct cm_port *)mad_agent->context; | 3196 | work->port = (struct cm_port *)mad_agent->context; |
3198 | queue_work(cm.wq, &work->work); | 3197 | queue_delayed_work(cm.wq, &work->work, 0); |
3199 | } | 3198 | } |
3200 | 3199 | ||
3201 | static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, | 3200 | static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cf48f269743..985a6b564d8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, | |||
1340 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; | 1340 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | static void cma_work_handler(void *data) | 1343 | static void cma_work_handler(struct work_struct *_work) |
1344 | { | 1344 | { |
1345 | struct cma_work *work = data; | 1345 | struct cma_work *work = container_of(_work, struct cma_work, work); |
1346 | struct rdma_id_private *id_priv = work->id; | 1346 | struct rdma_id_private *id_priv = work->id; |
1347 | int destroy = 0; | 1347 | int destroy = 0; |
1348 | 1348 | ||
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1373 | return -ENOMEM; | 1373 | return -ENOMEM; |
1374 | 1374 | ||
1375 | work->id = id_priv; | 1375 | work->id = id_priv; |
1376 | INIT_WORK(&work->work, cma_work_handler, work); | 1376 | INIT_WORK(&work->work, cma_work_handler); |
1377 | work->old_state = CMA_ROUTE_QUERY; | 1377 | work->old_state = CMA_ROUTE_QUERY; |
1378 | work->new_state = CMA_ROUTE_RESOLVED; | 1378 | work->new_state = CMA_ROUTE_RESOLVED; |
1379 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1379 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1430 | return -ENOMEM; | 1430 | return -ENOMEM; |
1431 | 1431 | ||
1432 | work->id = id_priv; | 1432 | work->id = id_priv; |
1433 | INIT_WORK(&work->work, cma_work_handler, work); | 1433 | INIT_WORK(&work->work, cma_work_handler); |
1434 | work->old_state = CMA_ROUTE_QUERY; | 1434 | work->old_state = CMA_ROUTE_QUERY; |
1435 | work->new_state = CMA_ROUTE_RESOLVED; | 1435 | work->new_state = CMA_ROUTE_RESOLVED; |
1436 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1436 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) | |||
1583 | } | 1583 | } |
1584 | 1584 | ||
1585 | work->id = id_priv; | 1585 | work->id = id_priv; |
1586 | INIT_WORK(&work->work, cma_work_handler, work); | 1586 | INIT_WORK(&work->work, cma_work_handler); |
1587 | work->old_state = CMA_ADDR_QUERY; | 1587 | work->old_state = CMA_ADDR_QUERY; |
1588 | work->new_state = CMA_ADDR_RESOLVED; | 1588 | work->new_state = CMA_ADDR_RESOLVED; |
1589 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; | 1589 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index cf797d7aea0..1039ad57d53 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv, | |||
828 | * thread asleep on the destroy_comp list vs. an object destroyed | 828 | * thread asleep on the destroy_comp list vs. an object destroyed |
829 | * here synchronously when the last reference is removed. | 829 | * here synchronously when the last reference is removed. |
830 | */ | 830 | */ |
831 | static void cm_work_handler(void *arg) | 831 | static void cm_work_handler(struct work_struct *_work) |
832 | { | 832 | { |
833 | struct iwcm_work *work = arg; | 833 | struct iwcm_work *work = container_of(_work, struct iwcm_work, work); |
834 | struct iw_cm_event levent; | 834 | struct iw_cm_event levent; |
835 | struct iwcm_id_private *cm_id_priv = work->cm_id; | 835 | struct iwcm_id_private *cm_id_priv = work->cm_id; |
836 | unsigned long flags; | 836 | unsigned long flags; |
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id, | |||
900 | goto out; | 900 | goto out; |
901 | } | 901 | } |
902 | 902 | ||
903 | INIT_WORK(&work->work, cm_work_handler, work); | 903 | INIT_WORK(&work->work, cm_work_handler); |
904 | work->cm_id = cm_id_priv; | 904 | work->cm_id = cm_id_priv; |
905 | work->event = *iw_event; | 905 | work->event = *iw_event; |
906 | 906 | ||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 3f9c16232c4..15f38d94b3a 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent( | |||
65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
66 | struct ib_mad_private *mad); | 66 | struct ib_mad_private *mad); |
67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | 67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); |
68 | static void timeout_sends(void *data); | 68 | static void timeout_sends(struct work_struct *work); |
69 | static void local_completions(void *data); | 69 | static void local_completions(struct work_struct *work); |
70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
71 | struct ib_mad_agent_private *agent_priv, | 71 | struct ib_mad_agent_private *agent_priv, |
72 | u8 mgmt_class); | 72 | u8 mgmt_class); |
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | 356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); |
357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | 357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); |
358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | 358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); |
359 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); | 359 | INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); |
360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | 360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); |
361 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 361 | INIT_WORK(&mad_agent_priv->local_work, local_completions); |
362 | mad_agent_priv); | ||
363 | atomic_set(&mad_agent_priv->refcount, 1); | 362 | atomic_set(&mad_agent_priv->refcount, 1); |
364 | init_completion(&mad_agent_priv->comp); | 363 | init_completion(&mad_agent_priv->comp); |
365 | 364 | ||
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, | |||
2198 | /* | 2197 | /* |
2199 | * IB MAD completion callback | 2198 | * IB MAD completion callback |
2200 | */ | 2199 | */ |
2201 | static void ib_mad_completion_handler(void *data) | 2200 | static void ib_mad_completion_handler(struct work_struct *work) |
2202 | { | 2201 | { |
2203 | struct ib_mad_port_private *port_priv; | 2202 | struct ib_mad_port_private *port_priv; |
2204 | struct ib_wc wc; | 2203 | struct ib_wc wc; |
2205 | 2204 | ||
2206 | port_priv = (struct ib_mad_port_private *)data; | 2205 | port_priv = container_of(work, struct ib_mad_port_private, work); |
2207 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | 2206 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
2208 | 2207 | ||
2209 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | 2208 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { |
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent, | |||
2324 | } | 2323 | } |
2325 | EXPORT_SYMBOL(ib_cancel_mad); | 2324 | EXPORT_SYMBOL(ib_cancel_mad); |
2326 | 2325 | ||
2327 | static void local_completions(void *data) | 2326 | static void local_completions(struct work_struct *work) |
2328 | { | 2327 | { |
2329 | struct ib_mad_agent_private *mad_agent_priv; | 2328 | struct ib_mad_agent_private *mad_agent_priv; |
2330 | struct ib_mad_local_private *local; | 2329 | struct ib_mad_local_private *local; |
@@ -2334,7 +2333,8 @@ static void local_completions(void *data) | |||
2334 | struct ib_wc wc; | 2333 | struct ib_wc wc; |
2335 | struct ib_mad_send_wc mad_send_wc; | 2334 | struct ib_mad_send_wc mad_send_wc; |
2336 | 2335 | ||
2337 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2336 | mad_agent_priv = |
2337 | container_of(work, struct ib_mad_agent_private, local_work); | ||
2338 | 2338 | ||
2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2340 | while (!list_empty(&mad_agent_priv->local_list)) { | 2340 | while (!list_empty(&mad_agent_priv->local_list)) { |
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | |||
2434 | return ret; | 2434 | return ret; |
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | static void timeout_sends(void *data) | 2437 | static void timeout_sends(struct work_struct *work) |
2438 | { | 2438 | { |
2439 | struct ib_mad_agent_private *mad_agent_priv; | 2439 | struct ib_mad_agent_private *mad_agent_priv; |
2440 | struct ib_mad_send_wr_private *mad_send_wr; | 2440 | struct ib_mad_send_wr_private *mad_send_wr; |
2441 | struct ib_mad_send_wc mad_send_wc; | 2441 | struct ib_mad_send_wc mad_send_wc; |
2442 | unsigned long flags, delay; | 2442 | unsigned long flags, delay; |
2443 | 2443 | ||
2444 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2444 | mad_agent_priv = container_of(work, struct ib_mad_agent_private, |
2445 | timed_work.work); | ||
2445 | mad_send_wc.vendor_err = 0; | 2446 | mad_send_wc.vendor_err = 0; |
2446 | 2447 | ||
2447 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2448 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device, | |||
2799 | ret = -ENOMEM; | 2800 | ret = -ENOMEM; |
2800 | goto error8; | 2801 | goto error8; |
2801 | } | 2802 | } |
2802 | INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); | 2803 | INIT_WORK(&port_priv->work, ib_mad_completion_handler); |
2803 | 2804 | ||
2804 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | 2805 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
2805 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); | 2806 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); |
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d06b59083f6..d5548e73e06 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -102,7 +102,7 @@ struct ib_mad_agent_private { | |||
102 | struct list_head send_list; | 102 | struct list_head send_list; |
103 | struct list_head wait_list; | 103 | struct list_head wait_list; |
104 | struct list_head done_list; | 104 | struct list_head done_list; |
105 | struct work_struct timed_work; | 105 | struct delayed_work timed_work; |
106 | unsigned long timeout; | 106 | unsigned long timeout; |
107 | struct list_head local_list; | 107 | struct list_head local_list; |
108 | struct work_struct local_work; | 108 | struct work_struct local_work; |
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 1ef79d015a1..3663fd7022b 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -45,8 +45,8 @@ enum rmpp_state { | |||
45 | struct mad_rmpp_recv { | 45 | struct mad_rmpp_recv { |
46 | struct ib_mad_agent_private *agent; | 46 | struct ib_mad_agent_private *agent; |
47 | struct list_head list; | 47 | struct list_head list; |
48 | struct work_struct timeout_work; | 48 | struct delayed_work timeout_work; |
49 | struct work_struct cleanup_work; | 49 | struct delayed_work cleanup_work; |
50 | struct completion comp; | 50 | struct completion comp; |
51 | enum rmpp_state state; | 51 | enum rmpp_state state; |
52 | spinlock_t lock; | 52 | spinlock_t lock; |
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent, | |||
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
236 | static void recv_timeout_handler(void *data) | 236 | static void recv_timeout_handler(struct work_struct *work) |
237 | { | 237 | { |
238 | struct mad_rmpp_recv *rmpp_recv = data; | 238 | struct mad_rmpp_recv *rmpp_recv = |
239 | container_of(work, struct mad_rmpp_recv, timeout_work.work); | ||
239 | struct ib_mad_recv_wc *rmpp_wc; | 240 | struct ib_mad_recv_wc *rmpp_wc; |
240 | unsigned long flags; | 241 | unsigned long flags; |
241 | 242 | ||
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data) | |||
254 | ib_free_recv_mad(rmpp_wc); | 255 | ib_free_recv_mad(rmpp_wc); |
255 | } | 256 | } |
256 | 257 | ||
257 | static void recv_cleanup_handler(void *data) | 258 | static void recv_cleanup_handler(struct work_struct *work) |
258 | { | 259 | { |
259 | struct mad_rmpp_recv *rmpp_recv = data; | 260 | struct mad_rmpp_recv *rmpp_recv = |
261 | container_of(work, struct mad_rmpp_recv, cleanup_work.work); | ||
260 | unsigned long flags; | 262 | unsigned long flags; |
261 | 263 | ||
262 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); | 264 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); |
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, | |||
285 | 287 | ||
286 | rmpp_recv->agent = agent; | 288 | rmpp_recv->agent = agent; |
287 | init_completion(&rmpp_recv->comp); | 289 | init_completion(&rmpp_recv->comp); |
288 | INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); | 290 | INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); |
289 | INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); | 291 | INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); |
290 | spin_lock_init(&rmpp_recv->lock); | 292 | spin_lock_init(&rmpp_recv->lock); |
291 | rmpp_recv->state = RMPP_STATE_ACTIVE; | 293 | rmpp_recv->state = RMPP_STATE_ACTIVE; |
292 | atomic_set(&rmpp_recv->refcount, 1); | 294 | atomic_set(&rmpp_recv->refcount, 1); |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 1706d3c7e95..e45afba7534 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref) | |||
360 | kfree(sm_ah); | 360 | kfree(sm_ah); |
361 | } | 361 | } |
362 | 362 | ||
363 | static void update_sm_ah(void *port_ptr) | 363 | static void update_sm_ah(struct work_struct *work) |
364 | { | 364 | { |
365 | struct ib_sa_port *port = port_ptr; | 365 | struct ib_sa_port *port = |
366 | container_of(work, struct ib_sa_port, update_task); | ||
366 | struct ib_sa_sm_ah *new_ah, *old_ah; | 367 | struct ib_sa_sm_ah *new_ah, *old_ah; |
367 | struct ib_port_attr port_attr; | 368 | struct ib_port_attr port_attr; |
368 | struct ib_ah_attr ah_attr; | 369 | struct ib_ah_attr ah_attr; |
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
992 | if (IS_ERR(sa_dev->port[i].agent)) | 993 | if (IS_ERR(sa_dev->port[i].agent)) |
993 | goto err; | 994 | goto err; |
994 | 995 | ||
995 | INIT_WORK(&sa_dev->port[i].update_task, | 996 | INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); |
996 | update_sm_ah, &sa_dev->port[i]); | ||
997 | } | 997 | } |
998 | 998 | ||
999 | ib_set_client_data(device, &sa_client, sa_dev); | 999 | ib_set_client_data(device, &sa_client, sa_dev); |
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
1010 | goto err; | 1010 | goto err; |
1011 | 1011 | ||
1012 | for (i = 0; i <= e - s; ++i) | 1012 | for (i = 0; i <= e - s; ++i) |
1013 | update_sm_ah(&sa_dev->port[i]); | 1013 | update_sm_ah(&sa_dev->port[i].update_task); |
1014 | 1014 | ||
1015 | return; | 1015 | return; |
1016 | 1016 | ||
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index efe147dbeb4..db12cc0841d 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c | |||
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem) | |||
179 | up_write(¤t->mm->mmap_sem); | 179 | up_write(¤t->mm->mmap_sem); |
180 | } | 180 | } |
181 | 181 | ||
182 | static void ib_umem_account(void *work_ptr) | 182 | static void ib_umem_account(struct work_struct *_work) |
183 | { | 183 | { |
184 | struct ib_umem_account_work *work = work_ptr; | 184 | struct ib_umem_account_work *work = |
185 | container_of(_work, struct ib_umem_account_work, work); | ||
185 | 186 | ||
186 | down_write(&work->mm->mmap_sem); | 187 | down_write(&work->mm->mmap_sem); |
187 | work->mm->locked_vm -= work->diff; | 188 | work->mm->locked_vm -= work->diff; |
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
216 | return; | 217 | return; |
217 | } | 218 | } |
218 | 219 | ||
219 | INIT_WORK(&work->work, ib_umem_account, work); | 220 | INIT_WORK(&work->work, ib_umem_account); |
220 | work->mm = mm; | 221 | work->mm = mm; |
221 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 222 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
222 | 223 | ||
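uverbs_mem.c illustrates the one-shot, heap-allocated variant: a small wrapper struct carries what the old data pointer used to smuggle in, the handler gets the wrapper back via container_of() and frees it when done. A sketch under those assumptions (the wrapper name and fields are hypothetical, and the caller is assumed to hand over its mm reference):

    struct account_work_sketch {
            struct work_struct work;
            struct mm_struct *mm;
            unsigned long diff;
    };

    static void account_handler(struct work_struct *_work)
    {
            struct account_work_sketch *w =
                    container_of(_work, struct account_work_sketch, work);

            down_write(&w->mm->mmap_sem);
            w->mm->locked_vm -= w->diff;
            up_write(&w->mm->mmap_sem);
            mmput(w->mm);                   /* drop the reference passed to us */
            kfree(w);                       /* one-shot: the handler frees it */
    }

    static void account_later(struct mm_struct *mm, unsigned long diff)
    {
            struct account_work_sketch *w = kmalloc(sizeof(*w), GFP_KERNEL);

            if (!w)
                    return;
            INIT_WORK(&w->work, account_handler);
            w->mm = mm;
            w->diff = diff;
            schedule_work(&w->work);
    }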
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 413754b1d8a..8536aeb96af 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -214,9 +214,10 @@ struct ipath_user_pages_work { | |||
214 | unsigned long num_pages; | 214 | unsigned long num_pages; |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static void user_pages_account(void *ptr) | 217 | static void user_pages_account(struct work_struct *_work) |
218 | { | 218 | { |
219 | struct ipath_user_pages_work *work = ptr; | 219 | struct ipath_user_pages_work *work = |
220 | container_of(_work, struct ipath_user_pages_work, work); | ||
220 | 221 | ||
221 | down_write(&work->mm->mmap_sem); | 222 | down_write(&work->mm->mmap_sem); |
222 | work->mm->locked_vm -= work->num_pages; | 223 | work->mm->locked_vm -= work->num_pages; |
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages) | |||
242 | 243 | ||
243 | goto bail; | 244 | goto bail; |
244 | 245 | ||
245 | INIT_WORK(&work->work, user_pages_account, work); | 246 | INIT_WORK(&work->work, user_pages_account); |
246 | work->mm = mm; | 247 | work->mm = mm; |
247 | work->num_pages = num_pages; | 248 | work->num_pages = num_pages; |
248 | 249 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index cd044ea2dfa..e948158a28d 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c | |||
@@ -57,7 +57,7 @@ static int catas_reset_disable; | |||
57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); | 57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); |
58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); | 58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); |
59 | 59 | ||
60 | static void catas_reset(void *work_ptr) | 60 | static void catas_reset(struct work_struct *work) |
61 | { | 61 | { |
62 | struct mthca_dev *dev, *tmpdev; | 62 | struct mthca_dev *dev, *tmpdev; |
63 | LIST_HEAD(tlist); | 63 | LIST_HEAD(tlist); |
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev) | |||
203 | 203 | ||
204 | int __init mthca_catas_init(void) | 204 | int __init mthca_catas_init(void) |
205 | { | 205 | { |
206 | INIT_WORK(&catas_work, catas_reset, NULL); | 206 | INIT_WORK(&catas_work, catas_reset); |
207 | 207 | ||
208 | catas_wq = create_singlethread_workqueue("mthca_catas"); | 208 | catas_wq = create_singlethread_workqueue("mthca_catas"); |
209 | if (!catas_wq) | 209 | if (!catas_wq) |
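mthca_catas.c is the degenerate case: the old data argument was NULL, so there is nothing to recover and the new handler simply ignores its argument. Roughly:

    static LIST_HEAD(catas_list_sketch);    /* hypothetical module-global state */

    static void catas_reset_sketch(struct work_struct *work)
    {
            /* 'work' is unused: everything this handler needs is global. */
            /* walk catas_list_sketch and reset each affected device ... */
    }

    static DECLARE_WORK(catas_work_sketch, catas_reset_sketch);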
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index f2b61851a49..99547996aba 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -136,11 +136,11 @@ struct ipoib_dev_priv { | |||
136 | struct list_head multicast_list; | 136 | struct list_head multicast_list; |
137 | struct rb_root multicast_tree; | 137 | struct rb_root multicast_tree; |
138 | 138 | ||
139 | struct work_struct pkey_task; | 139 | struct delayed_work pkey_task; |
140 | struct work_struct mcast_task; | 140 | struct delayed_work mcast_task; |
141 | struct work_struct flush_task; | 141 | struct work_struct flush_task; |
142 | struct work_struct restart_task; | 142 | struct work_struct restart_task; |
143 | struct work_struct ah_reap_task; | 143 | struct delayed_work ah_reap_task; |
144 | 144 | ||
145 | struct ib_device *ca; | 145 | struct ib_device *ca; |
146 | u8 port; | 146 | u8 port; |
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev); | |||
254 | 254 | ||
255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, | 255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, |
256 | struct ipoib_ah *address, u32 qpn); | 256 | struct ipoib_ah *address, u32 qpn); |
257 | void ipoib_reap_ah(void *dev_ptr); | 257 | void ipoib_reap_ah(struct work_struct *work); |
258 | 258 | ||
259 | void ipoib_flush_paths(struct net_device *dev); | 259 | void ipoib_flush_paths(struct net_device *dev); |
260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); | 260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); |
261 | 261 | ||
262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
263 | void ipoib_ib_dev_flush(void *dev); | 263 | void ipoib_ib_dev_flush(struct work_struct *work); |
264 | void ipoib_ib_dev_cleanup(struct net_device *dev); | 264 | void ipoib_ib_dev_cleanup(struct net_device *dev); |
265 | 265 | ||
266 | int ipoib_ib_dev_open(struct net_device *dev); | 266 | int ipoib_ib_dev_open(struct net_device *dev); |
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev); | |||
271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
272 | void ipoib_dev_cleanup(struct net_device *dev); | 272 | void ipoib_dev_cleanup(struct net_device *dev); |
273 | 273 | ||
274 | void ipoib_mcast_join_task(void *dev_ptr); | 274 | void ipoib_mcast_join_task(struct work_struct *work); |
275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); | 275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); |
276 | 276 | ||
277 | void ipoib_mcast_restart_task(void *dev_ptr); | 277 | void ipoib_mcast_restart_task(struct work_struct *work); |
278 | int ipoib_mcast_start_thread(struct net_device *dev); | 278 | int ipoib_mcast_start_thread(struct net_device *dev); |
279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); | 279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); |
280 | 280 | ||
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler, | |||
312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); | 312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); |
313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); | 313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); |
314 | 314 | ||
315 | void ipoib_pkey_poll(void *dev); | 315 | void ipoib_pkey_poll(struct work_struct *work); |
316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); | 316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); |
317 | 317 | ||
318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
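The ipoib.h hunks show the other half of the conversion: each deferred-work member is now typed by how it gets queued. Items that are ever queued with a timeout (pkey_task, mcast_task, ah_reap_task) become struct delayed_work, items only ever queued immediately (flush_task, restart_task) stay struct work_struct, and every exported handler prototype switches to struct work_struct *. A condensed sketch of such a private structure, with hypothetical names:

    struct ipoib_priv_sketch {
            struct delayed_work mcast_task;   /* re-queued with a backoff delay */
            struct work_struct  restart_task; /* always queued with no delay    */
    };

    /* Same handler signature for both kinds of member. */
    void join_task_sketch(struct work_struct *work);
    void restart_task_sketch(struct work_struct *work);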
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 8bf5e9ec7c9..f10fba5d326 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev) | |||
400 | spin_unlock_irq(&priv->tx_lock); | 400 | spin_unlock_irq(&priv->tx_lock); |
401 | } | 401 | } |
402 | 402 | ||
403 | void ipoib_reap_ah(void *dev_ptr) | 403 | void ipoib_reap_ah(struct work_struct *work) |
404 | { | 404 | { |
405 | struct net_device *dev = dev_ptr; | 405 | struct ipoib_dev_priv *priv = |
406 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 406 | container_of(work, struct ipoib_dev_priv, ah_reap_task.work); |
407 | struct net_device *dev = priv->dev; | ||
407 | 408 | ||
408 | __ipoib_reap_ah(dev); | 409 | __ipoib_reap_ah(dev); |
409 | 410 | ||
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
613 | return 0; | 614 | return 0; |
614 | } | 615 | } |
615 | 616 | ||
616 | void ipoib_ib_dev_flush(void *_dev) | 617 | void ipoib_ib_dev_flush(struct work_struct *work) |
617 | { | 618 | { |
618 | struct net_device *dev = (struct net_device *)_dev; | 619 | struct ipoib_dev_priv *cpriv, *priv = |
619 | struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; | 620 | container_of(work, struct ipoib_dev_priv, flush_task); |
621 | struct net_device *dev = priv->dev; | ||
620 | 622 | ||
621 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { | 623 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { |
622 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 624 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev) | |||
638 | */ | 640 | */ |
639 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { | 641 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { |
640 | ipoib_ib_dev_up(dev); | 642 | ipoib_ib_dev_up(dev); |
641 | ipoib_mcast_restart_task(dev); | 643 | ipoib_mcast_restart_task(&priv->restart_task); |
642 | } | 644 | } |
643 | 645 | ||
644 | mutex_lock(&priv->vlan_mutex); | 646 | mutex_lock(&priv->vlan_mutex); |
645 | 647 | ||
646 | /* Flush any child interfaces too */ | 648 | /* Flush any child interfaces too */ |
647 | list_for_each_entry(cpriv, &priv->child_intfs, list) | 649 | list_for_each_entry(cpriv, &priv->child_intfs, list) |
648 | ipoib_ib_dev_flush(cpriv->dev); | 650 | ipoib_ib_dev_flush(&cpriv->flush_task); |
649 | 651 | ||
650 | mutex_unlock(&priv->vlan_mutex); | 652 | mutex_unlock(&priv->vlan_mutex); |
651 | } | 653 | } |
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) | |||
672 | * change async notification is available. | 674 | * change async notification is available. |
673 | */ | 675 | */ |
674 | 676 | ||
675 | void ipoib_pkey_poll(void *dev_ptr) | 677 | void ipoib_pkey_poll(struct work_struct *work) |
676 | { | 678 | { |
677 | struct net_device *dev = dev_ptr; | 679 | struct ipoib_dev_priv *priv = |
678 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 680 | container_of(work, struct ipoib_dev_priv, pkey_task.work); |
681 | struct net_device *dev = priv->dev; | ||
679 | 682 | ||
680 | ipoib_pkey_dev_check_presence(dev); | 683 | ipoib_pkey_dev_check_presence(dev); |
681 | 684 | ||
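ipoib_ib.c then shows both recovery forms side by side: a plain work_struct member is named directly in container_of(), a delayed_work member is named as member.work, and the net_device that used to arrive as the argument is now reached through a backpointer in the private structure. A sketch, again with hypothetical names:

    struct net_device;                       /* from <linux/netdevice.h> */

    struct flush_priv_sketch {
            struct delayed_work ah_reap_task;        /* delayed work item */
            struct work_struct  flush_task;          /* plain work item   */
            struct net_device  *dev;                 /* replaces the old argument */
    };

    static void reap_sketch(struct work_struct *work)
    {
            struct flush_priv_sketch *priv =
                    container_of(work, struct flush_priv_sketch, ah_reap_task.work);

            /* reap dead address handles on priv->dev ... */
    }

    static void flush_sketch(struct work_struct *work)
    {
            struct flush_priv_sketch *priv =
                    container_of(work, struct flush_priv_sketch, flush_task);

            /* flush priv->dev, then recurse into child interfaces by calling
             * flush_sketch(&child->flush_task) directly, as the hunk does */
    }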
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5ba3154320b..c0928024372 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev) | |||
940 | INIT_LIST_HEAD(&priv->dead_ahs); | 940 | INIT_LIST_HEAD(&priv->dead_ahs); |
941 | INIT_LIST_HEAD(&priv->multicast_list); | 941 | INIT_LIST_HEAD(&priv->multicast_list); |
942 | 942 | ||
943 | INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); | 943 | INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); |
944 | INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); | 944 | INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); |
945 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); | 945 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); |
946 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); | 946 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); |
947 | INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); | 947 | INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); |
948 | } | 948 | } |
949 | 949 | ||
950 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) | 950 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d282d65e3ee..b04b72ca32e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status, | |||
399 | mcast->backoff = 1; | 399 | mcast->backoff = 1; |
400 | mutex_lock(&mcast_mutex); | 400 | mutex_lock(&mcast_mutex); |
401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
402 | queue_work(ipoib_workqueue, &priv->mcast_task); | 402 | queue_delayed_work(ipoib_workqueue, |
403 | &priv->mcast_task, 0); | ||
403 | mutex_unlock(&mcast_mutex); | 404 | mutex_unlock(&mcast_mutex); |
404 | complete(&mcast->done); | 405 | complete(&mcast->done); |
405 | return; | 406 | return; |
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status, | |||
435 | 436 | ||
436 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { | 437 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { |
437 | if (status == -ETIMEDOUT) | 438 | if (status == -ETIMEDOUT) |
438 | queue_work(ipoib_workqueue, &priv->mcast_task); | 439 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
440 | 0); | ||
439 | else | 441 | else |
440 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, | 442 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
441 | mcast->backoff * HZ); | 443 | mcast->backoff * HZ); |
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |||
517 | mcast->query_id = ret; | 519 | mcast->query_id = ret; |
518 | } | 520 | } |
519 | 521 | ||
520 | void ipoib_mcast_join_task(void *dev_ptr) | 522 | void ipoib_mcast_join_task(struct work_struct *work) |
521 | { | 523 | { |
522 | struct net_device *dev = dev_ptr; | 524 | struct ipoib_dev_priv *priv = |
523 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 525 | container_of(work, struct ipoib_dev_priv, mcast_task.work); |
526 | struct net_device *dev = priv->dev; | ||
524 | 527 | ||
525 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 528 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
526 | return; | 529 | return; |
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) | |||
610 | 613 | ||
611 | mutex_lock(&mcast_mutex); | 614 | mutex_lock(&mcast_mutex); |
612 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) | 615 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) |
613 | queue_work(ipoib_workqueue, &priv->mcast_task); | 616 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); |
614 | mutex_unlock(&mcast_mutex); | 617 | mutex_unlock(&mcast_mutex); |
615 | 618 | ||
616 | spin_lock_irq(&priv->lock); | 619 | spin_lock_irq(&priv->lock); |
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
818 | } | 821 | } |
819 | } | 822 | } |
820 | 823 | ||
821 | void ipoib_mcast_restart_task(void *dev_ptr) | 824 | void ipoib_mcast_restart_task(struct work_struct *work) |
822 | { | 825 | { |
823 | struct net_device *dev = dev_ptr; | 826 | struct ipoib_dev_priv *priv = |
824 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 827 | container_of(work, struct ipoib_dev_priv, restart_task); |
828 | struct net_device *dev = priv->dev; | ||
825 | struct dev_mc_list *mclist; | 829 | struct dev_mc_list *mclist; |
826 | struct ipoib_mcast *mcast, *tmcast; | 830 | struct ipoib_mcast *mcast, *tmcast; |
827 | LIST_HEAD(remove_list); | 831 | LIST_HEAD(remove_list); |
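On the queueing side, ipoib_multicast.c shows the consequence of mcast_task becoming a delayed_work: it can no longer be handed to queue_work(), so the "run it now" case becomes queue_delayed_work() with a delay of 0, which queues the item immediately rather than arming its timer. A short sketch:

    struct mc_sketch {                       /* hypothetical */
            struct delayed_work mcast_task;
    };

    static void kick_join_sketch(struct workqueue_struct *wq, struct mc_sketch *p,
                                 unsigned long backoff)
    {
            /* delay 0 == queue immediately; otherwise arm the timer */
            queue_delayed_work(wq, &p->mcast_task, backoff ? backoff * HZ : 0);
    }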
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 18a00003499..693b7700289 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | static void iser_cq_tasklet_fn(unsigned long data); | 49 | static void iser_cq_tasklet_fn(unsigned long data); |
50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); | 50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); |
51 | static void iser_comp_error_worker(void *data); | 51 | static void iser_comp_error_worker(struct work_struct *work); |
52 | 52 | ||
53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) | 53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) |
54 | { | 54 | { |
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn) | |||
480 | init_waitqueue_head(&ib_conn->wait); | 480 | init_waitqueue_head(&ib_conn->wait); |
481 | atomic_set(&ib_conn->post_recv_buf_count, 0); | 481 | atomic_set(&ib_conn->post_recv_buf_count, 0); |
482 | atomic_set(&ib_conn->post_send_buf_count, 0); | 482 | atomic_set(&ib_conn->post_send_buf_count, 0); |
483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, | 483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); |
484 | ib_conn); | ||
485 | INIT_LIST_HEAD(&ib_conn->conn_list); | 484 | INIT_LIST_HEAD(&ib_conn->conn_list); |
486 | spin_lock_init(&ib_conn->lock); | 485 | spin_lock_init(&ib_conn->lock); |
487 | 486 | ||
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc) | |||
754 | return ret_val; | 753 | return ret_val; |
755 | } | 754 | } |
756 | 755 | ||
757 | static void iser_comp_error_worker(void *data) | 756 | static void iser_comp_error_worker(struct work_struct *work) |
758 | { | 757 | { |
759 | struct iser_conn *ib_conn = data; | 758 | struct iser_conn *ib_conn = |
759 | container_of(work, struct iser_conn, comperror_work); | ||
760 | 760 | ||
761 | /* getting here when the state is UP means that the conn is being * | 761 | /* getting here when the state is UP means that the conn is being * |
762 | * terminated asynchronously from the iSCSI layer's perspective. */ | 762 | * terminated asynchronously from the iSCSI layer's perspective. */ |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 64ab5fc7cca..a6289595557 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
390 | wait_for_completion(&target->done); | 390 | wait_for_completion(&target->done); |
391 | } | 391 | } |
392 | 392 | ||
393 | static void srp_remove_work(void *target_ptr) | 393 | static void srp_remove_work(struct work_struct *work) |
394 | { | 394 | { |
395 | struct srp_target_port *target = target_ptr; | 395 | struct srp_target_port *target = |
396 | container_of(work, struct srp_target_port, work); | ||
396 | 397 | ||
397 | spin_lock_irq(target->scsi_host->host_lock); | 398 | spin_lock_irq(target->scsi_host->host_lock); |
398 | if (target->state != SRP_TARGET_DEAD) { | 399 | if (target->state != SRP_TARGET_DEAD) { |
@@ -575,7 +576,7 @@ err: | |||
575 | spin_lock_irq(target->scsi_host->host_lock); | 576 | spin_lock_irq(target->scsi_host->host_lock); |
576 | if (target->state == SRP_TARGET_CONNECTING) { | 577 | if (target->state == SRP_TARGET_CONNECTING) { |
577 | target->state = SRP_TARGET_DEAD; | 578 | target->state = SRP_TARGET_DEAD; |
578 | INIT_WORK(&target->work, srp_remove_work, target); | 579 | INIT_WORK(&target->work, srp_remove_work); |
579 | schedule_work(&target->work); | 580 | schedule_work(&target->work); |
580 | } | 581 | } |
581 | spin_unlock_irq(target->scsi_host->host_lock); | 582 | spin_unlock_irq(target->scsi_host->host_lock); |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index cbb93669d1c..8451b29a3db 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd) | |||
567 | * interrupt context. | 567 | * interrupt context. |
568 | */ | 568 | */ |
569 | 569 | ||
570 | static void atkbd_event_work(void *data) | 570 | static void atkbd_event_work(struct work_struct *work) |
571 | { | 571 | { |
572 | struct atkbd *atkbd = data; | 572 | struct atkbd *atkbd = container_of(work, struct atkbd, event_work); |
573 | 573 | ||
574 | mutex_lock(&atkbd->event_mutex); | 574 | mutex_lock(&atkbd->event_mutex); |
575 | 575 | ||
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) | |||
943 | 943 | ||
944 | atkbd->dev = dev; | 944 | atkbd->dev = dev; |
945 | ps2_init(&atkbd->ps2dev, serio); | 945 | ps2_init(&atkbd->ps2dev, serio); |
946 | INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); | 946 | INIT_WORK(&atkbd->event_work, atkbd_event_work); |
947 | mutex_init(&atkbd->event_mutex); | 947 | mutex_init(&atkbd->event_mutex); |
948 | 948 | ||
949 | switch (serio->id.type) { | 949 | switch (serio->id.type) { |
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c index 979b93e33da..b7f049b45b6 100644 --- a/drivers/input/keyboard/lkkbd.c +++ b/drivers/input/keyboard/lkkbd.c | |||
@@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code, | |||
572 | * were in. | 572 | * were in. |
573 | */ | 573 | */ |
574 | static void | 574 | static void |
575 | lkkbd_reinit (void *data) | 575 | lkkbd_reinit (struct work_struct *work) |
576 | { | 576 | { |
577 | struct lkkbd *lk = data; | 577 | struct lkkbd *lk = container_of(work, struct lkkbd, tq); |
578 | int division; | 578 | int division; |
579 | unsigned char leds_on = 0; | 579 | unsigned char leds_on = 0; |
580 | unsigned char leds_off = 0; | 580 | unsigned char leds_off = 0; |
@@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv) | |||
651 | 651 | ||
652 | lk->serio = serio; | 652 | lk->serio = serio; |
653 | lk->dev = input_dev; | 653 | lk->dev = input_dev; |
654 | INIT_WORK (&lk->tq, lkkbd_reinit, lk); | 654 | INIT_WORK (&lk->tq, lkkbd_reinit); |
655 | lk->bell_volume = bell_volume; | 655 | lk->bell_volume = bell_volume; |
656 | lk->keyclick_volume = keyclick_volume; | 656 | lk->keyclick_volume = keyclick_volume; |
657 | lk->ctrlclick_volume = ctrlclick_volume; | 657 | lk->ctrlclick_volume = ctrlclick_volume; |
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index cac4781103c..6cd887c5eb0 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c | |||
@@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) | |||
208 | * were in. | 208 | * were in. |
209 | */ | 209 | */ |
210 | 210 | ||
211 | static void sunkbd_reinit(void *data) | 211 | static void sunkbd_reinit(struct work_struct *work) |
212 | { | 212 | { |
213 | struct sunkbd *sunkbd = data; | 213 | struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); |
214 | 214 | ||
215 | wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); | 215 | wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); |
216 | 216 | ||
@@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv) | |||
248 | sunkbd->serio = serio; | 248 | sunkbd->serio = serio; |
249 | sunkbd->dev = input_dev; | 249 | sunkbd->dev = input_dev; |
250 | init_waitqueue_head(&sunkbd->wait); | 250 | init_waitqueue_head(&sunkbd->wait); |
251 | INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); | 251 | INIT_WORK(&sunkbd->tq, sunkbd_reinit); |
252 | snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); | 252 | snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); |
253 | 253 | ||
254 | serio_set_drvdata(serio, sunkbd); | 254 | serio_set_drvdata(serio, sunkbd); |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 6f9b2c7cc9c..52bb2226ce2 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse) | |||
888 | * psmouse_resync() attempts to re-validate current protocol. | 888 | * psmouse_resync() attempts to re-validate current protocol. |
889 | */ | 889 | */ |
890 | 890 | ||
891 | static void psmouse_resync(void *p) | 891 | static void psmouse_resync(struct work_struct *work) |
892 | { | 892 | { |
893 | struct psmouse *psmouse = p, *parent = NULL; | 893 | struct psmouse *parent = NULL, *psmouse = |
894 | container_of(work, struct psmouse, resync_work); | ||
894 | struct serio *serio = psmouse->ps2dev.serio; | 895 | struct serio *serio = psmouse->ps2dev.serio; |
895 | psmouse_ret_t rc = PSMOUSE_GOOD_DATA; | 896 | psmouse_ret_t rc = PSMOUSE_GOOD_DATA; |
896 | int failed = 0, enabled = 0; | 897 | int failed = 0, enabled = 0; |
@@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv) | |||
1121 | goto out; | 1122 | goto out; |
1122 | 1123 | ||
1123 | ps2_init(&psmouse->ps2dev, serio); | 1124 | ps2_init(&psmouse->ps2dev, serio); |
1124 | INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); | 1125 | INIT_WORK(&psmouse->resync_work, psmouse_resync); |
1125 | psmouse->dev = input_dev; | 1126 | psmouse->dev = input_dev; |
1126 | snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); | 1127 | snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); |
1127 | 1128 | ||
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index e5b1b60757b..b3e84d3bb7f 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c | |||
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command); | |||
251 | * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) | 251 | * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) |
252 | */ | 252 | */ |
253 | 253 | ||
254 | static void ps2_execute_scheduled_command(void *data) | 254 | static void ps2_execute_scheduled_command(struct work_struct *work) |
255 | { | 255 | { |
256 | struct ps2work *ps2work = data; | 256 | struct ps2work *ps2work = container_of(work, struct ps2work, work); |
257 | 257 | ||
258 | ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); | 258 | ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); |
259 | kfree(ps2work); | 259 | kfree(ps2work); |
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman | |||
278 | ps2work->ps2dev = ps2dev; | 278 | ps2work->ps2dev = ps2dev; |
279 | ps2work->command = command; | 279 | ps2work->command = command; |
280 | memcpy(ps2work->param, param, send); | 280 | memcpy(ps2work->param, param, send); |
281 | INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); | 281 | INIT_WORK(&ps2work->work, ps2_execute_scheduled_command); |
282 | 282 | ||
283 | if (!schedule_work(&ps2work->work)) { | 283 | if (!schedule_work(&ps2work->work)) { |
284 | kfree(ps2work); | 284 | kfree(ps2work); |
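libps2.c repeats the heap-allocated wrapper, here so the command byte and parameter buffer can ride along now that there is no data argument, and it also checks schedule_work()'s return value: nonzero means the item was queued, 0 means that work_struct was already pending, in which case the freshly allocated wrapper would never run and the submitter must free it. A sketch with a hypothetical wrapper type:

    struct ps2_cmd_sketch {
            struct work_struct work;
            struct ps2dev *ps2dev;
            int command;
            unsigned char param[4];
    };

    static void ps2_cmd_worker_sketch(struct work_struct *work)
    {
            struct ps2_cmd_sketch *w =
                    container_of(work, struct ps2_cmd_sketch, work);

            ps2_command(w->ps2dev, w->param, w->command);
            kfree(w);
    }

    static int ps2_defer_sketch(struct ps2_cmd_sketch *w)
    {
            INIT_WORK(&w->work, ps2_cmd_worker_sketch);
            if (!schedule_work(&w->work)) {         /* already pending: won't run */
                    kfree(w);
                    return -1;
            }
            return 0;
    }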
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c index 6ae6eb32211..946c38cf6f8 100644 --- a/drivers/isdn/act2000/capi.c +++ b/drivers/isdn/act2000/capi.c | |||
@@ -627,8 +627,10 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) { | |||
627 | } | 627 | } |
628 | 628 | ||
629 | void | 629 | void |
630 | actcapi_dispatch(act2000_card *card) | 630 | actcapi_dispatch(struct work_struct *work) |
631 | { | 631 | { |
632 | struct act2000_card *card = | ||
633 | container_of(work, struct act2000_card, rcv_tq); | ||
632 | struct sk_buff *skb; | 634 | struct sk_buff *skb; |
633 | actcapi_msg *msg; | 635 | actcapi_msg *msg; |
634 | __u16 ccmd; | 636 | __u16 ccmd; |
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h index 49f453c53c6..e55f6a931f6 100644 --- a/drivers/isdn/act2000/capi.h +++ b/drivers/isdn/act2000/capi.h | |||
@@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int | |||
356 | extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); | 356 | extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); |
357 | extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); | 357 | extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); |
358 | extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); | 358 | extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); |
359 | extern void actcapi_dispatch(act2000_card *); | 359 | extern void actcapi_dispatch(struct work_struct *); |
360 | #ifdef DEBUG_MSG | 360 | #ifdef DEBUG_MSG |
361 | extern void actcapi_debug_msg(struct sk_buff *skb, int); | 361 | extern void actcapi_debug_msg(struct sk_buff *skb, int); |
362 | #else | 362 | #else |
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index d89dcde4ead..90593e2ef87 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c | |||
@@ -192,8 +192,11 @@ act2000_set_msn(act2000_card *card, char *eazmsn) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | static void | 194 | static void |
195 | act2000_transmit(struct act2000_card *card) | 195 | act2000_transmit(struct work_struct *work) |
196 | { | 196 | { |
197 | struct act2000_card *card = | ||
198 | container_of(work, struct act2000_card, snd_tq); | ||
199 | |||
197 | switch (card->bus) { | 200 | switch (card->bus) { |
198 | case ACT2000_BUS_ISA: | 201 | case ACT2000_BUS_ISA: |
199 | act2000_isa_send(card); | 202 | act2000_isa_send(card); |
@@ -207,8 +210,11 @@ act2000_transmit(struct act2000_card *card) | |||
207 | } | 210 | } |
208 | 211 | ||
209 | static void | 212 | static void |
210 | act2000_receive(struct act2000_card *card) | 213 | act2000_receive(struct work_struct *work) |
211 | { | 214 | { |
215 | struct act2000_card *card = | ||
216 | container_of(work, struct act2000_card, poll_tq); | ||
217 | |||
212 | switch (card->bus) { | 218 | switch (card->bus) { |
213 | case ACT2000_BUS_ISA: | 219 | case ACT2000_BUS_ISA: |
214 | act2000_isa_receive(card); | 220 | act2000_isa_receive(card); |
@@ -227,7 +233,7 @@ act2000_poll(unsigned long data) | |||
227 | act2000_card * card = (act2000_card *)data; | 233 | act2000_card * card = (act2000_card *)data; |
228 | unsigned long flags; | 234 | unsigned long flags; |
229 | 235 | ||
230 | act2000_receive(card); | 236 | act2000_receive(&card->poll_tq); |
231 | spin_lock_irqsave(&card->lock, flags); | 237 | spin_lock_irqsave(&card->lock, flags); |
232 | mod_timer(&card->ptimer, jiffies+3); | 238 | mod_timer(&card->ptimer, jiffies+3); |
233 | spin_unlock_irqrestore(&card->lock, flags); | 239 | spin_unlock_irqrestore(&card->lock, flags); |
@@ -578,9 +584,9 @@ act2000_alloccard(int bus, int port, int irq, char *id) | |||
578 | skb_queue_head_init(&card->sndq); | 584 | skb_queue_head_init(&card->sndq); |
579 | skb_queue_head_init(&card->rcvq); | 585 | skb_queue_head_init(&card->rcvq); |
580 | skb_queue_head_init(&card->ackq); | 586 | skb_queue_head_init(&card->ackq); |
581 | INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); | 587 | INIT_WORK(&card->snd_tq, act2000_transmit); |
582 | INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); | 588 | INIT_WORK(&card->rcv_tq, actcapi_dispatch); |
583 | INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); | 589 | INIT_WORK(&card->poll_tq, act2000_receive); |
584 | init_timer(&card->ptimer); | 590 | init_timer(&card->ptimer); |
585 | card->interface.owner = THIS_MODULE; | 591 | card->interface.owner = THIS_MODULE; |
586 | card->interface.channels = ACT2000_BCH; | 592 | card->interface.channels = ACT2000_BCH; |
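The act2000 hunks also retire the old (void *)(void *) function-pointer casts: INIT_WORK() now type-checks its handler, so the workers adopt the struct work_struct * signature themselves, and the poll timer invokes the receive worker synchronously by handing it the address of the embedded work item. A sketch with a hypothetical card structure:

    struct card_sketch {
            struct work_struct poll_tq;
            struct timer_list ptimer;
    };

    static void card_receive_sketch(struct work_struct *work)   /* exact signature */
    {
            struct card_sketch *card =
                    container_of(work, struct card_sketch, poll_tq);

            /* drain the hardware receive fifo for this card ... */
    }

    static void card_poll_sketch(unsigned long data)            /* timer callback */
    {
            struct card_sketch *card = (struct card_sketch *)data;

            card_receive_sketch(&card->poll_tq);     /* direct, synchronous call */
            mod_timer(&card->ptimer, jiffies + 3);
    }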
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 8c4fcb9027b..783a2552631 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c | |||
@@ -208,9 +208,10 @@ static void notify_down(u32 contr) | |||
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | static void notify_handler(void *data) | 211 | static void notify_handler(struct work_struct *work) |
212 | { | 212 | { |
213 | struct capi_notifier *np = data; | 213 | struct capi_notifier *np = |
214 | container_of(work, struct capi_notifier, work); | ||
214 | 215 | ||
215 | switch (np->cmd) { | 216 | switch (np->cmd) { |
216 | case KCI_CONTRUP: | 217 | case KCI_CONTRUP: |
@@ -235,7 +236,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) | |||
235 | if (!np) | 236 | if (!np) |
236 | return -ENOMEM; | 237 | return -ENOMEM; |
237 | 238 | ||
238 | INIT_WORK(&np->work, notify_handler, np); | 239 | INIT_WORK(&np->work, notify_handler); |
239 | np->cmd = cmd; | 240 | np->cmd = cmd; |
240 | np->controller = controller; | 241 | np->controller = controller; |
241 | np->applid = applid; | 242 | np->applid = applid; |
@@ -248,10 +249,11 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) | |||
248 | 249 | ||
249 | /* -------- Receiver ------------------------------------------ */ | 250 | /* -------- Receiver ------------------------------------------ */ |
250 | 251 | ||
251 | static void recv_handler(void *_ap) | 252 | static void recv_handler(struct work_struct *work) |
252 | { | 253 | { |
253 | struct sk_buff *skb; | 254 | struct sk_buff *skb; |
254 | struct capi20_appl *ap = (struct capi20_appl *) _ap; | 255 | struct capi20_appl *ap = |
256 | container_of(work, struct capi20_appl, recv_work); | ||
255 | 257 | ||
256 | if ((!ap) || (ap->release_in_progress)) | 258 | if ((!ap) || (ap->release_in_progress)) |
257 | return; | 259 | return; |
@@ -527,7 +529,7 @@ u16 capi20_register(struct capi20_appl *ap) | |||
527 | ap->callback = NULL; | 529 | ap->callback = NULL; |
528 | init_MUTEX(&ap->recv_sem); | 530 | init_MUTEX(&ap->recv_sem); |
529 | skb_queue_head_init(&ap->recv_queue); | 531 | skb_queue_head_init(&ap->recv_queue); |
530 | INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); | 532 | INIT_WORK(&ap->recv_work, recv_handler); |
531 | ap->release_in_progress = 0; | 533 | ap->release_in_progress = 0; |
532 | 534 | ||
533 | write_unlock_irqrestore(&application_lock, flags); | 535 | write_unlock_irqrestore(&application_lock, flags); |
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index bec59010bc6..3b19caeba25 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c | |||
@@ -232,9 +232,10 @@ Amd7930_new_ph(struct IsdnCardState *cs) | |||
232 | 232 | ||
233 | 233 | ||
234 | static void | 234 | static void |
235 | Amd7930_bh(struct IsdnCardState *cs) | 235 | Amd7930_bh(struct work_struct *work) |
236 | { | 236 | { |
237 | 237 | struct IsdnCardState *cs = | |
238 | container_of(work, struct IsdnCardState, tqueue); | ||
238 | struct PStack *stptr; | 239 | struct PStack *stptr; |
239 | 240 | ||
240 | if (!cs) | 241 | if (!cs) |
@@ -789,7 +790,7 @@ Amd7930_init(struct IsdnCardState *cs) | |||
789 | void __devinit | 790 | void __devinit |
790 | setup_Amd7930(struct IsdnCardState *cs) | 791 | setup_Amd7930(struct IsdnCardState *cs) |
791 | { | 792 | { |
792 | INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); | 793 | INIT_WORK(&cs->tqueue, Amd7930_bh); |
793 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 794 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
794 | cs->dbusytimer.data = (long) cs; | 795 | cs->dbusytimer.data = (long) cs; |
795 | init_timer(&cs->dbusytimer); | 796 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 785b08554fc..cede72cdbb3 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c | |||
@@ -1137,7 +1137,6 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow | |||
1137 | cs->tx_skb = NULL; | 1137 | cs->tx_skb = NULL; |
1138 | cs->tx_cnt = 0; | 1138 | cs->tx_cnt = 0; |
1139 | cs->event = 0; | 1139 | cs->event = 0; |
1140 | cs->tqueue.data = cs; | ||
1141 | 1140 | ||
1142 | skb_queue_head_init(&cs->rq); | 1141 | skb_queue_head_init(&cs->rq); |
1143 | skb_queue_head_init(&cs->sq); | 1142 | skb_queue_head_init(&cs->sq); |
@@ -1554,7 +1553,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg); | |||
1554 | static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); | 1553 | static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); |
1555 | static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); | 1554 | static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); |
1556 | static void hisax_bc_close(struct BCState *bcs); | 1555 | static void hisax_bc_close(struct BCState *bcs); |
1557 | static void hisax_bh(struct IsdnCardState *cs); | 1556 | static void hisax_bh(struct work_struct *work); |
1558 | static void EChannel_proc_rcv(struct hisax_d_if *d_if); | 1557 | static void EChannel_proc_rcv(struct hisax_d_if *d_if); |
1559 | 1558 | ||
1560 | int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], | 1559 | int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], |
@@ -1586,7 +1585,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], | |||
1586 | hisax_d_if->cs = cs; | 1585 | hisax_d_if->cs = cs; |
1587 | cs->hw.hisax_d_if = hisax_d_if; | 1586 | cs->hw.hisax_d_if = hisax_d_if; |
1588 | cs->cardmsg = hisax_cardmsg; | 1587 | cs->cardmsg = hisax_cardmsg; |
1589 | INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); | 1588 | INIT_WORK(&cs->tqueue, hisax_bh); |
1590 | cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; | 1589 | cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; |
1591 | for (i = 0; i < 2; i++) { | 1590 | for (i = 0; i < 2; i++) { |
1592 | cs->bcs[i].BC_SetStack = hisax_bc_setstack; | 1591 | cs->bcs[i].BC_SetStack = hisax_bc_setstack; |
@@ -1618,8 +1617,10 @@ static void hisax_sched_event(struct IsdnCardState *cs, int event) | |||
1618 | schedule_work(&cs->tqueue); | 1617 | schedule_work(&cs->tqueue); |
1619 | } | 1618 | } |
1620 | 1619 | ||
1621 | static void hisax_bh(struct IsdnCardState *cs) | 1620 | static void hisax_bh(struct work_struct *work) |
1622 | { | 1621 | { |
1622 | struct IsdnCardState *cs = | ||
1623 | container_of(work, struct IsdnCardState, tqueue); | ||
1623 | struct PStack *st; | 1624 | struct PStack *st; |
1624 | int pr; | 1625 | int pr; |
1625 | 1626 | ||
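The hisax config.c change is the flip side of the new handler signature: work_struct no longer carries a data field, so the "cs->tqueue.data = cs" assignment disappears and hisax_bh() locates its card state purely by position. Roughly, reusing the names from the hunks:

    static void hisax_bh_sketch(struct work_struct *work)
    {
            struct IsdnCardState *cs =
                    container_of(work, struct IsdnCardState, tqueue);

            /* run the queued D-channel events for this card ... */
    }

    static void setup_sketch(struct IsdnCardState *cs)
    {
            /* no ".data = cs" assignment and no third INIT_WORK() argument */
            INIT_WORK(&cs->tqueue, hisax_bh_sketch);
    }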
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c index d852c9d998b..de9b1a4d6ba 100644 --- a/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/drivers/isdn/hisax/hfc4s8s_l1.c | |||
@@ -1083,8 +1083,9 @@ tx_b_frame(struct hfc4s8s_btype *bch) | |||
1083 | /* bottom half handler for interrupt */ | 1083 | /* bottom half handler for interrupt */ |
1084 | /*************************************/ | 1084 | /*************************************/ |
1085 | static void | 1085 | static void |
1086 | hfc4s8s_bh(hfc4s8s_hw * hw) | 1086 | hfc4s8s_bh(struct work_struct *work) |
1087 | { | 1087 | { |
1088 | hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue); | ||
1088 | u_char b; | 1089 | u_char b; |
1089 | struct hfc4s8s_l1 *l1p; | 1090 | struct hfc4s8s_l1 *l1p; |
1090 | volatile u_char *fifo_stat; | 1091 | volatile u_char *fifo_stat; |
@@ -1550,7 +1551,7 @@ setup_instance(hfc4s8s_hw * hw) | |||
1550 | goto out; | 1551 | goto out; |
1551 | } | 1552 | } |
1552 | 1553 | ||
1553 | INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); | 1554 | INIT_WORK(&hw->tqueue, hfc4s8s_bh); |
1554 | 1555 | ||
1555 | if (request_irq | 1556 | if (request_irq |
1556 | (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { | 1557 | (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { |
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c index 6360e821472..8d9864453a2 100644 --- a/drivers/isdn/hisax/hfc_2bds0.c +++ b/drivers/isdn/hisax/hfc_2bds0.c | |||
@@ -549,10 +549,11 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
549 | } | 549 | } |
550 | 550 | ||
551 | static void | 551 | static void |
552 | hfcd_bh(struct IsdnCardState *cs) | 552 | hfcd_bh(struct work_struct *work) |
553 | { | 553 | { |
554 | if (!cs) | 554 | struct IsdnCardState *cs = |
555 | return; | 555 | container_of(work, struct IsdnCardState, tqueue); |
556 | |||
556 | if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { | 557 | if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { |
557 | switch (cs->dc.hfcd.ph_state) { | 558 | switch (cs->dc.hfcd.ph_state) { |
558 | case (0): | 559 | case (0): |
@@ -1072,5 +1073,5 @@ set_cs_func(struct IsdnCardState *cs) | |||
1072 | cs->dbusytimer.function = (void *) hfc_dbusy_timer; | 1073 | cs->dbusytimer.function = (void *) hfc_dbusy_timer; |
1073 | cs->dbusytimer.data = (long) cs; | 1074 | cs->dbusytimer.data = (long) cs; |
1074 | init_timer(&cs->dbusytimer); | 1075 | init_timer(&cs->dbusytimer); |
1075 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); | 1076 | INIT_WORK(&cs->tqueue, hfcd_bh); |
1076 | } | 1077 | } |
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 93f60b56351..5db0a85b827 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -1506,8 +1506,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
1506 | /* handle L1 state changes */ | 1506 | /* handle L1 state changes */ |
1507 | /***************************/ | 1507 | /***************************/ |
1508 | static void | 1508 | static void |
1509 | hfcpci_bh(struct IsdnCardState *cs) | 1509 | hfcpci_bh(struct work_struct *work) |
1510 | { | 1510 | { |
1511 | struct IsdnCardState *cs = | ||
1512 | container_of(work, struct IsdnCardState, tqueue); | ||
1511 | u_long flags; | 1513 | u_long flags; |
1512 | // struct PStack *stptr; | 1514 | // struct PStack *stptr; |
1513 | 1515 | ||
@@ -1722,7 +1724,7 @@ setup_hfcpci(struct IsdnCard *card) | |||
1722 | Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); | 1724 | Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); |
1723 | /* At this point the needed PCI config is done */ | 1725 | /* At this point the needed PCI config is done */ |
1724 | /* fifos are still not enabled */ | 1726 | /* fifos are still not enabled */ |
1725 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); | 1727 | INIT_WORK(&cs->tqueue, hfcpci_bh); |
1726 | cs->setstack_d = setstack_hfcpci; | 1728 | cs->setstack_d = setstack_hfcpci; |
1727 | cs->BC_Send_Data = &hfcpci_send_data; | 1729 | cs->BC_Send_Data = &hfcpci_send_data; |
1728 | cs->readisac = NULL; | 1730 | cs->readisac = NULL; |
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index 954d1536db1..4fd09d21a27 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c | |||
@@ -1251,8 +1251,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
1251 | /* handle L1 state changes */ | 1251 | /* handle L1 state changes */ |
1252 | /***************************/ | 1252 | /***************************/ |
1253 | static void | 1253 | static void |
1254 | hfcsx_bh(struct IsdnCardState *cs) | 1254 | hfcsx_bh(struct work_struct *work) |
1255 | { | 1255 | { |
1256 | struct IsdnCardState *cs = | ||
1257 | container_of(work, struct IsdnCardState, tqueue); | ||
1256 | u_long flags; | 1258 | u_long flags; |
1257 | 1259 | ||
1258 | if (!cs) | 1260 | if (!cs) |
@@ -1499,7 +1501,7 @@ setup_hfcsx(struct IsdnCard *card) | |||
1499 | cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; | 1501 | cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; |
1500 | cs->dbusytimer.data = (long) cs; | 1502 | cs->dbusytimer.data = (long) cs; |
1501 | init_timer(&cs->dbusytimer); | 1503 | init_timer(&cs->dbusytimer); |
1502 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); | 1504 | INIT_WORK(&cs->tqueue, hfcsx_bh); |
1503 | cs->readisac = NULL; | 1505 | cs->readisac = NULL; |
1504 | cs->writeisac = NULL; | 1506 | cs->writeisac = NULL; |
1505 | cs->readisacfifo = NULL; | 1507 | cs->readisacfifo = NULL; |
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c index da706925d54..682cac32f25 100644 --- a/drivers/isdn/hisax/icc.c +++ b/drivers/isdn/hisax/icc.c | |||
@@ -77,8 +77,10 @@ icc_new_ph(struct IsdnCardState *cs) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | static void | 79 | static void |
80 | icc_bh(struct IsdnCardState *cs) | 80 | icc_bh(struct work_struct *work) |
81 | { | 81 | { |
82 | struct IsdnCardState *cs = | ||
83 | container_of(work, struct IsdnCardState, tqueue); | ||
82 | struct PStack *stptr; | 84 | struct PStack *stptr; |
83 | 85 | ||
84 | if (!cs) | 86 | if (!cs) |
@@ -674,7 +676,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs) | |||
674 | void __devinit | 676 | void __devinit |
675 | setup_icc(struct IsdnCardState *cs) | 677 | setup_icc(struct IsdnCardState *cs) |
676 | { | 678 | { |
677 | INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); | 679 | INIT_WORK(&cs->tqueue, icc_bh); |
678 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 680 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
679 | cs->dbusytimer.data = (long) cs; | 681 | cs->dbusytimer.data = (long) cs; |
680 | init_timer(&cs->dbusytimer); | 682 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c index 282f349408b..4e9f23803da 100644 --- a/drivers/isdn/hisax/isac.c +++ b/drivers/isdn/hisax/isac.c | |||
@@ -81,8 +81,10 @@ isac_new_ph(struct IsdnCardState *cs) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static void | 83 | static void |
84 | isac_bh(struct IsdnCardState *cs) | 84 | isac_bh(struct work_struct *work) |
85 | { | 85 | { |
86 | struct IsdnCardState *cs = | ||
87 | container_of(work, struct IsdnCardState, tqueue); | ||
86 | struct PStack *stptr; | 88 | struct PStack *stptr; |
87 | 89 | ||
88 | if (!cs) | 90 | if (!cs) |
@@ -674,7 +676,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs) | |||
674 | void __devinit | 676 | void __devinit |
675 | setup_isac(struct IsdnCardState *cs) | 677 | setup_isac(struct IsdnCardState *cs) |
676 | { | 678 | { |
677 | INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); | 679 | INIT_WORK(&cs->tqueue, isac_bh); |
678 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 680 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
679 | cs->dbusytimer.data = (long) cs; | 681 | cs->dbusytimer.data = (long) cs; |
680 | init_timer(&cs->dbusytimer); | 682 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index 674af673ff9..6f1a6583b17 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c | |||
@@ -437,8 +437,10 @@ extern void BChannel_bh(struct BCState *); | |||
437 | #define B_LL_OK 10 | 437 | #define B_LL_OK 10 |
438 | 438 | ||
439 | static void | 439 | static void |
440 | isar_bh(struct BCState *bcs) | 440 | isar_bh(struct work_struct *work) |
441 | { | 441 | { |
442 | struct BCState *bcs = container_of(work, struct BCState, tqueue); | ||
443 | |||
442 | BChannel_bh(bcs); | 444 | BChannel_bh(bcs); |
443 | if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) | 445 | if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) |
444 | ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); | 446 | ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); |
@@ -1580,7 +1582,7 @@ isar_setup(struct IsdnCardState *cs) | |||
1580 | cs->bcs[i].mode = 0; | 1582 | cs->bcs[i].mode = 0; |
1581 | cs->bcs[i].hw.isar.dpath = i + 1; | 1583 | cs->bcs[i].hw.isar.dpath = i + 1; |
1582 | modeisar(&cs->bcs[i], 0, 0); | 1584 | modeisar(&cs->bcs[i], 0, 0); |
1583 | INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); | 1585 | INIT_WORK(&cs->bcs[i].tqueue, isar_bh); |
1584 | } | 1586 | } |
1585 | } | 1587 | } |
1586 | 1588 | ||
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c index bab35688648..a14204ec88e 100644 --- a/drivers/isdn/hisax/isdnl1.c +++ b/drivers/isdn/hisax/isdnl1.c | |||
@@ -315,8 +315,10 @@ BChannel_proc_ack(struct BCState *bcs) | |||
315 | } | 315 | } |
316 | 316 | ||
317 | void | 317 | void |
318 | BChannel_bh(struct BCState *bcs) | 318 | BChannel_bh(struct work_struct *work) |
319 | { | 319 | { |
320 | struct BCState *bcs = container_of(work, struct BCState, tqueue); | ||
321 | |||
320 | if (!bcs) | 322 | if (!bcs) |
321 | return; | 323 | return; |
322 | if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) | 324 | if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) |
@@ -362,7 +364,7 @@ init_bcstate(struct IsdnCardState *cs, int bc) | |||
362 | 364 | ||
363 | bcs->cs = cs; | 365 | bcs->cs = cs; |
364 | bcs->channel = bc; | 366 | bcs->channel = bc; |
365 | INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); | 367 | INIT_WORK(&bcs->tqueue, BChannel_bh); |
366 | spin_lock_init(&bcs->aclock); | 368 | spin_lock_init(&bcs->aclock); |
367 | bcs->BC_SetStack = NULL; | 369 | bcs->BC_SetStack = NULL; |
368 | bcs->BC_Close = NULL; | 370 | bcs->BC_Close = NULL; |
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index 1655341797a..3aeceaf9769 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c | |||
@@ -101,8 +101,10 @@ W6692_new_ph(struct IsdnCardState *cs) | |||
101 | } | 101 | } |
102 | 102 | ||
103 | static void | 103 | static void |
104 | W6692_bh(struct IsdnCardState *cs) | 104 | W6692_bh(struct work_struct *work) |
105 | { | 105 | { |
106 | struct IsdnCardState *cs = | ||
107 | container_of(work, struct IsdnCardState, tqueue); | ||
106 | struct PStack *stptr; | 108 | struct PStack *stptr; |
107 | 109 | ||
108 | if (!cs) | 110 | if (!cs) |
@@ -1070,7 +1072,7 @@ setup_w6692(struct IsdnCard *card) | |||
1070 | id_list[cs->subtyp].card_name, cs->irq, | 1072 | id_list[cs->subtyp].card_name, cs->irq, |
1071 | cs->hw.w6692.iobase); | 1073 | cs->hw.w6692.iobase); |
1072 | 1074 | ||
1073 | INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); | 1075 | INIT_WORK(&cs->tqueue, W6692_bh); |
1074 | cs->readW6692 = &ReadW6692; | 1076 | cs->readW6692 = &ReadW6692; |
1075 | cs->writeW6692 = &WriteW6692; | 1077 | cs->writeW6692 = &WriteW6692; |
1076 | cs->readisacfifo = &ReadISACfifo; | 1078 | cs->readisacfifo = &ReadISACfifo; |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 1f8d6ae66b4..2e4daebfb7e 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb) | |||
984 | /* | 984 | /* |
985 | * called from tq_immediate | 985 | * called from tq_immediate |
986 | */ | 986 | */ |
987 | static void isdn_net_softint(void *private) | 987 | static void isdn_net_softint(struct work_struct *work) |
988 | { | 988 | { |
989 | isdn_net_local *lp = private; | 989 | isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); |
990 | struct sk_buff *skb; | 990 | struct sk_buff *skb; |
991 | 991 | ||
992 | spin_lock_bh(&lp->xmit_lock); | 992 | spin_lock_bh(&lp->xmit_lock); |
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master) | |||
2596 | netdev->local->netdev = netdev; | 2596 | netdev->local->netdev = netdev; |
2597 | netdev->local->next = netdev->local; | 2597 | netdev->local->next = netdev->local; |
2598 | 2598 | ||
2599 | INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); | 2599 | INIT_WORK(&netdev->local->tqueue, isdn_net_softint); |
2600 | spin_lock_init(&netdev->local->xmit_lock); | 2600 | spin_lock_init(&netdev->local->xmit_lock); |
2601 | 2601 | ||
2602 | netdev->local->isdn_device = -1; | 2602 | netdev->local->isdn_device = -1; |
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c index 6ead5e1508b..1966f3410a1 100644 --- a/drivers/isdn/pcbit/drv.c +++ b/drivers/isdn/pcbit/drv.c | |||
@@ -68,8 +68,6 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list); | |||
68 | static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); | 68 | static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); |
69 | 69 | ||
70 | 70 | ||
71 | extern void pcbit_deliver(void * data); | ||
72 | |||
73 | int pcbit_init_dev(int board, int mem_base, int irq) | 71 | int pcbit_init_dev(int board, int mem_base, int irq) |
74 | { | 72 | { |
75 | struct pcbit_dev *dev; | 73 | struct pcbit_dev *dev; |
@@ -129,7 +127,7 @@ int pcbit_init_dev(int board, int mem_base, int irq) | |||
129 | memset(dev->b2, 0, sizeof(struct pcbit_chan)); | 127 | memset(dev->b2, 0, sizeof(struct pcbit_chan)); |
130 | dev->b2->id = 1; | 128 | dev->b2->id = 1; |
131 | 129 | ||
132 | INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); | 130 | INIT_WORK(&dev->qdelivery, pcbit_deliver); |
133 | 131 | ||
134 | /* | 132 | /* |
135 | * interrupts | 133 | * interrupts |
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c index 937fd212038..0c9f6df873f 100644 --- a/drivers/isdn/pcbit/layer2.c +++ b/drivers/isdn/pcbit/layer2.c | |||
@@ -67,7 +67,6 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg, | |||
67 | * Prototypes | 67 | * Prototypes |
68 | */ | 68 | */ |
69 | 69 | ||
70 | void pcbit_deliver(void *data); | ||
71 | static void pcbit_transmit(struct pcbit_dev *dev); | 70 | static void pcbit_transmit(struct pcbit_dev *dev); |
72 | 71 | ||
73 | static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); | 72 | static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); |
@@ -299,11 +298,12 @@ pcbit_transmit(struct pcbit_dev *dev) | |||
299 | */ | 298 | */ |
300 | 299 | ||
301 | void | 300 | void |
302 | pcbit_deliver(void *data) | 301 | pcbit_deliver(struct work_struct *work) |
303 | { | 302 | { |
304 | struct frame_buf *frame; | 303 | struct frame_buf *frame; |
305 | unsigned long flags, msg; | 304 | unsigned long flags, msg; |
306 | struct pcbit_dev *dev = (struct pcbit_dev *) data; | 305 | struct pcbit_dev *dev = |
306 | container_of(work, struct pcbit_dev, qdelivery); | ||
307 | 307 | ||
308 | spin_lock_irqsave(&dev->lock, flags); | 308 | spin_lock_irqsave(&dev->lock, flags); |
309 | 309 | ||
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h index 388bacefd23..19c18e88ff1 100644 --- a/drivers/isdn/pcbit/pcbit.h +++ b/drivers/isdn/pcbit/pcbit.h | |||
@@ -166,4 +166,6 @@ struct pcbit_ioctl { | |||
166 | #define L2_RUNNING 5 | 166 | #define L2_RUNNING 5 |
167 | #define L2_ERROR 6 | 167 | #define L2_ERROR 6 |
168 | 168 | ||
169 | extern void pcbit_deliver(struct work_struct *work); | ||
170 | |||
169 | #endif | 171 | #endif |
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index f1b6f563673..5ed41fe84e5 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c | |||
@@ -48,7 +48,8 @@ struct rackmeter_dma { | |||
48 | } ____cacheline_aligned; | 48 | } ____cacheline_aligned; |
49 | 49 | ||
50 | struct rackmeter_cpu { | 50 | struct rackmeter_cpu { |
51 | struct work_struct sniffer; | 51 | struct delayed_work sniffer; |
52 | struct rackmeter *rm; | ||
52 | cputime64_t prev_wall; | 53 | cputime64_t prev_wall; |
53 | cputime64_t prev_idle; | 54 | cputime64_t prev_idle; |
54 | int zero; | 55 | int zero; |
@@ -208,11 +209,12 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm) | |||
208 | rackmeter_do_pause(rm, 0); | 209 | rackmeter_do_pause(rm, 0); |
209 | } | 210 | } |
210 | 211 | ||
211 | static void rackmeter_do_timer(void *data) | 212 | static void rackmeter_do_timer(struct work_struct *work) |
212 | { | 213 | { |
213 | struct rackmeter *rm = data; | 214 | struct rackmeter_cpu *rcpu = |
215 | container_of(work, struct rackmeter_cpu, sniffer.work); | ||
216 | struct rackmeter *rm = rcpu->rm; | ||
214 | unsigned int cpu = smp_processor_id(); | 217 | unsigned int cpu = smp_processor_id(); |
215 | struct rackmeter_cpu *rcpu = &rm->cpu[cpu]; | ||
216 | cputime64_t cur_jiffies, total_idle_ticks; | 218 | cputime64_t cur_jiffies, total_idle_ticks; |
217 | unsigned int total_ticks, idle_ticks; | 219 | unsigned int total_ticks, idle_ticks; |
218 | int i, offset, load, cumm, pause; | 220 | int i, offset, load, cumm, pause; |
@@ -263,8 +265,10 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) | |||
263 | * on those machines yet | 265 | * on those machines yet |
264 | */ | 266 | */ |
265 | 267 | ||
266 | INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm); | 268 | rm->cpu[0].rm = rm; |
267 | INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm); | 269 | INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer); |
270 | rm->cpu[1].rm = rm; | ||
271 | INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer); | ||
268 | 272 | ||
269 | for_each_online_cpu(cpu) { | 273 | for_each_online_cpu(cpu) { |
270 | struct rackmeter_cpu *rcpu; | 274 | struct rackmeter_cpu *rcpu; |
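rack-meter shows what to do when container_of() does not reach far enough: the handler now lands on the per-CPU rackmeter_cpu rather than on the rackmeter that the old data pointer carried, so the per-CPU structure grows an rm backpointer which is filled in when the work is set up. A sketch with hypothetical names:

    struct meter_sketch;                     /* parent, forward declaration */

    struct meter_cpu_sketch {
            struct delayed_work sniffer;
            struct meter_sketch *rm;         /* backpointer to the parent */
    };

    struct meter_sketch {
            struct meter_cpu_sketch cpu[2];
    };

    static void sniffer_sketch(struct work_struct *work)
    {
            struct meter_cpu_sketch *rcpu =
                    container_of(work, struct meter_cpu_sketch, sniffer.work);
            struct meter_sketch *rm = rcpu->rm;

            /* sample this CPU's load and update rm's meter ... */
    }

    static void init_sniffers_sketch(struct meter_sketch *rm)
    {
            int i;

            for (i = 0; i < 2; i++) {
                    rm->cpu[i].rm = rm;
                    INIT_DELAYED_WORK(&rm->cpu[i].sniffer, sniffer_sketch);
            }
    }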
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 4f724cdd2ef..6dde27ab79a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -601,7 +601,7 @@ core_initcall(smu_late_init); | |||
601 | * sysfs visibility | 601 | * sysfs visibility |
602 | */ | 602 | */ |
603 | 603 | ||
604 | static void smu_expose_childs(void *unused) | 604 | static void smu_expose_childs(struct work_struct *unused) |
605 | { | 605 | { |
606 | struct device_node *np; | 606 | struct device_node *np; |
607 | 607 | ||
@@ -611,7 +611,7 @@ static void smu_expose_childs(void *unused) | |||
611 | &smu->of_dev->dev); | 611 | &smu->of_dev->dev); |
612 | } | 612 | } |
613 | 613 | ||
614 | static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); | 614 | static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); |
615 | 615 | ||
616 | static int smu_platform_probe(struct of_device* dev, | 616 | static int smu_platform_probe(struct of_device* dev, |
617 | const struct of_device_id *match) | 617 | const struct of_device_id *match) |
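Statically declared work items follow the same rule: DECLARE_WORK() drops its data argument, so a handler that never used its argument only needs the new prototype. A short sketch with invented names:

#include <linux/workqueue.h>

static void expose_children(struct work_struct *unused)
{
	/* no context needed; the body is unchanged */
}

/* old: static DECLARE_WORK(expose_work, expose_children, NULL); */
static DECLARE_WORK(expose_work, expose_children);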
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 08a40f4e4f6..ed2d4ef27fd 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -458,11 +458,11 @@ static void dec_pending(struct crypt_io *io, int error) | |||
458 | * interrupt context. | 458 | * interrupt context. |
459 | */ | 459 | */ |
460 | static struct workqueue_struct *_kcryptd_workqueue; | 460 | static struct workqueue_struct *_kcryptd_workqueue; |
461 | static void kcryptd_do_work(void *data); | 461 | static void kcryptd_do_work(struct work_struct *work); |
462 | 462 | ||
463 | static void kcryptd_queue_io(struct crypt_io *io) | 463 | static void kcryptd_queue_io(struct crypt_io *io) |
464 | { | 464 | { |
465 | INIT_WORK(&io->work, kcryptd_do_work, io); | 465 | INIT_WORK(&io->work, kcryptd_do_work); |
466 | queue_work(_kcryptd_workqueue, &io->work); | 466 | queue_work(_kcryptd_workqueue, &io->work); |
467 | } | 467 | } |
468 | 468 | ||
@@ -618,9 +618,9 @@ static void process_read_endio(struct crypt_io *io) | |||
618 | dec_pending(io, crypt_convert(cc, &ctx)); | 618 | dec_pending(io, crypt_convert(cc, &ctx)); |
619 | } | 619 | } |
620 | 620 | ||
621 | static void kcryptd_do_work(void *data) | 621 | static void kcryptd_do_work(struct work_struct *work) |
622 | { | 622 | { |
623 | struct crypt_io *io = data; | 623 | struct crypt_io *io = container_of(work, struct crypt_io, work); |
624 | 624 | ||
625 | if (io->post_process) | 625 | if (io->post_process) |
626 | process_read_endio(io); | 626 | process_read_endio(io); |
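dm-crypt is the per-request variant of the same pattern: the work item is embedded in the I/O descriptor, INIT_WORK() is redone for every request before it is queued, and the handler maps the work pointer back to the request. An illustrative sketch, hypothetical types only:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct crypt_req {
	struct work_struct work;
	int post_process;
	/* ... per-request state ... */
};

static struct workqueue_struct *crypt_wq;

static void crypt_do_work(struct work_struct *work)
{
	struct crypt_req *req = container_of(work, struct crypt_req, work);

	if (req->post_process) {
		/* read-completion path */
	} else {
		/* write path */
	}
}

static void crypt_queue(struct crypt_req *req)
{
	INIT_WORK(&req->work, crypt_do_work);	/* the old third argument is gone */
	queue_work(crypt_wq, &req->work);
}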
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index d754e0bc6e9..e77ee6fd104 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -104,8 +104,8 @@ typedef int (*action_fn) (struct pgpath *pgpath); | |||
104 | static kmem_cache_t *_mpio_cache; | 104 | static kmem_cache_t *_mpio_cache; |
105 | 105 | ||
106 | struct workqueue_struct *kmultipathd; | 106 | struct workqueue_struct *kmultipathd; |
107 | static void process_queued_ios(void *data); | 107 | static void process_queued_ios(struct work_struct *work); |
108 | static void trigger_event(void *data); | 108 | static void trigger_event(struct work_struct *work); |
109 | 109 | ||
110 | 110 | ||
111 | /*----------------------------------------------- | 111 | /*----------------------------------------------- |
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
173 | INIT_LIST_HEAD(&m->priority_groups); | 173 | INIT_LIST_HEAD(&m->priority_groups); |
174 | spin_lock_init(&m->lock); | 174 | spin_lock_init(&m->lock); |
175 | m->queue_io = 1; | 175 | m->queue_io = 1; |
176 | INIT_WORK(&m->process_queued_ios, process_queued_ios, m); | 176 | INIT_WORK(&m->process_queued_ios, process_queued_ios); |
177 | INIT_WORK(&m->trigger_event, trigger_event, m); | 177 | INIT_WORK(&m->trigger_event, trigger_event); |
178 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); | 178 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
179 | if (!m->mpio_pool) { | 179 | if (!m->mpio_pool) { |
180 | kfree(m); | 180 | kfree(m); |
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m) | |||
379 | } | 379 | } |
380 | } | 380 | } |
381 | 381 | ||
382 | static void process_queued_ios(void *data) | 382 | static void process_queued_ios(struct work_struct *work) |
383 | { | 383 | { |
384 | struct multipath *m = (struct multipath *) data; | 384 | struct multipath *m = |
385 | container_of(work, struct multipath, process_queued_ios); | ||
385 | struct hw_handler *hwh = &m->hw_handler; | 386 | struct hw_handler *hwh = &m->hw_handler; |
386 | struct pgpath *pgpath = NULL; | 387 | struct pgpath *pgpath = NULL; |
387 | unsigned init_required = 0, must_queue = 1; | 388 | unsigned init_required = 0, must_queue = 1; |
@@ -421,9 +422,10 @@ out: | |||
421 | * An event is triggered whenever a path is taken out of use. | 422 | * An event is triggered whenever a path is taken out of use. |
422 | * Includes path failure and PG bypass. | 423 | * Includes path failure and PG bypass. |
423 | */ | 424 | */ |
424 | static void trigger_event(void *data) | 425 | static void trigger_event(struct work_struct *work) |
425 | { | 426 | { |
426 | struct multipath *m = (struct multipath *) data; | 427 | struct multipath *m = |
428 | container_of(work, struct multipath, trigger_event); | ||
427 | 429 | ||
428 | dm_table_event(m->ti->table); | 430 | dm_table_event(m->ti->table); |
429 | } | 431 | } |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 48a653b3f51..fc8cbb168e3 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms) | |||
883 | do_writes(ms, &writes); | 883 | do_writes(ms, &writes); |
884 | } | 884 | } |
885 | 885 | ||
886 | static void do_work(void *ignored) | 886 | static void do_work(struct work_struct *ignored) |
887 | { | 887 | { |
888 | struct mirror_set *ms; | 888 | struct mirror_set *ms; |
889 | 889 | ||
@@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void) | |||
1269 | dm_dirty_log_exit(); | 1269 | dm_dirty_log_exit(); |
1270 | return r; | 1270 | return r; |
1271 | } | 1271 | } |
1272 | INIT_WORK(&_kmirrord_work, do_work, NULL); | 1272 | INIT_WORK(&_kmirrord_work, do_work); |
1273 | 1273 | ||
1274 | r = dm_register_target(&mirror_target); | 1274 | r = dm_register_target(&mirror_target); |
1275 | if (r < 0) { | 1275 | if (r < 0) { |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5281e009407..91c7aa1fed0 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #define SNAPSHOT_PAGES 256 | 40 | #define SNAPSHOT_PAGES 256 |
41 | 41 | ||
42 | struct workqueue_struct *ksnapd; | 42 | struct workqueue_struct *ksnapd; |
43 | static void flush_queued_bios(void *data); | 43 | static void flush_queued_bios(struct work_struct *work); |
44 | 44 | ||
45 | struct pending_exception { | 45 | struct pending_exception { |
46 | struct exception e; | 46 | struct exception e; |
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
528 | } | 528 | } |
529 | 529 | ||
530 | bio_list_init(&s->queued_bios); | 530 | bio_list_init(&s->queued_bios); |
531 | INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); | 531 | INIT_WORK(&s->queued_bios_work, flush_queued_bios); |
532 | 532 | ||
533 | /* Add snapshot to the list of snapshots for this origin */ | 533 | /* Add snapshot to the list of snapshots for this origin */ |
534 | /* Exceptions aren't triggered till snapshot_resume() is called */ | 534 | /* Exceptions aren't triggered till snapshot_resume() is called */ |
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio) | |||
603 | } | 603 | } |
604 | } | 604 | } |
605 | 605 | ||
606 | static void flush_queued_bios(void *data) | 606 | static void flush_queued_bios(struct work_struct *work) |
607 | { | 607 | { |
608 | struct dm_snapshot *s = (struct dm_snapshot *) data; | 608 | struct dm_snapshot *s = |
609 | container_of(work, struct dm_snapshot, queued_bios_work); | ||
609 | struct bio *queued_bios; | 610 | struct bio *queued_bios; |
610 | unsigned long flags; | 611 | unsigned long flags; |
611 | 612 | ||
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index f1db6eff485..b3c01496c73 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c | |||
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) | |||
417 | /* | 417 | /* |
418 | * kcopyd does this every time it's woken up. | 418 | * kcopyd does this every time it's woken up. |
419 | */ | 419 | */ |
420 | static void do_work(void *ignored) | 420 | static void do_work(struct work_struct *ignored) |
421 | { | 421 | { |
422 | /* | 422 | /* |
423 | * The order that these are called is *very* important. | 423 | * The order that these are called is *very* important. |
@@ -628,7 +628,7 @@ static int kcopyd_init(void) | |||
628 | } | 628 | } |
629 | 629 | ||
630 | kcopyd_clients++; | 630 | kcopyd_clients++; |
631 | INIT_WORK(&_kcopyd_work, do_work, NULL); | 631 | INIT_WORK(&_kcopyd_work, do_work); |
632 | mutex_unlock(&kcopyd_init_lock); | 632 | mutex_unlock(&kcopyd_init_lock); |
633 | return 0; | 633 | return 0; |
634 | } | 634 | } |
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c index 06893243f3d..6e166801505 100644 --- a/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/drivers/media/dvb/b2c2/flexcop-pci.c | |||
@@ -63,7 +63,7 @@ struct flexcop_pci { | |||
63 | 63 | ||
64 | unsigned long last_irq; | 64 | unsigned long last_irq; |
65 | 65 | ||
66 | struct work_struct irq_check_work; | 66 | struct delayed_work irq_check_work; |
67 | 67 | ||
68 | struct flexcop_device *fc_dev; | 68 | struct flexcop_device *fc_dev; |
69 | }; | 69 | }; |
@@ -97,9 +97,10 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static void flexcop_pci_irq_check_work(void *data) | 100 | static void flexcop_pci_irq_check_work(struct work_struct *work) |
101 | { | 101 | { |
102 | struct flexcop_pci *fc_pci = data; | 102 | struct flexcop_pci *fc_pci = |
103 | container_of(work, struct flexcop_pci, irq_check_work.work); | ||
103 | struct flexcop_device *fc = fc_pci->fc_dev; | 104 | struct flexcop_device *fc = fc_pci->fc_dev; |
104 | 105 | ||
105 | flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); | 106 | flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); |
@@ -371,7 +372,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
371 | if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) | 372 | if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) |
372 | goto err_fc_exit; | 373 | goto err_fc_exit; |
373 | 374 | ||
374 | INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); | 375 | INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work); |
375 | 376 | ||
376 | return ret; | 377 | return ret; |
377 | 378 | ||
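Where a periodic poll is converted, the struct field changes from work_struct to delayed_work, and container_of() has to name the nested member (irq_check_work.work here) because the work_struct the handler receives now sits one level down inside the delayed_work. Illustrative only, invented names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct poller {
	struct delayed_work check;	/* was: struct work_struct check; */
	int misses;
};

static void poller_check(struct work_struct *work)
{
	/* note the extra ".work": work points at check.work, not at check */
	struct poller *p = container_of(work, struct poller, check.work);

	p->misses = 0;			/* ... poll the hardware ... */
	schedule_delayed_work(&p->check, msecs_to_jiffies(100));
}

static void poller_start(struct poller *p)
{
	INIT_DELAYED_WORK(&p->check, poller_check);
	schedule_delayed_work(&p->check, 0);
}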
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c index 8a7dd507cf6..206c13e47a0 100644 --- a/drivers/media/dvb/cinergyT2/cinergyT2.c +++ b/drivers/media/dvb/cinergyT2/cinergyT2.c | |||
@@ -128,7 +128,7 @@ struct cinergyt2 { | |||
128 | 128 | ||
129 | struct dvbt_set_parameters_msg param; | 129 | struct dvbt_set_parameters_msg param; |
130 | struct dvbt_get_status_msg status; | 130 | struct dvbt_get_status_msg status; |
131 | struct work_struct query_work; | 131 | struct delayed_work query_work; |
132 | 132 | ||
133 | wait_queue_head_t poll_wq; | 133 | wait_queue_head_t poll_wq; |
134 | int pending_fe_events; | 134 | int pending_fe_events; |
@@ -142,7 +142,7 @@ struct cinergyt2 { | |||
142 | #ifdef ENABLE_RC | 142 | #ifdef ENABLE_RC |
143 | struct input_dev *rc_input_dev; | 143 | struct input_dev *rc_input_dev; |
144 | char phys[64]; | 144 | char phys[64]; |
145 | struct work_struct rc_query_work; | 145 | struct delayed_work rc_query_work; |
146 | int rc_input_event; | 146 | int rc_input_event; |
147 | u32 rc_last_code; | 147 | u32 rc_last_code; |
148 | unsigned long last_event_jiffies; | 148 | unsigned long last_event_jiffies; |
@@ -723,9 +723,10 @@ static struct dvb_device cinergyt2_fe_template = { | |||
723 | 723 | ||
724 | #ifdef ENABLE_RC | 724 | #ifdef ENABLE_RC |
725 | 725 | ||
726 | static void cinergyt2_query_rc (void *data) | 726 | static void cinergyt2_query_rc (struct work_struct *work) |
727 | { | 727 | { |
728 | struct cinergyt2 *cinergyt2 = data; | 728 | struct cinergyt2 *cinergyt2 = |
729 | container_of(work, struct cinergyt2, rc_query_work.work); | ||
729 | char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; | 730 | char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; |
730 | struct cinergyt2_rc_event rc_events[12]; | 731 | struct cinergyt2_rc_event rc_events[12]; |
731 | int n, len, i; | 732 | int n, len, i; |
@@ -806,7 +807,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2) | |||
806 | strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); | 807 | strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); |
807 | cinergyt2->rc_input_event = KEY_MAX; | 808 | cinergyt2->rc_input_event = KEY_MAX; |
808 | cinergyt2->rc_last_code = ~0; | 809 | cinergyt2->rc_last_code = ~0; |
809 | INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); | 810 | INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc); |
810 | 811 | ||
811 | input_dev->name = DRIVER_NAME " remote control"; | 812 | input_dev->name = DRIVER_NAME " remote control"; |
812 | input_dev->phys = cinergyt2->phys; | 813 | input_dev->phys = cinergyt2->phys; |
@@ -847,9 +848,10 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { } | |||
847 | 848 | ||
848 | #endif /* ENABLE_RC */ | 849 | #endif /* ENABLE_RC */ |
849 | 850 | ||
850 | static void cinergyt2_query (void *data) | 851 | static void cinergyt2_query (struct work_struct *work) |
851 | { | 852 | { |
852 | struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; | 853 | struct cinergyt2 *cinergyt2 = |
854 | container_of(work, struct cinergyt2, query_work.work); | ||
853 | char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | 855 | char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; |
854 | struct dvbt_get_status_msg *s = &cinergyt2->status; | 856 | struct dvbt_get_status_msg *s = &cinergyt2->status; |
855 | uint8_t lock_bits; | 857 | uint8_t lock_bits; |
@@ -893,7 +895,7 @@ static int cinergyt2_probe (struct usb_interface *intf, | |||
893 | 895 | ||
894 | mutex_init(&cinergyt2->sem); | 896 | mutex_init(&cinergyt2->sem); |
895 | init_waitqueue_head (&cinergyt2->poll_wq); | 897 | init_waitqueue_head (&cinergyt2->poll_wq); |
896 | INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); | 898 | INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query); |
897 | 899 | ||
898 | cinergyt2->udev = interface_to_usbdev(intf); | 900 | cinergyt2->udev = interface_to_usbdev(intf); |
899 | cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; | 901 | cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; |
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 8859ab74f0f..ebf4dc5190f 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c | |||
@@ -127,6 +127,7 @@ struct dvb_net_priv { | |||
127 | int in_use; | 127 | int in_use; |
128 | struct net_device_stats stats; | 128 | struct net_device_stats stats; |
129 | u16 pid; | 129 | u16 pid; |
130 | struct net_device *net; | ||
130 | struct dvb_net *host; | 131 | struct dvb_net *host; |
131 | struct dmx_demux *demux; | 132 | struct dmx_demux *demux; |
132 | struct dmx_section_feed *secfeed; | 133 | struct dmx_section_feed *secfeed; |
@@ -1123,10 +1124,11 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc) | |||
1123 | } | 1124 | } |
1124 | 1125 | ||
1125 | 1126 | ||
1126 | static void wq_set_multicast_list (void *data) | 1127 | static void wq_set_multicast_list (struct work_struct *work) |
1127 | { | 1128 | { |
1128 | struct net_device *dev = data; | 1129 | struct dvb_net_priv *priv = |
1129 | struct dvb_net_priv *priv = dev->priv; | 1130 | container_of(work, struct dvb_net_priv, set_multicast_list_wq); |
1131 | struct net_device *dev = priv->net; | ||
1130 | 1132 | ||
1131 | dvb_net_feed_stop(dev); | 1133 | dvb_net_feed_stop(dev); |
1132 | priv->rx_mode = RX_MODE_UNI; | 1134 | priv->rx_mode = RX_MODE_UNI; |
@@ -1167,9 +1169,11 @@ static void dvb_net_set_multicast_list (struct net_device *dev) | |||
1167 | } | 1169 | } |
1168 | 1170 | ||
1169 | 1171 | ||
1170 | static void wq_restart_net_feed (void *data) | 1172 | static void wq_restart_net_feed (struct work_struct *work) |
1171 | { | 1173 | { |
1172 | struct net_device *dev = data; | 1174 | struct dvb_net_priv *priv = |
1175 | container_of(work, struct dvb_net_priv, restart_net_feed_wq); | ||
1176 | struct net_device *dev = priv->net; | ||
1173 | 1177 | ||
1174 | if (netif_running(dev)) { | 1178 | if (netif_running(dev)) { |
1175 | dvb_net_feed_stop(dev); | 1179 | dvb_net_feed_stop(dev); |
@@ -1276,6 +1280,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) | |||
1276 | dvbnet->device[if_num] = net; | 1280 | dvbnet->device[if_num] = net; |
1277 | 1281 | ||
1278 | priv = net->priv; | 1282 | priv = net->priv; |
1283 | priv->net = net; | ||
1279 | priv->demux = dvbnet->demux; | 1284 | priv->demux = dvbnet->demux; |
1280 | priv->pid = pid; | 1285 | priv->pid = pid; |
1281 | priv->rx_mode = RX_MODE_UNI; | 1286 | priv->rx_mode = RX_MODE_UNI; |
@@ -1284,8 +1289,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) | |||
1284 | priv->feedtype = feedtype; | 1289 | priv->feedtype = feedtype; |
1285 | reset_ule(priv); | 1290 | reset_ule(priv); |
1286 | 1291 | ||
1287 | INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); | 1292 | INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list); |
1288 | INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); | 1293 | INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed); |
1289 | mutex_init(&priv->mutex); | 1294 | mutex_init(&priv->mutex); |
1290 | 1295 | ||
1291 | net->base_addr = pid; | 1296 | net->base_addr = pid; |
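dvb_net needs an object that does not contain the work item: the handlers want the net_device, but the work lives in the private area, so the patch adds a priv->net backpointer filled in when the interface is created. The same approach works whenever the natural context is not the structure embedding the work; sketched here with invented names:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demux_priv {
	struct work_struct restart_wq;
	struct net_device *net;		/* backpointer, set once at creation */
};

static void demux_restart_feed(struct work_struct *work)
{
	struct demux_priv *priv = container_of(work, struct demux_priv, restart_wq);
	struct net_device *dev = priv->net;

	if (netif_running(dev)) {
		/* ... stop and restart the feed on dev ... */
	}
}

static void demux_add_if(struct net_device *dev)
{
	struct demux_priv *priv = netdev_priv(dev);

	priv->net = dev;		/* make the handler's context reachable */
	INIT_WORK(&priv->restart_wq, demux_restart_feed);
}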
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c index 0a3a0b6c235..794e4471561 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c | |||
@@ -13,9 +13,10 @@ | |||
13 | * | 13 | * |
14 | * TODO: Fix the repeat rate of the input device. | 14 | * TODO: Fix the repeat rate of the input device. |
15 | */ | 15 | */ |
16 | static void dvb_usb_read_remote_control(void *data) | 16 | static void dvb_usb_read_remote_control(struct work_struct *work) |
17 | { | 17 | { |
18 | struct dvb_usb_device *d = data; | 18 | struct dvb_usb_device *d = |
19 | container_of(work, struct dvb_usb_device, rc_query_work.work); | ||
19 | u32 event; | 20 | u32 event; |
20 | int state; | 21 | int state; |
21 | 22 | ||
@@ -128,7 +129,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d) | |||
128 | 129 | ||
129 | input_register_device(d->rc_input_dev); | 130 | input_register_device(d->rc_input_dev); |
130 | 131 | ||
131 | INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); | 132 | INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control); |
132 | 133 | ||
133 | info("schedule remote query interval to %d msecs.", d->props.rc_interval); | 134 | info("schedule remote query interval to %d msecs.", d->props.rc_interval); |
134 | schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); | 135 | schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); |
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h index 376c45a8e77..0d721731a52 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb.h +++ b/drivers/media/dvb/dvb-usb/dvb-usb.h | |||
@@ -369,7 +369,7 @@ struct dvb_usb_device { | |||
369 | /* remote control */ | 369 | /* remote control */ |
370 | struct input_dev *rc_input_dev; | 370 | struct input_dev *rc_input_dev; |
371 | char rc_phys[64]; | 371 | char rc_phys[64]; |
372 | struct work_struct rc_query_work; | 372 | struct delayed_work rc_query_work; |
373 | u32 last_event; | 373 | u32 last_event; |
374 | int last_state; | 374 | int last_state; |
375 | 375 | ||
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c index 41f4b8d1755..b12cec94f4c 100644 --- a/drivers/media/video/cpia_pp.c +++ b/drivers/media/video/cpia_pp.c | |||
@@ -82,6 +82,8 @@ struct pp_cam_entry { | |||
82 | struct pardevice *pdev; | 82 | struct pardevice *pdev; |
83 | struct parport *port; | 83 | struct parport *port; |
84 | struct work_struct cb_task; | 84 | struct work_struct cb_task; |
85 | void (*cb_func)(void *cbdata); | ||
86 | void *cb_data; | ||
85 | int open_count; | 87 | int open_count; |
86 | wait_queue_head_t wq_stream; | 88 | wait_queue_head_t wq_stream; |
87 | /* image state flags */ | 89 | /* image state flags */ |
@@ -130,6 +132,20 @@ static void cpia_parport_disable_irq( struct parport *port ) { | |||
130 | #define PARPORT_CHUNK_SIZE PAGE_SIZE | 132 | #define PARPORT_CHUNK_SIZE PAGE_SIZE |
131 | 133 | ||
132 | 134 | ||
135 | static void cpia_pp_run_callback(struct work_struct *work) | ||
136 | { | ||
137 | void (*cb_func)(void *cbdata); | ||
138 | void *cb_data; | ||
139 | struct pp_cam_entry *cam; | ||
140 | |||
141 | cam = container_of(work, struct pp_cam_entry, cb_task); | ||
142 | cb_func = cam->cb_func; | ||
143 | cb_data = cam->cb_data; | ||
144 | work_release(work); | ||
145 | |||
146 | cb_func(cb_data); | ||
147 | } | ||
148 | |||
133 | /**************************************************************************** | 149 | /**************************************************************************** |
134 | * | 150 | * |
135 | * CPiA-specific low-level parport functions for nibble uploads | 151 | * CPiA-specific low-level parport functions for nibble uploads |
@@ -664,7 +680,9 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo | |||
664 | int retval = 0; | 680 | int retval = 0; |
665 | 681 | ||
666 | if(cam->port->irq != PARPORT_IRQ_NONE) { | 682 | if(cam->port->irq != PARPORT_IRQ_NONE) { |
667 | INIT_WORK(&cam->cb_task, cb, cbdata); | 683 | cam->cb_func = cb; |
684 | cam->cb_data = cbdata; | ||
685 | INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback); | ||
668 | } else { | 686 | } else { |
669 | retval = -1; | 687 | retval = -1; |
670 | } | 688 | } |
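cpia_pp cannot simply retype its handler because the callback and its argument come from outside the driver, so both are stored beside the work item and a small trampoline dispatches them; the patch itself uses the transitional non-auto-release helpers (INIT_WORK_NAR plus an explicit work_release()). A reduced sketch of the same trampoline idea using plain INIT_WORK, hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct cam {
	struct work_struct cb_task;
	void (*cb_func)(void *cbdata);	/* externally supplied callback */
	void *cb_data;			/* ... and its opaque argument   */
};

static void cam_run_callback(struct work_struct *work)
{
	struct cam *cam = container_of(work, struct cam, cb_task);

	cam->cb_func(cam->cb_data);	/* trampoline into the stored callback */
}

static void cam_register_callback(struct cam *cam,
				  void (*cb)(void *cbdata), void *cbdata)
{
	cam->cb_func = cb;
	cam->cb_data = cbdata;
	INIT_WORK(&cam->cb_task, cam_run_callback);
}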
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index 57e1c024a54..e60a0a52e4b 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c | |||
@@ -145,9 +145,9 @@ static void ir_timer(unsigned long data) | |||
145 | schedule_work(&ir->work); | 145 | schedule_work(&ir->work); |
146 | } | 146 | } |
147 | 147 | ||
148 | static void cx88_ir_work(void *data) | 148 | static void cx88_ir_work(struct work_struct *work) |
149 | { | 149 | { |
150 | struct cx88_IR *ir = data; | 150 | struct cx88_IR *ir = container_of(work, struct cx88_IR, work); |
151 | unsigned long timeout; | 151 | unsigned long timeout; |
152 | 152 | ||
153 | cx88_ir_handle_key(ir); | 153 | cx88_ir_handle_key(ir); |
@@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) | |||
308 | core->ir = ir; | 308 | core->ir = ir; |
309 | 309 | ||
310 | if (ir->polling) { | 310 | if (ir->polling) { |
311 | INIT_WORK(&ir->work, cx88_ir_work, ir); | 311 | INIT_WORK(&ir->work, cx88_ir_work); |
312 | init_timer(&ir->timer); | 312 | init_timer(&ir->timer); |
313 | ir->timer.function = ir_timer; | 313 | ir->timer.function = ir_timer; |
314 | ir->timer.data = (unsigned long)ir; | 314 | ir->timer.data = (unsigned long)ir; |
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 1457b160222..ab87e7bfe84 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c | |||
@@ -268,9 +268,9 @@ static void ir_timer(unsigned long data) | |||
268 | schedule_work(&ir->work); | 268 | schedule_work(&ir->work); |
269 | } | 269 | } |
270 | 270 | ||
271 | static void ir_work(void *data) | 271 | static void ir_work(struct work_struct *work) |
272 | { | 272 | { |
273 | struct IR_i2c *ir = data; | 273 | struct IR_i2c *ir = container_of(work, struct IR_i2c, work); |
274 | ir_key_poll(ir); | 274 | ir_key_poll(ir); |
275 | mod_timer(&ir->timer, jiffies+HZ/10); | 275 | mod_timer(&ir->timer, jiffies+HZ/10); |
276 | } | 276 | } |
@@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr, | |||
400 | ir->input->name,ir->input->phys,adap->name); | 400 | ir->input->name,ir->input->phys,adap->name); |
401 | 401 | ||
402 | /* start polling via eventd */ | 402 | /* start polling via eventd */ |
403 | INIT_WORK(&ir->work, ir_work, ir); | 403 | INIT_WORK(&ir->work, ir_work); |
404 | init_timer(&ir->timer); | 404 | init_timer(&ir->timer); |
405 | ir->timer.function = ir_timer; | 405 | ir->timer.function = ir_timer; |
406 | ir->timer.data = (unsigned long)ir; | 406 | ir->timer.data = (unsigned long)ir; |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c index f129f316d20..cf129746205 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-context.c +++ b/drivers/media/video/pvrusb2/pvrusb2-context.c | |||
@@ -45,16 +45,21 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | 47 | ||
48 | static void pvr2_context_poll(struct pvr2_context *mp) | 48 | static void pvr2_context_poll(struct work_struct *work) |
49 | { | 49 | { |
50 | struct pvr2_context *mp = | ||
51 | container_of(work, struct pvr2_context, workpoll); | ||
50 | pvr2_context_enter(mp); do { | 52 | pvr2_context_enter(mp); do { |
51 | pvr2_hdw_poll(mp->hdw); | 53 | pvr2_hdw_poll(mp->hdw); |
52 | } while (0); pvr2_context_exit(mp); | 54 | } while (0); pvr2_context_exit(mp); |
53 | } | 55 | } |
54 | 56 | ||
55 | 57 | ||
56 | static void pvr2_context_setup(struct pvr2_context *mp) | 58 | static void pvr2_context_setup(struct work_struct *work) |
57 | { | 59 | { |
60 | struct pvr2_context *mp = | ||
61 | container_of(work, struct pvr2_context, workinit); | ||
62 | |||
58 | pvr2_context_enter(mp); do { | 63 | pvr2_context_enter(mp); do { |
59 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; | 64 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; |
60 | pvr2_hdw_setup(mp->hdw); | 65 | pvr2_hdw_setup(mp->hdw); |
@@ -92,8 +97,8 @@ struct pvr2_context *pvr2_context_create( | |||
92 | } | 97 | } |
93 | 98 | ||
94 | mp->workqueue = create_singlethread_workqueue("pvrusb2"); | 99 | mp->workqueue = create_singlethread_workqueue("pvrusb2"); |
95 | INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); | 100 | INIT_WORK(&mp->workinit, pvr2_context_setup); |
96 | INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); | 101 | INIT_WORK(&mp->workpoll, pvr2_context_poll); |
97 | queue_work(mp->workqueue,&mp->workinit); | 102 | queue_work(mp->workqueue,&mp->workinit); |
98 | done: | 103 | done: |
99 | return mp; | 104 | return mp; |
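The pvrusb2 hunk also drops the (void (*)(void*)) casts that the old INIT_WORK calls relied on; with the new API the handlers carry the struct work_struct * prototype directly, so no cast is needed (calling through a mismatched function type was never well defined). A small sketch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ctx {
	struct work_struct workinit;
	int initialized;
};

static void ctx_setup(struct work_struct *work)	/* exact prototype, no cast */
{
	struct ctx *c = container_of(work, struct ctx, workinit);

	c->initialized = 1;		/* ... real setup work ... */
}

static void ctx_create(struct ctx *c, struct workqueue_struct *wq)
{
	/* old: INIT_WORK(&c->workinit, (void (*)(void *))ctx_setup, c); */
	INIT_WORK(&c->workinit, ctx_setup);
	queue_work(wq, &c->workinit);
}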
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c index 7b9859c3301..92eabf88a09 100644 --- a/drivers/media/video/saa6588.c +++ b/drivers/media/video/saa6588.c | |||
@@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data) | |||
324 | schedule_work(&s->work); | 324 | schedule_work(&s->work); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void saa6588_work(void *data) | 327 | static void saa6588_work(struct work_struct *work) |
328 | { | 328 | { |
329 | struct saa6588 *s = (struct saa6588 *)data; | 329 | struct saa6588 *s = container_of(work, struct saa6588, work); |
330 | 330 | ||
331 | saa6588_i2c_poll(s); | 331 | saa6588_i2c_poll(s); |
332 | mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); | 332 | mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); |
@@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind) | |||
419 | saa6588_configure(s); | 419 | saa6588_configure(s); |
420 | 420 | ||
421 | /* start polling via eventd */ | 421 | /* start polling via eventd */ |
422 | INIT_WORK(&s->work, saa6588_work, s); | 422 | INIT_WORK(&s->work, saa6588_work); |
423 | init_timer(&s->timer); | 423 | init_timer(&s->timer); |
424 | s->timer.function = saa6588_timer; | 424 | s->timer.function = saa6588_timer; |
425 | s->timer.data = (unsigned long)s; | 425 | s->timer.data = (unsigned long)s; |
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 65d044086ce..daaae870a2c 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c | |||
@@ -343,9 +343,10 @@ static struct video_device saa7134_empress_template = | |||
343 | .minor = -1, | 343 | .minor = -1, |
344 | }; | 344 | }; |
345 | 345 | ||
346 | static void empress_signal_update(void* data) | 346 | static void empress_signal_update(struct work_struct *work) |
347 | { | 347 | { |
348 | struct saa7134_dev* dev = (struct saa7134_dev*) data; | 348 | struct saa7134_dev* dev = |
349 | container_of(work, struct saa7134_dev, empress_workqueue); | ||
349 | 350 | ||
350 | if (dev->nosignal) { | 351 | if (dev->nosignal) { |
351 | dprintk("no video signal\n"); | 352 | dprintk("no video signal\n"); |
@@ -378,7 +379,7 @@ static int empress_init(struct saa7134_dev *dev) | |||
378 | "%s empress (%s)", dev->name, | 379 | "%s empress (%s)", dev->name, |
379 | saa7134_boards[dev->board].name); | 380 | saa7134_boards[dev->board].name); |
380 | 381 | ||
381 | INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); | 382 | INIT_WORK(&dev->empress_workqueue, empress_signal_update); |
382 | 383 | ||
383 | err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, | 384 | err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, |
384 | empress_nr[dev->nr]); | 385 | empress_nr[dev->nr]); |
@@ -399,7 +400,7 @@ static int empress_init(struct saa7134_dev *dev) | |||
399 | sizeof(struct saa7134_buf), | 400 | sizeof(struct saa7134_buf), |
400 | dev); | 401 | dev); |
401 | 402 | ||
402 | empress_signal_update(dev); | 403 | empress_signal_update(&dev->empress_workqueue); |
403 | return 0; | 404 | return 0; |
404 | } | 405 | } |
405 | 406 | ||
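Handlers that must also run synchronously (empress_init calls the update once right after registration) are now invoked by passing the address of the embedded work item, so the same container_of arithmetic applies on the direct call; tifm_7xx1 below does the same in its suspend and remove paths. A sketch with made-up names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct dev_state {
	struct work_struct update_work;
	int nosignal;
};

static void signal_update(struct work_struct *work)
{
	struct dev_state *dev = container_of(work, struct dev_state, update_work);

	if (dev->nosignal)
		return;
	/* ... refresh signal-dependent settings ... */
}

static int dev_init(struct dev_state *dev)
{
	INIT_WORK(&dev->update_work, signal_update);
	signal_update(&dev->update_work);	/* synchronous initial update */
	return 0;
}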
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 1dd49177315..ef2b55e1991 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) | |||
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | static void | 1020 | static void |
1021 | mptfc_setup_reset(void *arg) | 1021 | mptfc_setup_reset(struct work_struct *work) |
1022 | { | 1022 | { |
1023 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 1023 | MPT_ADAPTER *ioc = |
1024 | container_of(work, MPT_ADAPTER, fc_setup_reset_work); | ||
1024 | u64 pn; | 1025 | u64 pn; |
1025 | struct mptfc_rport_info *ri; | 1026 | struct mptfc_rport_info *ri; |
1026 | 1027 | ||
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg) | |||
1043 | } | 1044 | } |
1044 | 1045 | ||
1045 | static void | 1046 | static void |
1046 | mptfc_rescan_devices(void *arg) | 1047 | mptfc_rescan_devices(struct work_struct *work) |
1047 | { | 1048 | { |
1048 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 1049 | MPT_ADAPTER *ioc = |
1050 | container_of(work, MPT_ADAPTER, fc_rescan_work); | ||
1049 | int ii; | 1051 | int ii; |
1050 | u64 pn; | 1052 | u64 pn; |
1051 | struct mptfc_rport_info *ri; | 1053 | struct mptfc_rport_info *ri; |
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1154 | } | 1156 | } |
1155 | 1157 | ||
1156 | spin_lock_init(&ioc->fc_rescan_work_lock); | 1158 | spin_lock_init(&ioc->fc_rescan_work_lock); |
1157 | INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); | 1159 | INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); |
1158 | INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); | 1160 | INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); |
1159 | 1161 | ||
1160 | spin_lock_irqsave(&ioc->FreeQlock, flags); | 1162 | spin_lock_irqsave(&ioc->FreeQlock, flags); |
1161 | 1163 | ||
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 314c3a27585..b7c4407c5e3 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c | |||
@@ -111,7 +111,8 @@ struct mpt_lan_priv { | |||
111 | u32 total_received; | 111 | u32 total_received; |
112 | struct net_device_stats stats; /* Per device statistics */ | 112 | struct net_device_stats stats; /* Per device statistics */ |
113 | 113 | ||
114 | struct work_struct post_buckets_task; | 114 | struct delayed_work post_buckets_task; |
115 | struct net_device *dev; | ||
115 | unsigned long post_buckets_active; | 116 | unsigned long post_buckets_active; |
116 | }; | 117 | }; |
117 | 118 | ||
@@ -132,7 +133,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, | |||
132 | static int mpt_lan_open(struct net_device *dev); | 133 | static int mpt_lan_open(struct net_device *dev); |
133 | static int mpt_lan_reset(struct net_device *dev); | 134 | static int mpt_lan_reset(struct net_device *dev); |
134 | static int mpt_lan_close(struct net_device *dev); | 135 | static int mpt_lan_close(struct net_device *dev); |
135 | static void mpt_lan_post_receive_buckets(void *dev_id); | 136 | static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv); |
136 | static void mpt_lan_wake_post_buckets_task(struct net_device *dev, | 137 | static void mpt_lan_wake_post_buckets_task(struct net_device *dev, |
137 | int priority); | 138 | int priority); |
138 | static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); | 139 | static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); |
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) | |||
345 | priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; | 346 | priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; |
346 | spin_unlock_irqrestore(&priv->rxfidx_lock, flags); | 347 | spin_unlock_irqrestore(&priv->rxfidx_lock, flags); |
347 | } else { | 348 | } else { |
348 | mpt_lan_post_receive_buckets(dev); | 349 | mpt_lan_post_receive_buckets(priv); |
349 | netif_wake_queue(dev); | 350 | netif_wake_queue(dev); |
350 | } | 351 | } |
351 | 352 | ||
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev) | |||
441 | 442 | ||
442 | dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); | 443 | dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); |
443 | 444 | ||
444 | mpt_lan_post_receive_buckets(dev); | 445 | mpt_lan_post_receive_buckets(priv); |
445 | printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", | 446 | printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", |
446 | IOC_AND_NETDEV_NAMES_s_s(dev)); | 447 | IOC_AND_NETDEV_NAMES_s_s(dev)); |
447 | 448 | ||
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) | |||
854 | 855 | ||
855 | if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { | 856 | if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { |
856 | if (priority) { | 857 | if (priority) { |
857 | schedule_work(&priv->post_buckets_task); | 858 | schedule_delayed_work(&priv->post_buckets_task, 0); |
858 | } else { | 859 | } else { |
859 | schedule_delayed_work(&priv->post_buckets_task, 1); | 860 | schedule_delayed_work(&priv->post_buckets_task, 1); |
860 | dioprintk((KERN_INFO MYNAM ": post_buckets queued on " | 861 | dioprintk((KERN_INFO MYNAM ": post_buckets queued on " |
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1188 | /* Simple SGE's only at the moment */ | 1189 | /* Simple SGE's only at the moment */ |
1189 | 1190 | ||
1190 | static void | 1191 | static void |
1191 | mpt_lan_post_receive_buckets(void *dev_id) | 1192 | mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) |
1192 | { | 1193 | { |
1193 | struct net_device *dev = dev_id; | 1194 | struct net_device *dev = priv->dev; |
1194 | struct mpt_lan_priv *priv = dev->priv; | ||
1195 | MPT_ADAPTER *mpt_dev = priv->mpt_dev; | 1195 | MPT_ADAPTER *mpt_dev = priv->mpt_dev; |
1196 | MPT_FRAME_HDR *mf; | 1196 | MPT_FRAME_HDR *mf; |
1197 | LANReceivePostRequest_t *pRecvReq; | 1197 | LANReceivePostRequest_t *pRecvReq; |
@@ -1335,6 +1335,13 @@ out: | |||
1335 | clear_bit(0, &priv->post_buckets_active); | 1335 | clear_bit(0, &priv->post_buckets_active); |
1336 | } | 1336 | } |
1337 | 1337 | ||
1338 | static void | ||
1339 | mpt_lan_post_receive_buckets_work(struct work_struct *work) | ||
1340 | { | ||
1341 | mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, | ||
1342 | post_buckets_task.work)); | ||
1343 | } | ||
1344 | |||
1338 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 1345 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
1339 | static struct net_device * | 1346 | static struct net_device * |
1340 | mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) | 1347 | mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) |
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) | |||
1350 | 1357 | ||
1351 | priv = netdev_priv(dev); | 1358 | priv = netdev_priv(dev); |
1352 | 1359 | ||
1360 | priv->dev = dev; | ||
1353 | priv->mpt_dev = mpt_dev; | 1361 | priv->mpt_dev = mpt_dev; |
1354 | priv->pnum = pnum; | 1362 | priv->pnum = pnum; |
1355 | 1363 | ||
1356 | memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); | 1364 | memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task)); |
1357 | INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); | 1365 | INIT_DELAYED_WORK(&priv->post_buckets_task, |
1366 | mpt_lan_post_receive_buckets_work); | ||
1358 | priv->post_buckets_active = 0; | 1367 | priv->post_buckets_active = 0; |
1359 | 1368 | ||
1360 | dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", | 1369 | dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", |
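mptlan keeps its core routine with the natural argument (the private structure), because several call sites still invoke it directly, and adds a thin wrapper as the workqueue entry point. The item also becomes a delayed_work, since it is queued both immediately and with a one-jiffy delay; the immediate case is now a zero delay. A sketch under those assumptions, hypothetical names:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct lan_priv {
	struct delayed_work post_buckets_task;
	unsigned long post_buckets_active;
};

/* core routine, still callable directly with its natural argument */
static void lan_post_buckets(struct lan_priv *priv)
{
	/* ... replenish receive buffers ... */
	clear_bit(0, &priv->post_buckets_active);
}

/* thin adapter used only as the workqueue entry point */
static void lan_post_buckets_work(struct work_struct *work)
{
	lan_post_buckets(container_of(work, struct lan_priv,
				      post_buckets_task.work));
}

static void lan_setup(struct lan_priv *priv)
{
	INIT_DELAYED_WORK(&priv->post_buckets_task, lan_post_buckets_work);
}

static void lan_wake_post_buckets(struct lan_priv *priv, int priority)
{
	if (test_and_set_bit(0, &priv->post_buckets_active) == 0)
		/* delay 0 takes the place of the old schedule_work() call */
		schedule_delayed_work(&priv->post_buckets_task, priority ? 0 : 1);
}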
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index b752a479f6d..4f0c530e47b 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc) | |||
2006 | *(Mutex LOCKED) | 2006 | *(Mutex LOCKED) |
2007 | */ | 2007 | */ |
2008 | static void | 2008 | static void |
2009 | mptsas_discovery_work(void * arg) | 2009 | mptsas_discovery_work(struct work_struct *work) |
2010 | { | 2010 | { |
2011 | struct mptsas_discovery_event *ev = arg; | 2011 | struct mptsas_discovery_event *ev = |
2012 | container_of(work, struct mptsas_discovery_event, work); | ||
2012 | MPT_ADAPTER *ioc = ev->ioc; | 2013 | MPT_ADAPTER *ioc = ev->ioc; |
2013 | 2014 | ||
2014 | mutex_lock(&ioc->sas_discovery_mutex); | 2015 | mutex_lock(&ioc->sas_discovery_mutex); |
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id) | |||
2068 | * Work queue thread to clear the persistency table | 2069 | * Work queue thread to clear the persistency table |
2069 | */ | 2070 | */ |
2070 | static void | 2071 | static void |
2071 | mptsas_persist_clear_table(void * arg) | 2072 | mptsas_persist_clear_table(struct work_struct *work) |
2072 | { | 2073 | { |
2073 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 2074 | MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task); |
2074 | 2075 | ||
2075 | mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); | 2076 | mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); |
2076 | } | 2077 | } |
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach) | |||
2093 | * Work queue thread to handle SAS hotplug events | 2094 | * Work queue thread to handle SAS hotplug events |
2094 | */ | 2095 | */ |
2095 | static void | 2096 | static void |
2096 | mptsas_hotplug_work(void *arg) | 2097 | mptsas_hotplug_work(struct work_struct *work) |
2097 | { | 2098 | { |
2098 | struct mptsas_hotplug_event *ev = arg; | 2099 | struct mptsas_hotplug_event *ev = |
2100 | container_of(work, struct mptsas_hotplug_event, work); | ||
2099 | MPT_ADAPTER *ioc = ev->ioc; | 2101 | MPT_ADAPTER *ioc = ev->ioc; |
2100 | struct mptsas_phyinfo *phy_info; | 2102 | struct mptsas_phyinfo *phy_info; |
2101 | struct sas_rphy *rphy; | 2103 | struct sas_rphy *rphy; |
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, | |||
2341 | break; | 2343 | break; |
2342 | } | 2344 | } |
2343 | 2345 | ||
2344 | INIT_WORK(&ev->work, mptsas_hotplug_work, ev); | 2346 | INIT_WORK(&ev->work, mptsas_hotplug_work); |
2345 | ev->ioc = ioc; | 2347 | ev->ioc = ioc; |
2346 | ev->handle = le16_to_cpu(sas_event_data->DevHandle); | 2348 | ev->handle = le16_to_cpu(sas_event_data->DevHandle); |
2347 | ev->parent_handle = | 2349 | ev->parent_handle = |
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, | |||
2366 | * Persistent table is full. | 2368 | * Persistent table is full. |
2367 | */ | 2369 | */ |
2368 | INIT_WORK(&ioc->sas_persist_task, | 2370 | INIT_WORK(&ioc->sas_persist_task, |
2369 | mptsas_persist_clear_table, (void *)ioc); | 2371 | mptsas_persist_clear_table); |
2370 | schedule_work(&ioc->sas_persist_task); | 2372 | schedule_work(&ioc->sas_persist_task); |
2371 | break; | 2373 | break; |
2372 | case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: | 2374 | case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: |
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc, | |||
2395 | return; | 2397 | return; |
2396 | } | 2398 | } |
2397 | 2399 | ||
2398 | INIT_WORK(&ev->work, mptsas_hotplug_work, ev); | 2400 | INIT_WORK(&ev->work, mptsas_hotplug_work); |
2399 | ev->ioc = ioc; | 2401 | ev->ioc = ioc; |
2400 | ev->id = raid_event_data->VolumeID; | 2402 | ev->id = raid_event_data->VolumeID; |
2401 | ev->event_type = MPTSAS_IGNORE_EVENT; | 2403 | ev->event_type = MPTSAS_IGNORE_EVENT; |
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, | |||
2474 | ev = kzalloc(sizeof(*ev), GFP_ATOMIC); | 2476 | ev = kzalloc(sizeof(*ev), GFP_ATOMIC); |
2475 | if (!ev) | 2477 | if (!ev) |
2476 | return; | 2478 | return; |
2477 | INIT_WORK(&ev->work, mptsas_discovery_work, ev); | 2479 | INIT_WORK(&ev->work, mptsas_discovery_work); |
2478 | ev->ioc = ioc; | 2480 | ev->ioc = ioc; |
2479 | schedule_work(&ev->work); | 2481 | schedule_work(&ev->work); |
2480 | }; | 2482 | }; |
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) | |||
2511 | break; | 2513 | break; |
2512 | case MPI_EVENT_PERSISTENT_TABLE_FULL: | 2514 | case MPI_EVENT_PERSISTENT_TABLE_FULL: |
2513 | INIT_WORK(&ioc->sas_persist_task, | 2515 | INIT_WORK(&ioc->sas_persist_task, |
2514 | mptsas_persist_clear_table, | 2516 | mptsas_persist_clear_table); |
2515 | (void *)ioc); | ||
2516 | schedule_work(&ioc->sas_persist_task); | 2517 | schedule_work(&ioc->sas_persist_task); |
2517 | break; | 2518 | break; |
2518 | case MPI_EVENT_SAS_DISCOVERY: | 2519 | case MPI_EVENT_SAS_DISCOVERY: |
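The mptsas events are the dynamically allocated case: each event is kzalloc'ed with its work item embedded, INIT_WORK() binds the handler (the old third argument pointing back at the allocation is no longer needed), and the handler recovers the event with container_of. A hedged sketch; the hypothetical handler simply frees the event when done:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct hotplug_event {
	struct work_struct work;
	u16 handle;
};

static void hotplug_work(struct work_struct *work)
{
	struct hotplug_event *ev = container_of(work, struct hotplug_event, work);

	/* ... act on ev->handle ... */
	kfree(ev);
}

static void send_hotplug_event(u16 handle)
{
	struct hotplug_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return;
	ev->handle = handle;
	INIT_WORK(&ev->work, hotplug_work);
	schedule_work(&ev->work);
}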
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index e4cc3dd5fc9..f422c0d0621 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
@@ -646,9 +646,10 @@ struct work_queue_wrapper { | |||
646 | int disk; | 646 | int disk; |
647 | }; | 647 | }; |
648 | 648 | ||
649 | static void mpt_work_wrapper(void *data) | 649 | static void mpt_work_wrapper(struct work_struct *work) |
650 | { | 650 | { |
651 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 651 | struct work_queue_wrapper *wqw = |
652 | container_of(work, struct work_queue_wrapper, work); | ||
652 | struct _MPT_SCSI_HOST *hd = wqw->hd; | 653 | struct _MPT_SCSI_HOST *hd = wqw->hd; |
653 | struct Scsi_Host *shost = hd->ioc->sh; | 654 | struct Scsi_Host *shost = hd->ioc->sh; |
654 | struct scsi_device *sdev; | 655 | struct scsi_device *sdev; |
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) | |||
695 | disk); | 696 | disk); |
696 | return; | 697 | return; |
697 | } | 698 | } |
698 | INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); | 699 | INIT_WORK(&wqw->work, mpt_work_wrapper); |
699 | wqw->hd = hd; | 700 | wqw->hd = hd; |
700 | wqw->disk = disk; | 701 | wqw->disk = disk; |
701 | 702 | ||
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table); | |||
784 | * renegotiate for a given target | 785 | * renegotiate for a given target |
785 | */ | 786 | */ |
786 | static void | 787 | static void |
787 | mptspi_dv_renegotiate_work(void *data) | 788 | mptspi_dv_renegotiate_work(struct work_struct *work) |
788 | { | 789 | { |
789 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 790 | struct work_queue_wrapper *wqw = |
791 | container_of(work, struct work_queue_wrapper, work); | ||
790 | struct _MPT_SCSI_HOST *hd = wqw->hd; | 792 | struct _MPT_SCSI_HOST *hd = wqw->hd; |
791 | struct scsi_device *sdev; | 793 | struct scsi_device *sdev; |
792 | 794 | ||
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) | |||
804 | if (!wqw) | 806 | if (!wqw) |
805 | return; | 807 | return; |
806 | 808 | ||
807 | INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); | 809 | INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); |
808 | wqw->hd = hd; | 810 | wqw->hd = hd; |
809 | 811 | ||
810 | schedule_work(&wqw->work); | 812 | schedule_work(&wqw->work); |
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 64130227574..7fc7399bd2e 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c | |||
@@ -232,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) | |||
232 | break; | 232 | break; |
233 | } | 233 | } |
234 | 234 | ||
235 | INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); | 235 | INIT_WORK(&evt->work, drv->event); |
236 | queue_work(drv->event_queue, &evt->work); | 236 | queue_work(drv->event_queue, &evt->work); |
237 | return 1; | 237 | return 1; |
238 | } | 238 | } |
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index a2350640384..9e529d8dd5c 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c | |||
@@ -371,8 +371,10 @@ static int i2o_exec_remove(struct device *dev) | |||
371 | * new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY | 371 | * new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY |
372 | * again, otherwise send LCT NOTIFY to get informed on next LCT change. | 372 | * again, otherwise send LCT NOTIFY to get informed on next LCT change. |
373 | */ | 373 | */ |
374 | static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work) | 374 | static void i2o_exec_lct_modified(struct work_struct *_work) |
375 | { | 375 | { |
376 | struct i2o_exec_lct_notify_work *work = | ||
377 | container_of(_work, struct i2o_exec_lct_notify_work, work); | ||
376 | u32 change_ind = 0; | 378 | u32 change_ind = 0; |
377 | struct i2o_controller *c = work->c; | 379 | struct i2o_controller *c = work->c; |
378 | 380 | ||
@@ -439,8 +441,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
439 | 441 | ||
440 | work->c = c; | 442 | work->c = c; |
441 | 443 | ||
442 | INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified, | 444 | INIT_WORK(&work->work, i2o_exec_lct_modified); |
443 | work); | ||
444 | queue_work(i2o_exec_driver.event_queue, &work->work); | 445 | queue_work(i2o_exec_driver.event_queue, &work->work); |
445 | return 1; | 446 | return 1; |
446 | } | 447 | } |
@@ -460,13 +461,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
460 | 461 | ||
461 | /** | 462 | /** |
462 | * i2o_exec_event - Event handling function | 463 | * i2o_exec_event - Event handling function |
463 | * @evt: Event which occurs | 464 | * @work: Work item in occurring event |
464 | * | 465 | * |
465 | * Handles events sent by the Executive device. At the moment does not do | 466 | * Handles events sent by the Executive device. At the moment does not do |
466 | * anything useful. | 467 | * anything useful. |
467 | */ | 468 | */ |
468 | static void i2o_exec_event(struct i2o_event *evt) | 469 | static void i2o_exec_event(struct work_struct *work) |
469 | { | 470 | { |
471 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
472 | |||
470 | if (likely(evt->i2o_dev)) | 473 | if (likely(evt->i2o_dev)) |
471 | osm_debug("Event received from device: %d\n", | 474 | osm_debug("Event received from device: %d\n", |
472 | evt->i2o_dev->lct_data.tid); | 475 | evt->i2o_dev->lct_data.tid); |
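For i2o, the per-driver event callback itself becomes the work handler: i2o_driver_dispatch() passes drv->event straight to INIT_WORK() with no function-pointer cast, and each OSM's event routine (exec, block) starts by recovering its i2o_event via container_of. A sketch of the dispatch side, with invented names for anything not visible in the patch:

#include <linux/workqueue.h>

struct osm_event {
	struct work_struct work;
	/* ... event payload ... */
};

struct osm_driver {
	/* the callback now has the work-handler type itself */
	void (*event)(struct work_struct *work);
	struct workqueue_struct *event_queue;
};

static int osm_dispatch(struct osm_driver *drv, struct osm_event *evt)
{
	/* no cast: drv->event already matches what INIT_WORK expects */
	INIT_WORK(&evt->work, drv->event);
	queue_work(drv->event_queue, &evt->work);
	return 1;
}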
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index eaba81bf2ec..70ae0025332 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -419,16 +419,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
419 | 419 | ||
420 | /** | 420 | /** |
421 | * i2o_block_delayed_request_fn - delayed request queue function | 421 | * i2o_block_delayed_request_fn - delayed request queue function |
422 | * delayed_request: the delayed request with the queue to start | 422 | * @work: the delayed request with the queue to start |
423 | * | 423 | * |
424 | * If the request queue is stopped for a disk, and there is no open | 424 | * If the request queue is stopped for a disk, and there is no open |
425 | * request, a new event is created, which calls this function to start | 425 | * request, a new event is created, which calls this function to start |
426 | * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never | 426 | * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never |
427 | * be started again. | 427 | * be started again. |
428 | */ | 428 | */ |
429 | static void i2o_block_delayed_request_fn(void *delayed_request) | 429 | static void i2o_block_delayed_request_fn(struct work_struct *work) |
430 | { | 430 | { |
431 | struct i2o_block_delayed_request *dreq = delayed_request; | 431 | struct i2o_block_delayed_request *dreq = |
432 | container_of(work, struct i2o_block_delayed_request, | ||
433 | work.work); | ||
432 | struct request_queue *q = dreq->queue; | 434 | struct request_queue *q = dreq->queue; |
433 | unsigned long flags; | 435 | unsigned long flags; |
434 | 436 | ||
@@ -538,8 +540,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m, | |||
538 | return 1; | 540 | return 1; |
539 | }; | 541 | }; |
540 | 542 | ||
541 | static void i2o_block_event(struct i2o_event *evt) | 543 | static void i2o_block_event(struct work_struct *work) |
542 | { | 544 | { |
545 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
543 | osm_debug("event received\n"); | 546 | osm_debug("event received\n"); |
544 | kfree(evt); | 547 | kfree(evt); |
545 | }; | 548 | }; |
@@ -938,8 +941,8 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
938 | continue; | 941 | continue; |
939 | 942 | ||
940 | dreq->queue = q; | 943 | dreq->queue = q; |
941 | INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, | 944 | INIT_DELAYED_WORK(&dreq->work, |
942 | dreq); | 945 | i2o_block_delayed_request_fn); |
943 | 946 | ||
944 | if (!queue_delayed_work(i2o_block_driver.event_queue, | 947 | if (!queue_delayed_work(i2o_block_driver.event_queue, |
945 | &dreq->work, | 948 | &dreq->work, |
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h index 4fdaa5bda41..d9fdc95b440 100644 --- a/drivers/message/i2o/i2o_block.h +++ b/drivers/message/i2o/i2o_block.h | |||
@@ -96,7 +96,7 @@ struct i2o_block_request { | |||
96 | 96 | ||
97 | /* I2O Block device delayed request */ | 97 | /* I2O Block device delayed request */ |
98 | struct i2o_block_delayed_request { | 98 | struct i2o_block_delayed_request { |
99 | struct work_struct work; | 99 | struct delayed_work work; |
100 | struct request_queue *queue; | 100 | struct request_queue *queue; |
101 | }; | 101 | }; |
102 | 102 | ||
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 1ba8754e938..2ab7add78f9 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c | |||
@@ -33,9 +33,10 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) | |||
33 | spin_unlock_irqrestore(&fm->lock, flags); | 33 | spin_unlock_irqrestore(&fm->lock, flags); |
34 | } | 34 | } |
35 | 35 | ||
36 | static void tifm_7xx1_remove_media(void *adapter) | 36 | static void tifm_7xx1_remove_media(struct work_struct *work) |
37 | { | 37 | { |
38 | struct tifm_adapter *fm = adapter; | 38 | struct tifm_adapter *fm = |
39 | container_of(work, struct tifm_adapter, media_remover); | ||
39 | unsigned long flags; | 40 | unsigned long flags; |
40 | int cnt; | 41 | int cnt; |
41 | struct tifm_dev *sock; | 42 | struct tifm_dev *sock; |
@@ -169,9 +170,10 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) | |||
169 | return base_addr + ((sock_num + 1) << 10); | 170 | return base_addr + ((sock_num + 1) << 10); |
170 | } | 171 | } |
171 | 172 | ||
172 | static void tifm_7xx1_insert_media(void *adapter) | 173 | static void tifm_7xx1_insert_media(struct work_struct *work) |
173 | { | 174 | { |
174 | struct tifm_adapter *fm = adapter; | 175 | struct tifm_adapter *fm = |
176 | container_of(work, struct tifm_adapter, media_inserter); | ||
175 | unsigned long flags; | 177 | unsigned long flags; |
176 | tifm_media_id media_id; | 178 | tifm_media_id media_id; |
177 | char *card_name = "xx"; | 179 | char *card_name = "xx"; |
@@ -261,7 +263,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) | |||
261 | spin_unlock_irqrestore(&fm->lock, flags); | 263 | spin_unlock_irqrestore(&fm->lock, flags); |
262 | flush_workqueue(fm->wq); | 264 | flush_workqueue(fm->wq); |
263 | 265 | ||
264 | tifm_7xx1_remove_media(fm); | 266 | tifm_7xx1_remove_media(&fm->media_remover); |
265 | 267 | ||
266 | pci_set_power_state(dev, PCI_D3hot); | 268 | pci_set_power_state(dev, PCI_D3hot); |
267 | pci_disable_device(dev); | 269 | pci_disable_device(dev); |
@@ -328,8 +330,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev, | |||
328 | if (!fm->sockets) | 330 | if (!fm->sockets) |
329 | goto err_out_free; | 331 | goto err_out_free; |
330 | 332 | ||
331 | INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm); | 333 | INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media); |
332 | INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm); | 334 | INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media); |
333 | fm->eject = tifm_7xx1_eject; | 335 | fm->eject = tifm_7xx1_eject; |
334 | pci_set_drvdata(dev, fm); | 336 | pci_set_drvdata(dev, fm); |
335 | 337 | ||
@@ -384,7 +386,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev) | |||
384 | 386 | ||
385 | flush_workqueue(fm->wq); | 387 | flush_workqueue(fm->wq); |
386 | 388 | ||
387 | tifm_7xx1_remove_media(fm); | 389 | tifm_7xx1_remove_media(&fm->media_remover); |
388 | 390 | ||
389 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | 391 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); |
390 | free_irq(dev->irq, fm); | 392 | free_irq(dev->irq, fm); |
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 9d190022a49..6f2a282e2b9 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -1419,18 +1419,16 @@ static void mmc_setup(struct mmc_host *host) | |||
1419 | */ | 1419 | */ |
1420 | void mmc_detect_change(struct mmc_host *host, unsigned long delay) | 1420 | void mmc_detect_change(struct mmc_host *host, unsigned long delay) |
1421 | { | 1421 | { |
1422 | if (delay) | 1422 | mmc_schedule_delayed_work(&host->detect, delay); |
1423 | mmc_schedule_delayed_work(&host->detect, delay); | ||
1424 | else | ||
1425 | mmc_schedule_work(&host->detect); | ||
1426 | } | 1423 | } |
1427 | 1424 | ||
1428 | EXPORT_SYMBOL(mmc_detect_change); | 1425 | EXPORT_SYMBOL(mmc_detect_change); |
1429 | 1426 | ||
1430 | 1427 | ||
1431 | static void mmc_rescan(void *data) | 1428 | static void mmc_rescan(struct work_struct *work) |
1432 | { | 1429 | { |
1433 | struct mmc_host *host = data; | 1430 | struct mmc_host *host = |
1431 | container_of(work, struct mmc_host, detect.work); | ||
1434 | struct list_head *l, *n; | 1432 | struct list_head *l, *n; |
1435 | unsigned char power_mode; | 1433 | unsigned char power_mode; |
1436 | 1434 | ||
@@ -1513,7 +1511,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
1513 | spin_lock_init(&host->lock); | 1511 | spin_lock_init(&host->lock); |
1514 | init_waitqueue_head(&host->wq); | 1512 | init_waitqueue_head(&host->wq); |
1515 | INIT_LIST_HEAD(&host->cards); | 1513 | INIT_LIST_HEAD(&host->cards); |
1516 | INIT_WORK(&host->detect, mmc_rescan, host); | 1514 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); |
1517 | 1515 | ||
1518 | /* | 1516 | /* |
1519 | * By default, hosts do not support SGIO or large requests. | 1517 | * By default, hosts do not support SGIO or large requests. |
@@ -1611,7 +1609,7 @@ EXPORT_SYMBOL(mmc_suspend_host); | |||
1611 | */ | 1609 | */ |
1612 | int mmc_resume_host(struct mmc_host *host) | 1610 | int mmc_resume_host(struct mmc_host *host) |
1613 | { | 1611 | { |
1614 | mmc_rescan(host); | 1612 | mmc_rescan(&host->detect.work); |
1615 | 1613 | ||
1616 | return 0; | 1614 | return 0; |
1617 | } | 1615 | } |
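In the MMC core the split between immediate and delayed scheduling disappears: a delay of zero gives immediate execution, so mmc_detect_change() always queues the delayed work, and the resume path calls the rescan handler synchronously through &host->detect.work. A reduced sketch, hypothetical names, assuming the same delayed_work layout:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct card_host {
	struct delayed_work detect;	/* was: struct work_struct detect; */
	int bus_dead;
};

static void card_host_rescan(struct work_struct *work)
{
	struct card_host *h = container_of(work, struct card_host, detect.work);

	if (h->bus_dead)
		return;
	/* ... probe for card insertion or removal ... */
}

static void card_detect_change(struct card_host *h, unsigned long delay)
{
	/* delay == 0 behaves like the old immediate schedule_work() branch */
	schedule_delayed_work(&h->detect, delay);
}

static int card_host_resume(struct card_host *h)
{
	card_host_rescan(&h->detect.work);	/* synchronous rescan on resume */
	return 0;
}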
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h index cd5e0ab3d84..149affe0b68 100644 --- a/drivers/mmc/mmc.h +++ b/drivers/mmc/mmc.h | |||
@@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host); | |||
20 | void mmc_free_host_sysfs(struct mmc_host *host); | 20 | void mmc_free_host_sysfs(struct mmc_host *host); |
21 | 21 | ||
22 | int mmc_schedule_work(struct work_struct *work); | 22 | int mmc_schedule_work(struct work_struct *work); |
23 | int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay); | 23 | int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); |
24 | void mmc_flush_scheduled_work(void); | 24 | void mmc_flush_scheduled_work(void); |
25 | #endif | 25 | #endif |
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c index ac532963604..e334acd045b 100644 --- a/drivers/mmc/mmc_sysfs.c +++ b/drivers/mmc/mmc_sysfs.c | |||
@@ -321,17 +321,9 @@ void mmc_free_host_sysfs(struct mmc_host *host) | |||
321 | static struct workqueue_struct *workqueue; | 321 | static struct workqueue_struct *workqueue; |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * Internal function. Schedule work in the MMC work queue. | ||
325 | */ | ||
326 | int mmc_schedule_work(struct work_struct *work) | ||
327 | { | ||
328 | return queue_work(workqueue, work); | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Internal function. Schedule delayed work in the MMC work queue. | 324 | * Internal function. Schedule delayed work in the MMC work queue. |
333 | */ | 325 | */ |
334 | int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay) | 326 | int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) |
335 | { | 327 | { |
336 | return queue_delayed_work(workqueue, work, delay); | 328 | return queue_delayed_work(workqueue, work, delay); |
337 | } | 329 | } |
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 0fdc55b08a6..e846499a004 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c | |||
@@ -99,7 +99,7 @@ struct tifm_sd { | |||
99 | 99 | ||
100 | struct mmc_request *req; | 100 | struct mmc_request *req; |
101 | struct work_struct cmd_handler; | 101 | struct work_struct cmd_handler; |
102 | struct work_struct abort_handler; | 102 | struct delayed_work abort_handler; |
103 | wait_queue_head_t can_eject; | 103 | wait_queue_head_t can_eject; |
104 | 104 | ||
105 | size_t written_blocks; | 105 | size_t written_blocks; |
@@ -496,9 +496,9 @@ err_out: | |||
496 | mmc_request_done(mmc, mrq); | 496 | mmc_request_done(mmc, mrq); |
497 | } | 497 | } |
498 | 498 | ||
499 | static void tifm_sd_end_cmd(void *data) | 499 | static void tifm_sd_end_cmd(struct work_struct *work) |
500 | { | 500 | { |
501 | struct tifm_sd *host = data; | 501 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
502 | struct tifm_dev *sock = host->dev; | 502 | struct tifm_dev *sock = host->dev; |
503 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 503 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
504 | struct mmc_request *mrq; | 504 | struct mmc_request *mrq; |
@@ -608,9 +608,9 @@ err_out: | |||
608 | mmc_request_done(mmc, mrq); | 608 | mmc_request_done(mmc, mrq); |
609 | } | 609 | } |
610 | 610 | ||
611 | static void tifm_sd_end_cmd_nodma(void *data) | 611 | static void tifm_sd_end_cmd_nodma(struct work_struct *work) |
612 | { | 612 | { |
613 | struct tifm_sd *host = (struct tifm_sd*)data; | 613 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
614 | struct tifm_dev *sock = host->dev; | 614 | struct tifm_dev *sock = host->dev; |
615 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 615 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
616 | struct mmc_request *mrq; | 616 | struct mmc_request *mrq; |
@@ -661,11 +661,14 @@ static void tifm_sd_end_cmd_nodma(void *data) | |||
661 | mmc_request_done(mmc, mrq); | 661 | mmc_request_done(mmc, mrq); |
662 | } | 662 | } |
663 | 663 | ||
664 | static void tifm_sd_abort(void *data) | 664 | static void tifm_sd_abort(struct work_struct *work) |
665 | { | 665 | { |
666 | struct tifm_sd *host = | ||
667 | container_of(work, struct tifm_sd, abort_handler.work); | ||
668 | |||
666 | printk(KERN_ERR DRIVER_NAME | 669 | printk(KERN_ERR DRIVER_NAME |
667 | ": card failed to respond for a long period of time"); | 670 | ": card failed to respond for a long period of time"); |
668 | tifm_eject(((struct tifm_sd*)data)->dev); | 671 | tifm_eject(host->dev); |
669 | } | 672 | } |
670 | 673 | ||
671 | static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 674 | static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
@@ -762,9 +765,9 @@ static struct mmc_host_ops tifm_sd_ops = { | |||
762 | .get_ro = tifm_sd_ro | 765 | .get_ro = tifm_sd_ro |
763 | }; | 766 | }; |
764 | 767 | ||
765 | static void tifm_sd_register_host(void *data) | 768 | static void tifm_sd_register_host(struct work_struct *work) |
766 | { | 769 | { |
767 | struct tifm_sd *host = (struct tifm_sd*)data; | 770 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
768 | struct tifm_dev *sock = host->dev; | 771 | struct tifm_dev *sock = host->dev; |
769 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 772 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
770 | unsigned long flags; | 773 | unsigned long flags; |
@@ -772,8 +775,7 @@ static void tifm_sd_register_host(void *data) | |||
772 | spin_lock_irqsave(&sock->lock, flags); | 775 | spin_lock_irqsave(&sock->lock, flags); |
773 | host->flags |= HOST_REG; | 776 | host->flags |= HOST_REG; |
774 | PREPARE_WORK(&host->cmd_handler, | 777 | PREPARE_WORK(&host->cmd_handler, |
775 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, | 778 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); |
776 | data); | ||
777 | spin_unlock_irqrestore(&sock->lock, flags); | 779 | spin_unlock_irqrestore(&sock->lock, flags); |
778 | dev_dbg(&sock->dev, "adding host\n"); | 780 | dev_dbg(&sock->dev, "adding host\n"); |
779 | mmc_add_host(mmc); | 781 | mmc_add_host(mmc); |
@@ -799,8 +801,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
799 | host->dev = sock; | 801 | host->dev = sock; |
800 | host->clk_div = 61; | 802 | host->clk_div = 61; |
801 | init_waitqueue_head(&host->can_eject); | 803 | init_waitqueue_head(&host->can_eject); |
802 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host); | 804 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host); |
803 | INIT_WORK(&host->abort_handler, tifm_sd_abort, host); | 805 | INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); |
804 | 806 | ||
805 | tifm_set_drvdata(sock, mmc); | 807 | tifm_set_drvdata(sock, mmc); |
806 | sock->signal_irq = tifm_sd_signal_irq; | 808 | sock->signal_irq = tifm_sd_signal_irq; |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index d02ed51abfc..931028f672d 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -594,7 +594,7 @@ struct rtl8139_private { | |||
594 | u32 rx_config; | 594 | u32 rx_config; |
595 | struct rtl_extra_stats xstats; | 595 | struct rtl_extra_stats xstats; |
596 | 596 | ||
597 | struct work_struct thread; | 597 | struct delayed_work thread; |
598 | 598 | ||
599 | struct mii_if_info mii; | 599 | struct mii_if_info mii; |
600 | unsigned int regs_len; | 600 | unsigned int regs_len; |
@@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); | |||
636 | static void rtl8139_set_rx_mode (struct net_device *dev); | 636 | static void rtl8139_set_rx_mode (struct net_device *dev); |
637 | static void __set_rx_mode (struct net_device *dev); | 637 | static void __set_rx_mode (struct net_device *dev); |
638 | static void rtl8139_hw_start (struct net_device *dev); | 638 | static void rtl8139_hw_start (struct net_device *dev); |
639 | static void rtl8139_thread (void *_data); | 639 | static void rtl8139_thread (struct work_struct *work); |
640 | static void rtl8139_tx_timeout_task(void *_data); | 640 | static void rtl8139_tx_timeout_task(struct work_struct *work); |
641 | static const struct ethtool_ops rtl8139_ethtool_ops; | 641 | static const struct ethtool_ops rtl8139_ethtool_ops; |
642 | 642 | ||
643 | /* write MMIO register, with flush */ | 643 | /* write MMIO register, with flush */ |
@@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
1010 | (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); | 1010 | (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); |
1011 | spin_lock_init (&tp->lock); | 1011 | spin_lock_init (&tp->lock); |
1012 | spin_lock_init (&tp->rx_lock); | 1012 | spin_lock_init (&tp->rx_lock); |
1013 | INIT_WORK(&tp->thread, rtl8139_thread, dev); | 1013 | INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); |
1014 | tp->mii.dev = dev; | 1014 | tp->mii.dev = dev; |
1015 | tp->mii.mdio_read = mdio_read; | 1015 | tp->mii.mdio_read = mdio_read; |
1016 | tp->mii.mdio_write = mdio_write; | 1016 | tp->mii.mdio_write = mdio_write; |
@@ -1596,15 +1596,16 @@ static inline void rtl8139_thread_iter (struct net_device *dev, | |||
1596 | RTL_R8 (Config1)); | 1596 | RTL_R8 (Config1)); |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | static void rtl8139_thread (void *_data) | 1599 | static void rtl8139_thread (struct work_struct *work) |
1600 | { | 1600 | { |
1601 | struct net_device *dev = _data; | 1601 | struct rtl8139_private *tp = |
1602 | struct rtl8139_private *tp = netdev_priv(dev); | 1602 | container_of(work, struct rtl8139_private, thread.work); |
1603 | struct net_device *dev = tp->mii.dev; | ||
1603 | unsigned long thr_delay = next_tick; | 1604 | unsigned long thr_delay = next_tick; |
1604 | 1605 | ||
1605 | if (tp->watchdog_fired) { | 1606 | if (tp->watchdog_fired) { |
1606 | tp->watchdog_fired = 0; | 1607 | tp->watchdog_fired = 0; |
1607 | rtl8139_tx_timeout_task(_data); | 1608 | rtl8139_tx_timeout_task(work); |
1608 | } else if (rtnl_trylock()) { | 1609 | } else if (rtnl_trylock()) { |
1609 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); | 1610 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); |
1610 | rtnl_unlock (); | 1611 | rtnl_unlock (); |
@@ -1646,10 +1647,11 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp) | |||
1646 | /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ | 1647 | /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ |
1647 | } | 1648 | } |
1648 | 1649 | ||
1649 | static void rtl8139_tx_timeout_task (void *_data) | 1650 | static void rtl8139_tx_timeout_task (struct work_struct *work) |
1650 | { | 1651 | { |
1651 | struct net_device *dev = _data; | 1652 | struct rtl8139_private *tp = |
1652 | struct rtl8139_private *tp = netdev_priv(dev); | 1653 | container_of(work, struct rtl8139_private, thread.work); |
1654 | struct net_device *dev = tp->mii.dev; | ||
1653 | void __iomem *ioaddr = tp->mmio_addr; | 1655 | void __iomem *ioaddr = tp->mmio_addr; |
1654 | int i; | 1656 | int i; |
1655 | u8 tmp8; | 1657 | u8 tmp8; |
@@ -1695,7 +1697,7 @@ static void rtl8139_tx_timeout (struct net_device *dev) | |||
1695 | struct rtl8139_private *tp = netdev_priv(dev); | 1697 | struct rtl8139_private *tp = netdev_priv(dev); |
1696 | 1698 | ||
1697 | if (!tp->have_thread) { | 1699 | if (!tp->have_thread) { |
1698 | INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); | 1700 | INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task); |
1699 | schedule_delayed_work(&tp->thread, next_tick); | 1701 | schedule_delayed_work(&tp->thread, next_tick); |
1700 | } else | 1702 | } else |
1701 | tp->watchdog_fired = 1; | 1703 | tp->watchdog_fired = 1; |
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index fc2f1d1c7ea..5bacb7587df 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev) | |||
4411 | } | 4411 | } |
4412 | 4412 | ||
4413 | static void | 4413 | static void |
4414 | bnx2_reset_task(void *data) | 4414 | bnx2_reset_task(struct work_struct *work) |
4415 | { | 4415 | { |
4416 | struct bnx2 *bp = data; | 4416 | struct bnx2 *bp = container_of(work, struct bnx2, reset_task); |
4417 | 4417 | ||
4418 | if (!netif_running(bp->dev)) | 4418 | if (!netif_running(bp->dev)) |
4419 | return; | 4419 | return; |
@@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5702 | bp->pdev = pdev; | 5702 | bp->pdev = pdev; |
5703 | 5703 | ||
5704 | spin_lock_init(&bp->phy_lock); | 5704 | spin_lock_init(&bp->phy_lock); |
5705 | INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); | 5705 | INIT_WORK(&bp->reset_task, bnx2_reset_task); |
5706 | 5706 | ||
5707 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); | 5707 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); |
5708 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); | 5708 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index fd2cc13f7d9..c8126484c2b 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp) | |||
4066 | return 0; | 4066 | return 0; |
4067 | } | 4067 | } |
4068 | 4068 | ||
4069 | static void cas_reset_task(void *data) | 4069 | static void cas_reset_task(struct work_struct *work) |
4070 | { | 4070 | { |
4071 | struct cas *cp = (struct cas *) data; | 4071 | struct cas *cp = container_of(work, struct cas, reset_task); |
4072 | #if 0 | 4072 | #if 0 |
4073 | int pending = atomic_read(&cp->reset_task_pending); | 4073 | int pending = atomic_read(&cp->reset_task_pending); |
4074 | #else | 4074 | #else |
@@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
5006 | atomic_set(&cp->reset_task_pending_spare, 0); | 5006 | atomic_set(&cp->reset_task_pending_spare, 0); |
5007 | atomic_set(&cp->reset_task_pending_mtu, 0); | 5007 | atomic_set(&cp->reset_task_pending_mtu, 0); |
5008 | #endif | 5008 | #endif |
5009 | INIT_WORK(&cp->reset_task, cas_reset_task, cp); | 5009 | INIT_WORK(&cp->reset_task, cas_reset_task); |
5010 | 5010 | ||
5011 | /* Default link parameters */ | 5011 | /* Default link parameters */ |
5012 | if (link_mode >= 0 && link_mode <= 6) | 5012 | if (link_mode >= 0 && link_mode <= 6) |
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index b265941e137..74758d2c7af 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h | |||
@@ -279,7 +279,7 @@ struct adapter { | |||
279 | struct petp *tp; | 279 | struct petp *tp; |
280 | 280 | ||
281 | struct port_info port[MAX_NPORTS]; | 281 | struct port_info port[MAX_NPORTS]; |
282 | struct work_struct stats_update_task; | 282 | struct delayed_work stats_update_task; |
283 | struct timer_list stats_update_timer; | 283 | struct timer_list stats_update_timer; |
284 | 284 | ||
285 | spinlock_t tpi_lock; | 285 | spinlock_t tpi_lock; |
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h index 60901f25014..cf914349988 100644 --- a/drivers/net/chelsio/cphy.h +++ b/drivers/net/chelsio/cphy.h | |||
@@ -91,7 +91,7 @@ struct cphy { | |||
91 | int state; /* Link status state machine */ | 91 | int state; /* Link status state machine */ |
92 | adapter_t *adapter; /* associated adapter */ | 92 | adapter_t *adapter; /* associated adapter */ |
93 | 93 | ||
94 | struct work_struct phy_update; | 94 | struct delayed_work phy_update; |
95 | 95 | ||
96 | u16 bmsr; | 96 | u16 bmsr; |
97 | int count; | 97 | int count; |
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 53bec673981..de48eadddbc 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -953,10 +953,11 @@ static void t1_netpoll(struct net_device *dev) | |||
953 | * Periodic accumulation of MAC statistics. This is used only if the MAC | 953 | * Periodic accumulation of MAC statistics. This is used only if the MAC |
954 | * does not have any other way to prevent stats counter overflow. | 954 | * does not have any other way to prevent stats counter overflow. |
955 | */ | 955 | */ |
956 | static void mac_stats_task(void *data) | 956 | static void mac_stats_task(struct work_struct *work) |
957 | { | 957 | { |
958 | int i; | 958 | int i; |
959 | struct adapter *adapter = data; | 959 | struct adapter *adapter = |
960 | container_of(work, struct adapter, stats_update_task.work); | ||
960 | 961 | ||
961 | for_each_port(adapter, i) { | 962 | for_each_port(adapter, i) { |
962 | struct port_info *p = &adapter->port[i]; | 963 | struct port_info *p = &adapter->port[i]; |
@@ -977,9 +978,10 @@ static void mac_stats_task(void *data) | |||
977 | /* | 978 | /* |
978 | * Processes elmer0 external interrupts in process context. | 979 | * Processes elmer0 external interrupts in process context. |
979 | */ | 980 | */ |
980 | static void ext_intr_task(void *data) | 981 | static void ext_intr_task(struct work_struct *work) |
981 | { | 982 | { |
982 | struct adapter *adapter = data; | 983 | struct adapter *adapter = |
984 | container_of(work, struct adapter, ext_intr_handler_task); | ||
983 | 985 | ||
984 | t1_elmer0_ext_intr_handler(adapter); | 986 | t1_elmer0_ext_intr_handler(adapter); |
985 | 987 | ||
@@ -1113,9 +1115,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1113 | spin_lock_init(&adapter->mac_lock); | 1115 | spin_lock_init(&adapter->mac_lock); |
1114 | 1116 | ||
1115 | INIT_WORK(&adapter->ext_intr_handler_task, | 1117 | INIT_WORK(&adapter->ext_intr_handler_task, |
1116 | ext_intr_task, adapter); | 1118 | ext_intr_task); |
1117 | INIT_WORK(&adapter->stats_update_task, mac_stats_task, | 1119 | INIT_DELAYED_WORK(&adapter->stats_update_task, |
1118 | adapter); | 1120 | mac_stats_task); |
1119 | 1121 | ||
1120 | pci_set_drvdata(pdev, netdev); | 1122 | pci_set_drvdata(pdev, netdev); |
1121 | } | 1123 | } |
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c index 0b90014d5b3..c7731b6f9de 100644 --- a/drivers/net/chelsio/my3126.c +++ b/drivers/net/chelsio/my3126.c | |||
@@ -93,9 +93,11 @@ static int my3126_interrupt_handler(struct cphy *cphy) | |||
93 | return cphy_cause_link_change; | 93 | return cphy_cause_link_change; |
94 | } | 94 | } |
95 | 95 | ||
96 | static void my3216_poll(void *arg) | 96 | static void my3216_poll(struct work_struct *work) |
97 | { | 97 | { |
98 | my3126_interrupt_handler(arg); | 98 | struct cphy *cphy = container_of(work, struct cphy, phy_update.work); |
99 | |||
100 | my3126_interrupt_handler(cphy); | ||
99 | } | 101 | } |
100 | 102 | ||
101 | static int my3126_set_loopback(struct cphy *cphy, int on) | 103 | static int my3126_set_loopback(struct cphy *cphy, int on) |
@@ -171,7 +173,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter, | |||
171 | if (cphy) | 173 | if (cphy) |
172 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); | 174 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); |
173 | 175 | ||
174 | INIT_WORK(&cphy->phy_update, my3216_poll, cphy); | 176 | INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); |
175 | cphy->bmsr = 0; | 177 | cphy->bmsr = 0; |
176 | 178 | ||
177 | return (cphy); | 179 | return (cphy); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 3a8df479cbd..03bf164f9e8 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -2102,9 +2102,10 @@ static void e100_tx_timeout(struct net_device *netdev) | |||
2102 | schedule_work(&nic->tx_timeout_task); | 2102 | schedule_work(&nic->tx_timeout_task); |
2103 | } | 2103 | } |
2104 | 2104 | ||
2105 | static void e100_tx_timeout_task(struct net_device *netdev) | 2105 | static void e100_tx_timeout_task(struct work_struct *work) |
2106 | { | 2106 | { |
2107 | struct nic *nic = netdev_priv(netdev); | 2107 | struct nic *nic = container_of(work, struct nic, tx_timeout_task); |
2108 | struct net_device *netdev = nic->netdev; | ||
2108 | 2109 | ||
2109 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", | 2110 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", |
2110 | readb(&nic->csr->scb.status)); | 2111 | readb(&nic->csr->scb.status)); |
@@ -2637,8 +2638,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, | |||
2637 | nic->blink_timer.function = e100_blink_led; | 2638 | nic->blink_timer.function = e100_blink_led; |
2638 | nic->blink_timer.data = (unsigned long)nic; | 2639 | nic->blink_timer.data = (unsigned long)nic; |
2639 | 2640 | ||
2640 | INIT_WORK(&nic->tx_timeout_task, | 2641 | INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); |
2641 | (void (*)(void *))e100_tx_timeout_task, netdev); | ||
2642 | 2642 | ||
2643 | if((err = e100_alloc(nic))) { | 2643 | if((err = e100_alloc(nic))) { |
2644 | DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); | 2644 | DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 32dde0adb68..73f3a85fd23 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev); | |||
190 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); | 190 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); |
191 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | 191 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); |
192 | static void e1000_tx_timeout(struct net_device *dev); | 192 | static void e1000_tx_timeout(struct net_device *dev); |
193 | static void e1000_reset_task(struct net_device *dev); | 193 | static void e1000_reset_task(struct work_struct *work); |
194 | static void e1000_smartspeed(struct e1000_adapter *adapter); | 194 | static void e1000_smartspeed(struct e1000_adapter *adapter); |
195 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | 195 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
196 | struct sk_buff *skb); | 196 | struct sk_buff *skb); |
@@ -914,8 +914,7 @@ e1000_probe(struct pci_dev *pdev, | |||
914 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 914 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
915 | adapter->phy_info_timer.data = (unsigned long) adapter; | 915 | adapter->phy_info_timer.data = (unsigned long) adapter; |
916 | 916 | ||
917 | INIT_WORK(&adapter->reset_task, | 917 | INIT_WORK(&adapter->reset_task, e1000_reset_task); |
918 | (void (*)(void *))e1000_reset_task, netdev); | ||
919 | 918 | ||
920 | e1000_check_options(adapter); | 919 | e1000_check_options(adapter); |
921 | 920 | ||
@@ -3306,9 +3305,10 @@ e1000_tx_timeout(struct net_device *netdev) | |||
3306 | } | 3305 | } |
3307 | 3306 | ||
3308 | static void | 3307 | static void |
3309 | e1000_reset_task(struct net_device *netdev) | 3308 | e1000_reset_task(struct work_struct *work) |
3310 | { | 3309 | { |
3311 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3310 | struct e1000_adapter *adapter = |
3311 | container_of(work, struct e1000_adapter, reset_task); | ||
3312 | 3312 | ||
3313 | e1000_reinit_locked(adapter); | 3313 | e1000_reinit_locked(adapter); |
3314 | } | 3314 | } |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 6ad69610141..83fa32f7239 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -2224,11 +2224,12 @@ static int ehea_stop(struct net_device *dev) | |||
2224 | return ret; | 2224 | return ret; |
2225 | } | 2225 | } |
2226 | 2226 | ||
2227 | static void ehea_reset_port(void *data) | 2227 | static void ehea_reset_port(struct work_struct *work) |
2228 | { | 2228 | { |
2229 | int ret; | 2229 | int ret; |
2230 | struct net_device *dev = data; | 2230 | struct ehea_port *port = |
2231 | struct ehea_port *port = netdev_priv(dev); | 2231 | container_of(work, struct ehea_port, reset_task); |
2232 | struct net_device *dev = port->netdev; | ||
2232 | 2233 | ||
2233 | port->resets++; | 2234 | port->resets++; |
2234 | down(&port->port_lock); | 2235 | down(&port->port_lock); |
@@ -2379,7 +2380,7 @@ static int ehea_setup_single_port(struct ehea_port *port, | |||
2379 | dev->tx_timeout = &ehea_tx_watchdog; | 2380 | dev->tx_timeout = &ehea_tx_watchdog; |
2380 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | 2381 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; |
2381 | 2382 | ||
2382 | INIT_WORK(&port->reset_task, ehea_reset_port, dev); | 2383 | INIT_WORK(&port->reset_task, ehea_reset_port); |
2383 | 2384 | ||
2384 | ehea_set_ethtool_ops(dev); | 2385 | ehea_set_ethtool_ops(dev); |
2385 | 2386 | ||
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 1ed9cccd3c1..3c33d6f6a6a 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -168,8 +168,9 @@ struct baycom_state { | |||
168 | int magic; | 168 | int magic; |
169 | 169 | ||
170 | struct pardevice *pdev; | 170 | struct pardevice *pdev; |
171 | struct net_device *dev; | ||
171 | unsigned int work_running; | 172 | unsigned int work_running; |
172 | struct work_struct run_work; | 173 | struct delayed_work run_work; |
173 | unsigned int modem; | 174 | unsigned int modem; |
174 | unsigned int bitrate; | 175 | unsigned int bitrate; |
175 | unsigned char stat; | 176 | unsigned char stat; |
@@ -659,16 +660,18 @@ static int receive(struct net_device *dev, int cnt) | |||
659 | #define GETTICK(x) | 660 | #define GETTICK(x) |
660 | #endif /* __i386__ */ | 661 | #endif /* __i386__ */ |
661 | 662 | ||
662 | static void epp_bh(struct net_device *dev) | 663 | static void epp_bh(struct work_struct *work) |
663 | { | 664 | { |
665 | struct net_device *dev; | ||
664 | struct baycom_state *bc; | 666 | struct baycom_state *bc; |
665 | struct parport *pp; | 667 | struct parport *pp; |
666 | unsigned char stat; | 668 | unsigned char stat; |
667 | unsigned char tmp[2]; | 669 | unsigned char tmp[2]; |
668 | unsigned int time1 = 0, time2 = 0, time3 = 0; | 670 | unsigned int time1 = 0, time2 = 0, time3 = 0; |
669 | int cnt, cnt2; | 671 | int cnt, cnt2; |
670 | 672 | ||
671 | bc = netdev_priv(dev); | 673 | bc = container_of(work, struct baycom_state, run_work.work); |
674 | dev = bc->dev; | ||
672 | if (!bc->work_running) | 675 | if (!bc->work_running) |
673 | return; | 676 | return; |
674 | baycom_int_freq(bc); | 677 | baycom_int_freq(bc); |
@@ -889,7 +892,7 @@ static int epp_open(struct net_device *dev) | |||
889 | return -EBUSY; | 892 | return -EBUSY; |
890 | } | 893 | } |
891 | dev->irq = /*pp->irq*/ 0; | 894 | dev->irq = /*pp->irq*/ 0; |
892 | INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev); | 895 | INIT_DELAYED_WORK(&bc->run_work, epp_bh); |
893 | bc->work_running = 1; | 896 | bc->work_running = 1; |
894 | bc->modem = EPP_CONVENTIONAL; | 897 | bc->modem = EPP_CONVENTIONAL; |
895 | if (eppconfig(bc)) | 898 | if (eppconfig(bc)) |
@@ -1213,6 +1216,7 @@ static void __init baycom_epp_dev_setup(struct net_device *dev) | |||
1213 | /* | 1216 | /* |
1214 | * initialize part of the baycom_state struct | 1217 | * initialize part of the baycom_state struct |
1215 | */ | 1218 | */ |
1219 | bc->dev = dev; | ||
1216 | bc->magic = BAYCOM_MAGIC; | 1220 | bc->magic = BAYCOM_MAGIC; |
1217 | bc->cfg.fclk = 19666600; | 1221 | bc->cfg.fclk = 19666600; |
1218 | bc->cfg.bps = 9600; | 1222 | bc->cfg.bps = 9600; |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index b32c52ed19d..f0c61f3b2a8 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs, | |||
560 | return ret; | 560 | return ret; |
561 | } | 561 | } |
562 | 562 | ||
563 | static void mcs_speed_work(void *arg) | 563 | static void mcs_speed_work(struct work_struct *work) |
564 | { | 564 | { |
565 | struct mcs_cb *mcs = arg; | 565 | struct mcs_cb *mcs = container_of(work, struct mcs_cb, work); |
566 | struct net_device *netdev = mcs->netdev; | 566 | struct net_device *netdev = mcs->netdev; |
567 | 567 | ||
568 | mcs_speed_change(mcs); | 568 | mcs_speed_change(mcs); |
@@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
927 | irda_qos_bits_to_value(&mcs->qos); | 927 | irda_qos_bits_to_value(&mcs->qos); |
928 | 928 | ||
929 | /* Speed change work initialisation*/ | 929 | /* Speed change work initialisation*/ |
930 | INIT_WORK(&mcs->work, mcs_speed_work, mcs); | 930 | INIT_WORK(&mcs->work, mcs_speed_work); |
931 | 931 | ||
932 | /* Override the network functions we need to use */ | 932 | /* Override the network functions we need to use */ |
933 | ndev->hard_start_xmit = mcs_hard_xmit; | 933 | ndev->hard_start_xmit = mcs_hard_xmit; |
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h index 9fa294a546d..2a57bc67ce3 100644 --- a/drivers/net/irda/sir-dev.h +++ b/drivers/net/irda/sir-dev.h | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | struct sir_fsm { | 23 | struct sir_fsm { |
24 | struct semaphore sem; | 24 | struct semaphore sem; |
25 | struct work_struct work; | 25 | struct delayed_work work; |
26 | unsigned state, substate; | 26 | unsigned state, substate; |
27 | int param; | 27 | int param; |
28 | int result; | 28 | int result; |
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 3b5854d10c1..17b0c3ab620 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c | |||
@@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev) | |||
100 | * Both must be unlocked/restarted on completion - but only on final exit. | 100 | * Both must be unlocked/restarted on completion - but only on final exit. |
101 | */ | 101 | */ |
102 | 102 | ||
103 | static void sirdev_config_fsm(void *data) | 103 | static void sirdev_config_fsm(struct work_struct *work) |
104 | { | 104 | { |
105 | struct sir_dev *dev = data; | 105 | struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work); |
106 | struct sir_fsm *fsm = &dev->fsm; | 106 | struct sir_fsm *fsm = &dev->fsm; |
107 | int next_state; | 107 | int next_state; |
108 | int ret = -1; | 108 | int ret = -1; |
@@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par | |||
309 | fsm->param = param; | 309 | fsm->param = param; |
310 | fsm->result = 0; | 310 | fsm->result = 0; |
311 | 311 | ||
312 | INIT_WORK(&fsm->work, sirdev_config_fsm, dev); | 312 | INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm); |
313 | queue_work(irda_sir_wq, &fsm->work); | 313 | queue_delayed_work(irda_sir_wq, &fsm->work, 0); |
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 2284e2ce169..d6f4f185bf3 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -166,7 +166,7 @@ struct veth_msg { | |||
166 | 166 | ||
167 | struct veth_lpar_connection { | 167 | struct veth_lpar_connection { |
168 | HvLpIndex remote_lp; | 168 | HvLpIndex remote_lp; |
169 | struct work_struct statemachine_wq; | 169 | struct delayed_work statemachine_wq; |
170 | struct veth_msg *msgs; | 170 | struct veth_msg *msgs; |
171 | int num_events; | 171 | int num_events; |
172 | struct veth_cap_data local_caps; | 172 | struct veth_cap_data local_caps; |
@@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = { | |||
456 | 456 | ||
457 | static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) | 457 | static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) |
458 | { | 458 | { |
459 | schedule_work(&cnx->statemachine_wq); | 459 | schedule_delayed_work(&cnx->statemachine_wq, 0); |
460 | } | 460 | } |
461 | 461 | ||
462 | static void veth_take_cap(struct veth_lpar_connection *cnx, | 462 | static void veth_take_cap(struct veth_lpar_connection *cnx, |
@@ -638,9 +638,11 @@ static int veth_process_caps(struct veth_lpar_connection *cnx) | |||
638 | } | 638 | } |
639 | 639 | ||
640 | /* FIXME: The gotos here are a bit dubious */ | 640 | /* FIXME: The gotos here are a bit dubious */ |
641 | static void veth_statemachine(void *p) | 641 | static void veth_statemachine(struct work_struct *work) |
642 | { | 642 | { |
643 | struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p; | 643 | struct veth_lpar_connection *cnx = |
644 | container_of(work, struct veth_lpar_connection, | ||
645 | statemachine_wq.work); | ||
644 | int rlp = cnx->remote_lp; | 646 | int rlp = cnx->remote_lp; |
645 | int rc; | 647 | int rc; |
646 | 648 | ||
@@ -827,7 +829,7 @@ static int veth_init_connection(u8 rlp) | |||
827 | 829 | ||
828 | cnx->remote_lp = rlp; | 830 | cnx->remote_lp = rlp; |
829 | spin_lock_init(&cnx->lock); | 831 | spin_lock_init(&cnx->lock); |
830 | INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); | 832 | INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine); |
831 | 833 | ||
832 | init_timer(&cnx->ack_timer); | 834 | init_timer(&cnx->ack_timer); |
833 | cnx->ack_timer.function = veth_timed_ack; | 835 | cnx->ack_timer.function = veth_timed_ack; |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 7b127212e62..e628126c9c4 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter); | |||
106 | static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); | 106 | static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); |
107 | void ixgb_set_ethtool_ops(struct net_device *netdev); | 107 | void ixgb_set_ethtool_ops(struct net_device *netdev); |
108 | static void ixgb_tx_timeout(struct net_device *dev); | 108 | static void ixgb_tx_timeout(struct net_device *dev); |
109 | static void ixgb_tx_timeout_task(struct net_device *dev); | 109 | static void ixgb_tx_timeout_task(struct work_struct *work); |
110 | static void ixgb_vlan_rx_register(struct net_device *netdev, | 110 | static void ixgb_vlan_rx_register(struct net_device *netdev, |
111 | struct vlan_group *grp); | 111 | struct vlan_group *grp); |
112 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 112 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); |
@@ -489,8 +489,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
489 | adapter->watchdog_timer.function = &ixgb_watchdog; | 489 | adapter->watchdog_timer.function = &ixgb_watchdog; |
490 | adapter->watchdog_timer.data = (unsigned long)adapter; | 490 | adapter->watchdog_timer.data = (unsigned long)adapter; |
491 | 491 | ||
492 | INIT_WORK(&adapter->tx_timeout_task, | 492 | INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); |
493 | (void (*)(void *))ixgb_tx_timeout_task, netdev); | ||
494 | 493 | ||
495 | strcpy(netdev->name, "eth%d"); | 494 | strcpy(netdev->name, "eth%d"); |
496 | if((err = register_netdev(netdev))) | 495 | if((err = register_netdev(netdev))) |
@@ -1493,9 +1492,10 @@ ixgb_tx_timeout(struct net_device *netdev) | |||
1493 | } | 1492 | } |
1494 | 1493 | ||
1495 | static void | 1494 | static void |
1496 | ixgb_tx_timeout_task(struct net_device *netdev) | 1495 | ixgb_tx_timeout_task(struct work_struct *work) |
1497 | { | 1496 | { |
1498 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1497 | struct ixgb_adapter *adapter = |
1498 | container_of(work, struct ixgb_adapter, tx_timeout_task); | ||
1499 | 1499 | ||
1500 | adapter->tx_timeout_count++; | 1500 | adapter->tx_timeout_count++; |
1501 | ixgb_down(adapter, TRUE); | 1501 | ixgb_down(adapter, TRUE); |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 36350e6db1c..38df4280238 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -2615,9 +2615,10 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) | |||
2615 | * This watchdog is used to check whether the board has suffered | 2615 | * This watchdog is used to check whether the board has suffered |
2616 | * from a parity error and needs to be recovered. | 2616 | * from a parity error and needs to be recovered. |
2617 | */ | 2617 | */ |
2618 | static void myri10ge_watchdog(void *arg) | 2618 | static void myri10ge_watchdog(struct work_struct *work) |
2619 | { | 2619 | { |
2620 | struct myri10ge_priv *mgp = arg; | 2620 | struct myri10ge_priv *mgp = |
2621 | container_of(work, struct myri10ge_priv, watchdog_work); | ||
2621 | u32 reboot; | 2622 | u32 reboot; |
2622 | int status; | 2623 | int status; |
2623 | u16 cmd, vendor; | 2624 | u16 cmd, vendor; |
@@ -2887,7 +2888,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2887 | (unsigned long)mgp); | 2888 | (unsigned long)mgp); |
2888 | 2889 | ||
2889 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); | 2890 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); |
2890 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); | 2891 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); |
2891 | status = register_netdev(netdev); | 2892 | status = register_netdev(netdev); |
2892 | if (status != 0) { | 2893 | if (status != 0) { |
2893 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | 2894 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index d925053fe59..9c588af8ab7 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -714,6 +714,7 @@ struct netxen_adapter { | |||
714 | spinlock_t lock; | 714 | spinlock_t lock; |
715 | struct work_struct watchdog_task; | 715 | struct work_struct watchdog_task; |
716 | struct work_struct tx_timeout_task; | 716 | struct work_struct tx_timeout_task; |
717 | struct net_device *netdev; | ||
717 | struct timer_list watchdog_timer; | 718 | struct timer_list watchdog_timer; |
718 | 719 | ||
719 | u32 curr_window; | 720 | u32 curr_window; |
@@ -921,7 +922,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | |||
921 | struct netxen_port *port); | 922 | struct netxen_port *port); |
922 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter); | 923 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter); |
923 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter); | 924 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter); |
924 | void netxen_watchdog_task(unsigned long v); | 925 | void netxen_watchdog_task(struct work_struct *work); |
925 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, | 926 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, |
926 | u32 ringid); | 927 | u32 ringid); |
927 | void netxen_process_cmd_ring(unsigned long data); | 928 | void netxen_process_cmd_ring(unsigned long data); |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 0dca029bc3e..eae18236aef 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -710,12 +710,13 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter) | |||
710 | return rv; | 710 | return rv; |
711 | } | 711 | } |
712 | 712 | ||
713 | void netxen_watchdog_task(unsigned long v) | 713 | void netxen_watchdog_task(struct work_struct *work) |
714 | { | 714 | { |
715 | int port_num; | 715 | int port_num; |
716 | struct netxen_port *port; | 716 | struct netxen_port *port; |
717 | struct net_device *netdev; | 717 | struct net_device *netdev; |
718 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | 718 | struct netxen_adapter *adapter = |
719 | container_of(work, struct netxen_adapter, watchdog_task); | ||
719 | 720 | ||
720 | if (netxen_nic_check_temp(adapter)) | 721 | if (netxen_nic_check_temp(adapter)) |
721 | return; | 722 | return; |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 1cb662d5bd7..df0bb36a1cf 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -64,7 +64,7 @@ static int netxen_nic_open(struct net_device *netdev); | |||
64 | static int netxen_nic_close(struct net_device *netdev); | 64 | static int netxen_nic_close(struct net_device *netdev); |
65 | static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); | 65 | static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); |
66 | static void netxen_tx_timeout(struct net_device *netdev); | 66 | static void netxen_tx_timeout(struct net_device *netdev); |
67 | static void netxen_tx_timeout_task(struct net_device *netdev); | 67 | static void netxen_tx_timeout_task(struct work_struct *work); |
68 | static void netxen_watchdog(unsigned long); | 68 | static void netxen_watchdog(unsigned long); |
69 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); | 69 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); |
70 | static int netxen_nic_ioctl(struct net_device *netdev, | 70 | static int netxen_nic_ioctl(struct net_device *netdev, |
@@ -274,8 +274,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
274 | adapter->ahw.xg_linkup = 0; | 274 | adapter->ahw.xg_linkup = 0; |
275 | adapter->watchdog_timer.function = &netxen_watchdog; | 275 | adapter->watchdog_timer.function = &netxen_watchdog; |
276 | adapter->watchdog_timer.data = (unsigned long)adapter; | 276 | adapter->watchdog_timer.data = (unsigned long)adapter; |
277 | INIT_WORK(&adapter->watchdog_task, | 277 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); |
278 | (void (*)(void *))netxen_watchdog_task, adapter); | ||
279 | adapter->ahw.pdev = pdev; | 278 | adapter->ahw.pdev = pdev; |
280 | adapter->proc_cmd_buf_counter = 0; | 279 | adapter->proc_cmd_buf_counter = 0; |
281 | pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); | 280 | pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); |
@@ -379,8 +378,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
379 | dev_addr); | 378 | dev_addr); |
380 | } | 379 | } |
381 | } | 380 | } |
382 | INIT_WORK(&adapter->tx_timeout_task, | 381 | adapter->netdev = netdev; |
383 | (void (*)(void *))netxen_tx_timeout_task, netdev); | 382 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); |
384 | netif_carrier_off(netdev); | 383 | netif_carrier_off(netdev); |
385 | netif_stop_queue(netdev); | 384 | netif_stop_queue(netdev); |
386 | 385 | ||
@@ -938,18 +937,20 @@ static void netxen_tx_timeout(struct net_device *netdev) | |||
938 | schedule_work(&adapter->tx_timeout_task); | 937 | schedule_work(&adapter->tx_timeout_task); |
939 | } | 938 | } |
940 | 939 | ||
941 | static void netxen_tx_timeout_task(struct net_device *netdev) | 940 | static void netxen_tx_timeout_task(struct work_struct *work) |
942 | { | 941 | { |
943 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | 942 | struct netxen_adapter *adapter = |
943 | container_of(work, struct netxen_adapter, tx_timeout_task); | ||
944 | struct net_device *netdev = adapter->netdev; | ||
944 | unsigned long flags; | 945 | unsigned long flags; |
945 | 946 | ||
946 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | 947 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", |
947 | netxen_nic_driver_name, netdev->name); | 948 | netxen_nic_driver_name, netdev->name); |
948 | 949 | ||
949 | spin_lock_irqsave(&port->adapter->lock, flags); | 950 | spin_lock_irqsave(&adapter->lock, flags); |
950 | netxen_nic_close(netdev); | 951 | netxen_nic_close(netdev); |
951 | netxen_nic_open(netdev); | 952 | netxen_nic_open(netdev); |
952 | spin_unlock_irqrestore(&port->adapter->lock, flags); | 953 | spin_unlock_irqrestore(&adapter->lock, flags); |
953 | netdev->trans_start = jiffies; | 954 | netdev->trans_start = jiffies; |
954 | netif_wake_queue(netdev); | 955 | netif_wake_queue(netdev); |
955 | } | 956 | } |
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index b0127c71a5b..312e0e33171 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -427,6 +427,7 @@ struct ns83820 { | |||
427 | u8 __iomem *base; | 427 | u8 __iomem *base; |
428 | 428 | ||
429 | struct pci_dev *pci_dev; | 429 | struct pci_dev *pci_dev; |
430 | struct net_device *ndev; | ||
430 | 431 | ||
431 | #ifdef NS83820_VLAN_ACCEL_SUPPORT | 432 | #ifdef NS83820_VLAN_ACCEL_SUPPORT |
432 | struct vlan_group *vlgrp; | 433 | struct vlan_group *vlgrp; |
@@ -631,10 +632,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev) | |||
631 | } | 632 | } |
632 | 633 | ||
633 | /* REFILL */ | 634 | /* REFILL */ |
634 | static inline void queue_refill(void *_dev) | 635 | static inline void queue_refill(struct work_struct *work) |
635 | { | 636 | { |
636 | struct net_device *ndev = _dev; | 637 | struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); |
637 | struct ns83820 *dev = PRIV(ndev); | 638 | struct net_device *ndev = dev->ndev; |
638 | 639 | ||
639 | rx_refill(ndev, GFP_KERNEL); | 640 | rx_refill(ndev, GFP_KERNEL); |
640 | if (dev->rx_info.up) | 641 | if (dev->rx_info.up) |
@@ -1841,6 +1842,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1841 | 1842 | ||
1842 | ndev = alloc_etherdev(sizeof(struct ns83820)); | 1843 | ndev = alloc_etherdev(sizeof(struct ns83820)); |
1843 | dev = PRIV(ndev); | 1844 | dev = PRIV(ndev); |
1845 | dev->ndev = ndev; | ||
1844 | err = -ENOMEM; | 1846 | err = -ENOMEM; |
1845 | if (!dev) | 1847 | if (!dev) |
1846 | goto out; | 1848 | goto out; |
@@ -1853,7 +1855,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1853 | SET_MODULE_OWNER(ndev); | 1855 | SET_MODULE_OWNER(ndev); |
1854 | SET_NETDEV_DEV(ndev, &pci_dev->dev); | 1856 | SET_NETDEV_DEV(ndev, &pci_dev->dev); |
1855 | 1857 | ||
1856 | INIT_WORK(&dev->tq_refill, queue_refill, ndev); | 1858 | INIT_WORK(&dev->tq_refill, queue_refill); |
1857 | tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); | 1859 | tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); |
1858 | 1860 | ||
1859 | err = pci_enable_device(pci_dev); | 1861 | err = pci_enable_device(pci_dev); |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 69813406782..8478dca3d8d 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -332,6 +332,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); | |||
332 | */ | 332 | */ |
333 | 333 | ||
334 | typedef struct local_info_t { | 334 | typedef struct local_info_t { |
335 | struct net_device *dev; | ||
335 | struct pcmcia_device *p_dev; | 336 | struct pcmcia_device *p_dev; |
336 | dev_node_t node; | 337 | dev_node_t node; |
337 | struct net_device_stats stats; | 338 | struct net_device_stats stats; |
@@ -353,7 +354,7 @@ typedef struct local_info_t { | |||
353 | */ | 354 | */ |
354 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); | 355 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); |
355 | static void do_tx_timeout(struct net_device *dev); | 356 | static void do_tx_timeout(struct net_device *dev); |
356 | static void xirc2ps_tx_timeout_task(void *data); | 357 | static void xirc2ps_tx_timeout_task(struct work_struct *work); |
357 | static struct net_device_stats *do_get_stats(struct net_device *dev); | 358 | static struct net_device_stats *do_get_stats(struct net_device *dev); |
358 | static void set_addresses(struct net_device *dev); | 359 | static void set_addresses(struct net_device *dev); |
359 | static void set_multicast_list(struct net_device *dev); | 360 | static void set_multicast_list(struct net_device *dev); |
@@ -567,6 +568,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
567 | if (!dev) | 568 | if (!dev) |
568 | return -ENOMEM; | 569 | return -ENOMEM; |
569 | local = netdev_priv(dev); | 570 | local = netdev_priv(dev); |
571 | local->dev = dev; | ||
570 | local->p_dev = link; | 572 | local->p_dev = link; |
571 | link->priv = dev; | 573 | link->priv = dev; |
572 | 574 | ||
@@ -591,7 +593,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
591 | #ifdef HAVE_TX_TIMEOUT | 593 | #ifdef HAVE_TX_TIMEOUT |
592 | dev->tx_timeout = do_tx_timeout; | 594 | dev->tx_timeout = do_tx_timeout; |
593 | dev->watchdog_timeo = TX_TIMEOUT; | 595 | dev->watchdog_timeo = TX_TIMEOUT; |
594 | INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); | 596 | INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); |
595 | #endif | 597 | #endif |
596 | 598 | ||
597 | return xirc2ps_config(link); | 599 | return xirc2ps_config(link); |
@@ -1324,9 +1326,11 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1324 | /*====================================================================*/ | 1326 | /*====================================================================*/ |
1325 | 1327 | ||
1326 | static void | 1328 | static void |
1327 | xirc2ps_tx_timeout_task(void *data) | 1329 | xirc2ps_tx_timeout_task(struct work_struct *work) |
1328 | { | 1330 | { |
1329 | struct net_device *dev = data; | 1331 | local_info_t *local = |
1332 | container_of(work, local_info_t, tx_timeout_task); | ||
1333 | struct net_device *dev = local->dev; | ||
1330 | /* reset the card */ | 1334 | /* reset the card */ |
1331 | do_reset(dev,1); | 1335 | do_reset(dev,1); |
1332 | dev->trans_start = jiffies; | 1336 | dev->trans_start = jiffies; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 88237bdb525..4044bb1ada8 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -397,7 +397,7 @@ out_unlock: | |||
397 | EXPORT_SYMBOL(phy_start_aneg); | 397 | EXPORT_SYMBOL(phy_start_aneg); |
398 | 398 | ||
399 | 399 | ||
400 | static void phy_change(void *data); | 400 | static void phy_change(struct work_struct *work); |
401 | static void phy_timer(unsigned long data); | 401 | static void phy_timer(unsigned long data); |
402 | 402 | ||
403 | /* phy_start_machine: | 403 | /* phy_start_machine: |
@@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev) | |||
555 | { | 555 | { |
556 | int err = 0; | 556 | int err = 0; |
557 | 557 | ||
558 | INIT_WORK(&phydev->phy_queue, phy_change, phydev); | 558 | INIT_WORK(&phydev->phy_queue, phy_change); |
559 | 559 | ||
560 | if (request_irq(phydev->irq, phy_interrupt, | 560 | if (request_irq(phydev->irq, phy_interrupt, |
561 | IRQF_SHARED, | 561 | IRQF_SHARED, |
@@ -598,10 +598,11 @@ EXPORT_SYMBOL(phy_stop_interrupts); | |||
598 | 598 | ||
599 | 599 | ||
600 | /* Scheduled by the phy_interrupt/timer to handle PHY changes */ | 600 | /* Scheduled by the phy_interrupt/timer to handle PHY changes */ |
601 | static void phy_change(void *data) | 601 | static void phy_change(struct work_struct *work) |
602 | { | 602 | { |
603 | int err; | 603 | int err; |
604 | struct phy_device *phydev = data; | 604 | struct phy_device *phydev = |
605 | container_of(work, struct phy_device, phy_queue); | ||
605 | 606 | ||
606 | err = phy_disable_interrupts(phydev); | 607 | err = phy_disable_interrupts(phydev); |
607 | 608 | ||
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 71afb274498..6bb085f5443 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG; | |||
138 | #define PLIP_NIBBLE_WAIT 3000 | 138 | #define PLIP_NIBBLE_WAIT 3000 |
139 | 139 | ||
140 | /* Bottom halves */ | 140 | /* Bottom halves */ |
141 | static void plip_kick_bh(struct net_device *dev); | 141 | static void plip_kick_bh(struct work_struct *work); |
142 | static void plip_bh(struct net_device *dev); | 142 | static void plip_bh(struct work_struct *work); |
143 | static void plip_timer_bh(struct net_device *dev); | 143 | static void plip_timer_bh(struct work_struct *work); |
144 | 144 | ||
145 | /* Interrupt handler */ | 145 | /* Interrupt handler */ |
146 | static void plip_interrupt(int irq, void *dev_id); | 146 | static void plip_interrupt(int irq, void *dev_id); |
@@ -207,9 +207,10 @@ struct plip_local { | |||
207 | 207 | ||
208 | struct net_local { | 208 | struct net_local { |
209 | struct net_device_stats enet_stats; | 209 | struct net_device_stats enet_stats; |
210 | struct net_device *dev; | ||
210 | struct work_struct immediate; | 211 | struct work_struct immediate; |
211 | struct work_struct deferred; | 212 | struct delayed_work deferred; |
212 | struct work_struct timer; | 213 | struct delayed_work timer; |
213 | struct plip_local snd_data; | 214 | struct plip_local snd_data; |
214 | struct plip_local rcv_data; | 215 | struct plip_local rcv_data; |
215 | struct pardevice *pardev; | 216 | struct pardevice *pardev; |
@@ -306,11 +307,11 @@ plip_init_netdev(struct net_device *dev) | |||
306 | nl->nibble = PLIP_NIBBLE_WAIT; | 307 | nl->nibble = PLIP_NIBBLE_WAIT; |
307 | 308 | ||
308 | /* Initialize task queue structures */ | 309 | /* Initialize task queue structures */ |
309 | INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); | 310 | INIT_WORK(&nl->immediate, plip_bh); |
310 | INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); | 311 | INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); |
311 | 312 | ||
312 | if (dev->irq == -1) | 313 | if (dev->irq == -1) |
313 | INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); | 314 | INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); |
314 | 315 | ||
315 | spin_lock_init(&nl->lock); | 316 | spin_lock_init(&nl->lock); |
316 | } | 317 | } |
@@ -319,9 +320,10 @@ plip_init_netdev(struct net_device *dev) | |||
319 | This routine is kicked by do_timer(). | 320 | This routine is kicked by do_timer(). |
320 | Request `plip_bh' to be invoked. */ | 321 | Request `plip_bh' to be invoked. */ |
321 | static void | 322 | static void |
322 | plip_kick_bh(struct net_device *dev) | 323 | plip_kick_bh(struct work_struct *work) |
323 | { | 324 | { |
324 | struct net_local *nl = netdev_priv(dev); | 325 | struct net_local *nl = |
326 | container_of(work, struct net_local, deferred.work); | ||
325 | 327 | ||
326 | if (nl->is_deferred) | 328 | if (nl->is_deferred) |
327 | schedule_work(&nl->immediate); | 329 | schedule_work(&nl->immediate); |
@@ -362,9 +364,9 @@ static const plip_func connection_state_table[] = | |||
362 | 364 | ||
363 | /* Bottom half handler of PLIP. */ | 365 | /* Bottom half handler of PLIP. */ |
364 | static void | 366 | static void |
365 | plip_bh(struct net_device *dev) | 367 | plip_bh(struct work_struct *work) |
366 | { | 368 | { |
367 | struct net_local *nl = netdev_priv(dev); | 369 | struct net_local *nl = container_of(work, struct net_local, immediate); |
368 | struct plip_local *snd = &nl->snd_data; | 370 | struct plip_local *snd = &nl->snd_data; |
369 | struct plip_local *rcv = &nl->rcv_data; | 371 | struct plip_local *rcv = &nl->rcv_data; |
370 | plip_func f; | 372 | plip_func f; |
@@ -372,20 +374,21 @@ plip_bh(struct net_device *dev) | |||
372 | 374 | ||
373 | nl->is_deferred = 0; | 375 | nl->is_deferred = 0; |
374 | f = connection_state_table[nl->connection]; | 376 | f = connection_state_table[nl->connection]; |
375 | if ((r = (*f)(dev, nl, snd, rcv)) != OK | 377 | if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK |
376 | && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { | 378 | && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { |
377 | nl->is_deferred = 1; | 379 | nl->is_deferred = 1; |
378 | schedule_delayed_work(&nl->deferred, 1); | 380 | schedule_delayed_work(&nl->deferred, 1); |
379 | } | 381 | } |
380 | } | 382 | } |
381 | 383 | ||
382 | static void | 384 | static void |
383 | plip_timer_bh(struct net_device *dev) | 385 | plip_timer_bh(struct work_struct *work) |
384 | { | 386 | { |
385 | struct net_local *nl = netdev_priv(dev); | 387 | struct net_local *nl = |
388 | container_of(work, struct net_local, timer.work); | ||
386 | 389 | ||
387 | if (!(atomic_read (&nl->kill_timer))) { | 390 | if (!(atomic_read (&nl->kill_timer))) { |
388 | plip_interrupt (-1, dev); | 391 | plip_interrupt (-1, nl->dev); |
389 | 392 | ||
390 | schedule_delayed_work(&nl->timer, 1); | 393 | schedule_delayed_work(&nl->timer, 1); |
391 | } | 394 | } |
@@ -1284,6 +1287,7 @@ static void plip_attach (struct parport *port) | |||
1284 | } | 1287 | } |
1285 | 1288 | ||
1286 | nl = netdev_priv(dev); | 1289 | nl = netdev_priv(dev); |
1290 | nl->dev = dev; | ||
1287 | nl->pardev = parport_register_device(port, name, plip_preempt, | 1291 | nl->pardev = parport_register_device(port, name, plip_preempt, |
1288 | plip_wakeup, plip_interrupt, | 1292 | plip_wakeup, plip_interrupt, |
1289 | 0, dev); | 1293 | 0, dev); |
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index ec640f6229a..d79d141a601 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2008 | "%s: Another function issued a reset to the " | 2008 | "%s: Another function issued a reset to the " |
2009 | "chip. ISR value = %x.\n", ndev->name, value); | 2009 | "chip. ISR value = %x.\n", ndev->name, value); |
2010 | } | 2010 | } |
2011 | queue_work(qdev->workqueue, &qdev->reset_work); | 2011 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); |
2012 | spin_unlock(&qdev->adapter_lock); | 2012 | spin_unlock(&qdev->adapter_lock); |
2013 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2013 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2014 | ql_disable_interrupts(qdev); | 2014 | ql_disable_interrupts(qdev); |
@@ -3182,11 +3182,13 @@ static void ql3xxx_tx_timeout(struct net_device *ndev) | |||
3182 | /* | 3182 | /* |
3183 | * Wake up the worker to process this event. | 3183 | * Wake up the worker to process this event. |
3184 | */ | 3184 | */ |
3185 | queue_work(qdev->workqueue, &qdev->tx_timeout_work); | 3185 | queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); |
3186 | } | 3186 | } |
3187 | 3187 | ||
3188 | static void ql_reset_work(struct ql3_adapter *qdev) | 3188 | static void ql_reset_work(struct work_struct *work) |
3189 | { | 3189 | { |
3190 | struct ql3_adapter *qdev = | ||
3191 | container_of(work, struct ql3_adapter, reset_work.work); | ||
3190 | struct net_device *ndev = qdev->ndev; | 3192 | struct net_device *ndev = qdev->ndev; |
3191 | u32 value; | 3193 | u32 value; |
3192 | struct ql_tx_buf_cb *tx_cb; | 3194 | struct ql_tx_buf_cb *tx_cb; |
@@ -3278,9 +3280,12 @@ static void ql_reset_work(struct ql3_adapter *qdev) | |||
3278 | } | 3280 | } |
3279 | } | 3281 | } |
3280 | 3282 | ||
3281 | static void ql_tx_timeout_work(struct ql3_adapter *qdev) | 3283 | static void ql_tx_timeout_work(struct work_struct *work) |
3282 | { | 3284 | { |
3283 | ql_cycle_adapter(qdev,QL_DO_RESET); | 3285 | struct ql3_adapter *qdev = |
3286 | container_of(work, struct ql3_adapter, tx_timeout_work.work); | ||
3287 | |||
3288 | ql_cycle_adapter(qdev, QL_DO_RESET); | ||
3284 | } | 3289 | } |
3285 | 3290 | ||
3286 | static void ql_get_board_info(struct ql3_adapter *qdev) | 3291 | static void ql_get_board_info(struct ql3_adapter *qdev) |
@@ -3459,9 +3464,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3459 | netif_stop_queue(ndev); | 3464 | netif_stop_queue(ndev); |
3460 | 3465 | ||
3461 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | 3466 | qdev->workqueue = create_singlethread_workqueue(ndev->name); |
3462 | INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); | 3467 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); |
3463 | INIT_WORK(&qdev->tx_timeout_work, | 3468 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); |
3464 | (void (*)(void *))ql_tx_timeout_work, qdev); | ||
3465 | 3469 | ||
3466 | init_timer(&qdev->adapter_timer); | 3470 | init_timer(&qdev->adapter_timer); |
3467 | qdev->adapter_timer.function = ql3xxx_timer; | 3471 | qdev->adapter_timer.function = ql3xxx_timer; |
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 65da2c0bfda..ea94de7fd07 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h | |||
@@ -1186,8 +1186,8 @@ struct ql3_adapter { | |||
1186 | u32 numPorts; | 1186 | u32 numPorts; |
1187 | struct net_device_stats stats; | 1187 | struct net_device_stats stats; |
1188 | struct workqueue_struct *workqueue; | 1188 | struct workqueue_struct *workqueue; |
1189 | struct work_struct reset_work; | 1189 | struct delayed_work reset_work; |
1190 | struct work_struct tx_timeout_work; | 1190 | struct delayed_work tx_timeout_work; |
1191 | u32 max_frame_size; | 1191 | u32 max_frame_size; |
1192 | }; | 1192 | }; |
1193 | 1193 | ||
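[Editor's note] Where the old code queued an item with no delay but the item is being converted to struct delayed_work, "queue it now" becomes a zero-jiffy delay, and container_of() has to name the embedded .work member. An illustrative sketch; struct bar and its functions are invented, not taken from the qla3xxx driver:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct bar {
		struct workqueue_struct *wq;
		struct delayed_work reset_work;		/* was struct work_struct */
	};

	static void bar_reset(struct work_struct *work)
	{
		/* delayed_work embeds its work_struct in a member named .work */
		struct bar *priv = container_of(work, struct bar, reset_work.work);

		/* ... perform the reset on priv ... */
		(void)priv;
	}

	static void bar_init(struct bar *priv)
	{
		INIT_DELAYED_WORK(&priv->reset_work, bar_reset);
	}

	static void bar_trigger(struct bar *priv)
	{
		/* "queue it now" is expressed as a zero-jiffy delay */
		queue_delayed_work(priv->wq, &priv->reset_work, 0);
	}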
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 45d3ca43195..85a392fab5c 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -424,6 +424,7 @@ struct ring_info { | |||
424 | struct rtl8169_private { | 424 | struct rtl8169_private { |
425 | void __iomem *mmio_addr; /* memory map physical address */ | 425 | void __iomem *mmio_addr; /* memory map physical address */ |
426 | struct pci_dev *pci_dev; /* Index of PCI device */ | 426 | struct pci_dev *pci_dev; /* Index of PCI device */ |
427 | struct net_device *dev; | ||
427 | struct net_device_stats stats; /* statistics of net device */ | 428 | struct net_device_stats stats; /* statistics of net device */ |
428 | spinlock_t lock; /* spin lock flag */ | 429 | spinlock_t lock; /* spin lock flag */ |
429 | u32 msg_enable; | 430 | u32 msg_enable; |
@@ -455,7 +456,7 @@ struct rtl8169_private { | |||
455 | void (*phy_reset_enable)(void __iomem *); | 456 | void (*phy_reset_enable)(void __iomem *); |
456 | unsigned int (*phy_reset_pending)(void __iomem *); | 457 | unsigned int (*phy_reset_pending)(void __iomem *); |
457 | unsigned int (*link_ok)(void __iomem *); | 458 | unsigned int (*link_ok)(void __iomem *); |
458 | struct work_struct task; | 459 | struct delayed_work task; |
459 | unsigned wol_enabled : 1; | 460 | unsigned wol_enabled : 1; |
460 | }; | 461 | }; |
461 | 462 | ||
@@ -1510,6 +1511,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1510 | SET_MODULE_OWNER(dev); | 1511 | SET_MODULE_OWNER(dev); |
1511 | SET_NETDEV_DEV(dev, &pdev->dev); | 1512 | SET_NETDEV_DEV(dev, &pdev->dev); |
1512 | tp = netdev_priv(dev); | 1513 | tp = netdev_priv(dev); |
1514 | tp->dev = dev; | ||
1513 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); | 1515 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
1514 | 1516 | ||
1515 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 1517 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
@@ -1782,7 +1784,7 @@ static int rtl8169_open(struct net_device *dev) | |||
1782 | if (retval < 0) | 1784 | if (retval < 0) |
1783 | goto err_free_rx; | 1785 | goto err_free_rx; |
1784 | 1786 | ||
1785 | INIT_WORK(&tp->task, NULL, dev); | 1787 | INIT_DELAYED_WORK(&tp->task, NULL); |
1786 | 1788 | ||
1787 | rtl8169_hw_start(dev); | 1789 | rtl8169_hw_start(dev); |
1788 | 1790 | ||
@@ -2105,11 +2107,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) | |||
2105 | tp->cur_tx = tp->dirty_tx = 0; | 2107 | tp->cur_tx = tp->dirty_tx = 0; |
2106 | } | 2108 | } |
2107 | 2109 | ||
2108 | static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) | 2110 | static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) |
2109 | { | 2111 | { |
2110 | struct rtl8169_private *tp = netdev_priv(dev); | 2112 | struct rtl8169_private *tp = netdev_priv(dev); |
2111 | 2113 | ||
2112 | PREPARE_WORK(&tp->task, task, dev); | 2114 | PREPARE_DELAYED_WORK(&tp->task, task); |
2113 | schedule_delayed_work(&tp->task, 4); | 2115 | schedule_delayed_work(&tp->task, 4); |
2114 | } | 2116 | } |
2115 | 2117 | ||
@@ -2128,9 +2130,11 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) | |||
2128 | netif_poll_enable(dev); | 2130 | netif_poll_enable(dev); |
2129 | } | 2131 | } |
2130 | 2132 | ||
2131 | static void rtl8169_reinit_task(void *_data) | 2133 | static void rtl8169_reinit_task(struct work_struct *work) |
2132 | { | 2134 | { |
2133 | struct net_device *dev = _data; | 2135 | struct rtl8169_private *tp = |
2136 | container_of(work, struct rtl8169_private, task.work); | ||
2137 | struct net_device *dev = tp->dev; | ||
2134 | int ret; | 2138 | int ret; |
2135 | 2139 | ||
2136 | if (netif_running(dev)) { | 2140 | if (netif_running(dev)) { |
@@ -2153,10 +2157,11 @@ static void rtl8169_reinit_task(void *_data) | |||
2153 | } | 2157 | } |
2154 | } | 2158 | } |
2155 | 2159 | ||
2156 | static void rtl8169_reset_task(void *_data) | 2160 | static void rtl8169_reset_task(struct work_struct *work) |
2157 | { | 2161 | { |
2158 | struct net_device *dev = _data; | 2162 | struct rtl8169_private *tp = |
2159 | struct rtl8169_private *tp = netdev_priv(dev); | 2163 | container_of(work, struct rtl8169_private, task.work); |
2164 | struct net_device *dev = tp->dev; | ||
2160 | 2165 | ||
2161 | if (!netif_running(dev)) | 2166 | if (!netif_running(dev)) |
2162 | return; | 2167 | return; |
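[Editor's note] r8169 picks its handler at schedule time, so the helper's function-pointer argument becomes the new work_func_t and the item is re-pointed with PREPARE_DELAYED_WORK() before each schedule. A rough sketch of that idiom with invented names, assuming the item was set up earlier with INIT_DELAYED_WORK() (a NULL handler at init time, as r8169 uses, should also be fine since PREPARE_DELAYED_WORK() overwrites it):

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct nic {
		struct delayed_work task;
	};

	static void nic_reset_task(struct work_struct *work)
	{
		struct nic *np = container_of(work, struct nic, task.work);

		/* ... reset path ... */
		(void)np;
	}

	static void nic_reinit_task(struct work_struct *work)
	{
		struct nic *np = container_of(work, struct nic, task.work);

		/* ... reinit path ... */
		(void)np;
	}

	/* Re-point the already-initialised item at a new handler, then arm it. */
	static void nic_schedule(struct nic *np, work_func_t task)
	{
		PREPARE_DELAYED_WORK(&np->task, task);
		schedule_delayed_work(&np->task, 4);
	}

	/* e.g.  nic_schedule(np, nic_reset_task);  from the error paths */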
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 33569ec9dbf..250cdbeefdf 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr) | |||
5872 | * Description: Sets the link status for the adapter | 5872 | * Description: Sets the link status for the adapter |
5873 | */ | 5873 | */ |
5874 | 5874 | ||
5875 | static void s2io_set_link(unsigned long data) | 5875 | static void s2io_set_link(struct work_struct *work) |
5876 | { | 5876 | { |
5877 | nic_t *nic = (nic_t *) data; | 5877 | nic_t *nic = container_of(work, nic_t, set_link_task); |
5878 | struct net_device *dev = nic->dev; | 5878 | struct net_device *dev = nic->dev; |
5879 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 5879 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
5880 | register u64 val64; | 5880 | register u64 val64; |
@@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp) | |||
6379 | * spin lock. | 6379 | * spin lock. |
6380 | */ | 6380 | */ |
6381 | 6381 | ||
6382 | static void s2io_restart_nic(unsigned long data) | 6382 | static void s2io_restart_nic(struct work_struct *work) |
6383 | { | 6383 | { |
6384 | struct net_device *dev = (struct net_device *) data; | 6384 | nic_t *sp = container_of(work, nic_t, rst_timer_task); |
6385 | nic_t *sp = dev->priv; | 6385 | struct net_device *dev = sp->dev; |
6386 | 6386 | ||
6387 | s2io_card_down(sp); | 6387 | s2io_card_down(sp); |
6388 | if (s2io_card_up(sp)) { | 6388 | if (s2io_card_up(sp)) { |
@@ -6992,10 +6992,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
6992 | 6992 | ||
6993 | dev->tx_timeout = &s2io_tx_watchdog; | 6993 | dev->tx_timeout = &s2io_tx_watchdog; |
6994 | dev->watchdog_timeo = WATCH_DOG_TIMEOUT; | 6994 | dev->watchdog_timeo = WATCH_DOG_TIMEOUT; |
6995 | INIT_WORK(&sp->rst_timer_task, | 6995 | INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); |
6996 | (void (*)(void *)) s2io_restart_nic, dev); | 6996 | INIT_WORK(&sp->set_link_task, s2io_set_link); |
6997 | INIT_WORK(&sp->set_link_task, | ||
6998 | (void (*)(void *)) s2io_set_link, sp); | ||
6999 | 6997 | ||
7000 | pci_save_state(sp->pdev); | 6998 | pci_save_state(sp->pdev); |
7001 | 6999 | ||
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 12b719f4d00..3b0bafd273c 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id); | |||
1000 | static irqreturn_t s2io_isr(int irq, void *dev_id); | 1000 | static irqreturn_t s2io_isr(int irq, void *dev_id); |
1001 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | 1001 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); |
1002 | static const struct ethtool_ops netdev_ethtool_ops; | 1002 | static const struct ethtool_ops netdev_ethtool_ops; |
1003 | static void s2io_set_link(unsigned long data); | 1003 | static void s2io_set_link(struct work_struct *work); |
1004 | static int s2io_set_swapper(nic_t * sp); | 1004 | static int s2io_set_swapper(nic_t * sp); |
1005 | static void s2io_card_down(nic_t *nic); | 1005 | static void s2io_card_down(nic_t *nic); |
1006 | static int s2io_card_up(nic_t *nic); | 1006 | static int s2io_card_up(nic_t *nic); |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index aaba458584f..b70ed79d412 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -280,6 +280,7 @@ enum sis190_feature { | |||
280 | struct sis190_private { | 280 | struct sis190_private { |
281 | void __iomem *mmio_addr; | 281 | void __iomem *mmio_addr; |
282 | struct pci_dev *pci_dev; | 282 | struct pci_dev *pci_dev; |
283 | struct net_device *dev; | ||
283 | struct net_device_stats stats; | 284 | struct net_device_stats stats; |
284 | spinlock_t lock; | 285 | spinlock_t lock; |
285 | u32 rx_buf_sz; | 286 | u32 rx_buf_sz; |
@@ -897,10 +898,11 @@ static void sis190_hw_start(struct net_device *dev) | |||
897 | netif_start_queue(dev); | 898 | netif_start_queue(dev); |
898 | } | 899 | } |
899 | 900 | ||
900 | static void sis190_phy_task(void * data) | 901 | static void sis190_phy_task(struct work_struct *work) |
901 | { | 902 | { |
902 | struct net_device *dev = data; | 903 | struct sis190_private *tp = |
903 | struct sis190_private *tp = netdev_priv(dev); | 904 | container_of(work, struct sis190_private, phy_task); |
905 | struct net_device *dev = tp->dev; | ||
904 | void __iomem *ioaddr = tp->mmio_addr; | 906 | void __iomem *ioaddr = tp->mmio_addr; |
905 | int phy_id = tp->mii_if.phy_id; | 907 | int phy_id = tp->mii_if.phy_id; |
906 | u16 val; | 908 | u16 val; |
@@ -1047,7 +1049,7 @@ static int sis190_open(struct net_device *dev) | |||
1047 | if (rc < 0) | 1049 | if (rc < 0) |
1048 | goto err_free_rx_1; | 1050 | goto err_free_rx_1; |
1049 | 1051 | ||
1050 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | 1052 | INIT_WORK(&tp->phy_task, sis190_phy_task); |
1051 | 1053 | ||
1052 | sis190_request_timer(dev); | 1054 | sis190_request_timer(dev); |
1053 | 1055 | ||
@@ -1436,6 +1438,7 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) | |||
1436 | SET_NETDEV_DEV(dev, &pdev->dev); | 1438 | SET_NETDEV_DEV(dev, &pdev->dev); |
1437 | 1439 | ||
1438 | tp = netdev_priv(dev); | 1440 | tp = netdev_priv(dev); |
1441 | tp->dev = dev; | ||
1439 | tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); | 1442 | tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); |
1440 | 1443 | ||
1441 | rc = pci_enable_device(pdev); | 1444 | rc = pci_enable_device(pdev); |
@@ -1798,7 +1801,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, | |||
1798 | 1801 | ||
1799 | sis190_init_rxfilter(dev); | 1802 | sis190_init_rxfilter(dev); |
1800 | 1803 | ||
1801 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | 1804 | INIT_WORK(&tp->phy_task, sis190_phy_task); |
1802 | 1805 | ||
1803 | dev->open = sis190_open; | 1806 | dev->open = sis190_open; |
1804 | dev->stop = sis190_close; | 1807 | dev->stop = sis190_close; |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 5513907e839..b60f0451f6c 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -1327,10 +1327,11 @@ static void xm_check_link(struct net_device *dev) | |||
1327 | * Since internal PHY is wired to a level triggered pin, can't | 1327 | * Since internal PHY is wired to a level triggered pin, can't |
1328 | * get an interrupt when carrier is detected. | 1328 | * get an interrupt when carrier is detected. |
1329 | */ | 1329 | */ |
1330 | static void xm_link_timer(void *arg) | 1330 | static void xm_link_timer(struct work_struct *work) |
1331 | { | 1331 | { |
1332 | struct net_device *dev = arg; | 1332 | struct skge_port *skge = |
1333 | struct skge_port *skge = netdev_priv(arg); | 1333 | container_of(work, struct skge_port, link_thread.work); |
1334 | struct net_device *dev = skge->netdev; | ||
1334 | struct skge_hw *hw = skge->hw; | 1335 | struct skge_hw *hw = skge->hw; |
1335 | int port = skge->port; | 1336 | int port = skge->port; |
1336 | 1337 | ||
@@ -3072,9 +3073,9 @@ static void skge_error_irq(struct skge_hw *hw) | |||
3072 | * because accessing phy registers requires spin wait which might | 3073 | * because accessing phy registers requires spin wait which might |
3073 | * cause excess interrupt latency. | 3074 | * cause excess interrupt latency. |
3074 | */ | 3075 | */ |
3075 | static void skge_extirq(void *arg) | 3076 | static void skge_extirq(struct work_struct *work) |
3076 | { | 3077 | { |
3077 | struct skge_hw *hw = arg; | 3078 | struct skge_hw *hw = container_of(work, struct skge_hw, phy_work); |
3078 | int port; | 3079 | int port; |
3079 | 3080 | ||
3080 | mutex_lock(&hw->phy_mutex); | 3081 | mutex_lock(&hw->phy_mutex); |
@@ -3456,7 +3457,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3456 | skge->port = port; | 3457 | skge->port = port; |
3457 | 3458 | ||
3458 | /* Only used for Genesis XMAC */ | 3459 | /* Only used for Genesis XMAC */ |
3459 | INIT_WORK(&skge->link_thread, xm_link_timer, dev); | 3460 | INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer); |
3460 | 3461 | ||
3461 | if (hw->chip_id != CHIP_ID_GENESIS) { | 3462 | if (hw->chip_id != CHIP_ID_GENESIS) { |
3462 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 3463 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
@@ -3543,7 +3544,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3543 | 3544 | ||
3544 | hw->pdev = pdev; | 3545 | hw->pdev = pdev; |
3545 | mutex_init(&hw->phy_mutex); | 3546 | mutex_init(&hw->phy_mutex); |
3546 | INIT_WORK(&hw->phy_work, skge_extirq, hw); | 3547 | INIT_WORK(&hw->phy_work, skge_extirq); |
3547 | spin_lock_init(&hw->hw_lock); | 3548 | spin_lock_init(&hw->hw_lock); |
3548 | 3549 | ||
3549 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | 3550 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 537c0aaa1db..23e5275d920 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -2456,7 +2456,7 @@ struct skge_port { | |||
2456 | 2456 | ||
2457 | struct net_device_stats net_stats; | 2457 | struct net_device_stats net_stats; |
2458 | 2458 | ||
2459 | struct work_struct link_thread; | 2459 | struct delayed_work link_thread; |
2460 | enum pause_control flow_control; | 2460 | enum pause_control flow_control; |
2461 | enum pause_status flow_status; | 2461 | enum pause_status flow_status; |
2462 | u8 rx_csum; | 2462 | u8 rx_csum; |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 95b6478f55c..e62a9586fb9 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -210,6 +210,7 @@ struct smc_local { | |||
210 | 210 | ||
211 | /* work queue */ | 211 | /* work queue */ |
212 | struct work_struct phy_configure; | 212 | struct work_struct phy_configure; |
213 | struct net_device *dev; | ||
213 | int work_pending; | 214 | int work_pending; |
214 | 215 | ||
215 | spinlock_t lock; | 216 | spinlock_t lock; |
@@ -1114,10 +1115,11 @@ static void smc_phy_check_media(struct net_device *dev, int init) | |||
1114 | * of autonegotiation.) If the RPC ANEG bit is cleared, the selection | 1115 | * of autonegotiation.) If the RPC ANEG bit is cleared, the selection |
1115 | * is controlled by the RPC SPEED and RPC DPLX bits. | 1116 | * is controlled by the RPC SPEED and RPC DPLX bits. |
1116 | */ | 1117 | */ |
1117 | static void smc_phy_configure(void *data) | 1118 | static void smc_phy_configure(struct work_struct *work) |
1118 | { | 1119 | { |
1119 | struct net_device *dev = data; | 1120 | struct smc_local *lp = |
1120 | struct smc_local *lp = netdev_priv(dev); | 1121 | container_of(work, struct smc_local, phy_configure); |
1122 | struct net_device *dev = lp->dev; | ||
1121 | void __iomem *ioaddr = lp->base; | 1123 | void __iomem *ioaddr = lp->base; |
1122 | int phyaddr = lp->mii.phy_id; | 1124 | int phyaddr = lp->mii.phy_id; |
1123 | int my_phy_caps; /* My PHY capabilities */ | 1125 | int my_phy_caps; /* My PHY capabilities */ |
@@ -1592,7 +1594,7 @@ smc_open(struct net_device *dev) | |||
1592 | 1594 | ||
1593 | /* Configure the PHY, initialize the link state */ | 1595 | /* Configure the PHY, initialize the link state */ |
1594 | if (lp->phy_type != 0) | 1596 | if (lp->phy_type != 0) |
1595 | smc_phy_configure(dev); | 1597 | smc_phy_configure(&lp->phy_configure); |
1596 | else { | 1598 | else { |
1597 | spin_lock_irq(&lp->lock); | 1599 | spin_lock_irq(&lp->lock); |
1598 | smc_10bt_check_media(dev, 1); | 1600 | smc_10bt_check_media(dev, 1); |
@@ -1972,7 +1974,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) | |||
1972 | #endif | 1974 | #endif |
1973 | 1975 | ||
1974 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); | 1976 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); |
1975 | INIT_WORK(&lp->phy_configure, smc_phy_configure, dev); | 1977 | INIT_WORK(&lp->phy_configure, smc_phy_configure); |
1978 | lp->dev = dev; | ||
1976 | lp->mii.phy_id_mask = 0x1f; | 1979 | lp->mii.phy_id_mask = 0x1f; |
1977 | lp->mii.reg_num_mask = 0x1f; | 1980 | lp->mii.reg_num_mask = 0x1f; |
1978 | lp->mii.force_media = 0; | 1981 | lp->mii.force_media = 0; |
@@ -2322,7 +2325,7 @@ static int smc_drv_resume(struct platform_device *dev) | |||
2322 | smc_reset(ndev); | 2325 | smc_reset(ndev); |
2323 | smc_enable(ndev); | 2326 | smc_enable(ndev); |
2324 | if (lp->phy_type != 0) | 2327 | if (lp->phy_type != 0) |
2325 | smc_phy_configure(ndev); | 2328 | smc_phy_configure(&lp->phy_configure); |
2326 | netif_device_attach(ndev); | 2329 | netif_device_attach(ndev); |
2327 | } | 2330 | } |
2328 | } | 2331 | } |
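[Editor's note] smc91x also calls its PHY-configure routine synchronously from open and resume. Since the handler's only argument is now the work item, those call sites pass the address of the embedded work_struct and let container_of() recover the private data, as in this sketch (names are illustrative, not the driver's):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct smc_like {
		struct net_device *dev;
		struct work_struct phy_configure;
	};

	static void smc_like_phy_configure(struct work_struct *work)
	{
		struct smc_like *lp = container_of(work, struct smc_like, phy_configure);
		struct net_device *dev = lp->dev;

		/* ... program the PHY for dev ... */
		(void)dev;
	}

	static int smc_like_open(struct net_device *dev)
	{
		struct smc_like *lp = netdev_priv(dev);

		/*
		 * Synchronous call sites can no longer hand over the net_device;
		 * they pass the address of the work item and let container_of()
		 * do the rest.
		 */
		smc_like_phy_configure(&lp->phy_configure);
		return 0;
	}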
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 13e0a43e423..ebb6aa39f9c 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1939,10 +1939,11 @@ spider_net_stop(struct net_device *netdev) | |||
1939 | * called as task when tx hangs, resets interface (if interface is up) | 1939 | * called as task when tx hangs, resets interface (if interface is up) |
1940 | */ | 1940 | */ |
1941 | static void | 1941 | static void |
1942 | spider_net_tx_timeout_task(void *data) | 1942 | spider_net_tx_timeout_task(struct work_struct *work) |
1943 | { | 1943 | { |
1944 | struct net_device *netdev = data; | 1944 | struct spider_net_card *card = |
1945 | struct spider_net_card *card = netdev_priv(netdev); | 1945 | container_of(work, struct spider_net_card, tx_timeout_task); |
1946 | struct net_device *netdev = card->netdev; | ||
1946 | 1947 | ||
1947 | if (!(netdev->flags & IFF_UP)) | 1948 | if (!(netdev->flags & IFF_UP)) |
1948 | goto out; | 1949 | goto out; |
@@ -2116,7 +2117,7 @@ spider_net_alloc_card(void) | |||
2116 | card = netdev_priv(netdev); | 2117 | card = netdev_priv(netdev); |
2117 | card->netdev = netdev; | 2118 | card->netdev = netdev; |
2118 | card->msg_enable = SPIDER_NET_DEFAULT_MSG; | 2119 | card->msg_enable = SPIDER_NET_DEFAULT_MSG; |
2119 | INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); | 2120 | INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); |
2120 | init_waitqueue_head(&card->waitq); | 2121 | init_waitqueue_head(&card->waitq); |
2121 | atomic_set(&card->tx_timeout_task_counter, 0); | 2122 | atomic_set(&card->tx_timeout_task_counter, 0); |
2122 | 2123 | ||
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index cf44e72399b..785e4a535f9 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -2282,9 +2282,9 @@ static void gem_do_stop(struct net_device *dev, int wol) | |||
2282 | } | 2282 | } |
2283 | } | 2283 | } |
2284 | 2284 | ||
2285 | static void gem_reset_task(void *data) | 2285 | static void gem_reset_task(struct work_struct *work) |
2286 | { | 2286 | { |
2287 | struct gem *gp = (struct gem *) data; | 2287 | struct gem *gp = container_of(work, struct gem, reset_task); |
2288 | 2288 | ||
2289 | mutex_lock(&gp->pm_mutex); | 2289 | mutex_lock(&gp->pm_mutex); |
2290 | 2290 | ||
@@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3044 | gp->link_timer.function = gem_link_timer; | 3044 | gp->link_timer.function = gem_link_timer; |
3045 | gp->link_timer.data = (unsigned long) gp; | 3045 | gp->link_timer.data = (unsigned long) gp; |
3046 | 3046 | ||
3047 | INIT_WORK(&gp->reset_task, gem_reset_task, gp); | 3047 | INIT_WORK(&gp->reset_task, gem_reset_task); |
3048 | 3048 | ||
3049 | gp->lstate = link_down; | 3049 | gp->lstate = link_down; |
3050 | gp->timer_ticks = 0; | 3050 | gp->timer_ticks = 0; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index c20bb998e0e..d9123c9adc1 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -3654,9 +3654,9 @@ static void tg3_poll_controller(struct net_device *dev) | |||
3654 | } | 3654 | } |
3655 | #endif | 3655 | #endif |
3656 | 3656 | ||
3657 | static void tg3_reset_task(void *_data) | 3657 | static void tg3_reset_task(struct work_struct *work) |
3658 | { | 3658 | { |
3659 | struct tg3 *tp = _data; | 3659 | struct tg3 *tp = container_of(work, struct tg3, reset_task); |
3660 | unsigned int restart_timer; | 3660 | unsigned int restart_timer; |
3661 | 3661 | ||
3662 | tg3_full_lock(tp, 0); | 3662 | tg3_full_lock(tp, 0); |
@@ -11734,7 +11734,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
11734 | #endif | 11734 | #endif |
11735 | spin_lock_init(&tp->lock); | 11735 | spin_lock_init(&tp->lock); |
11736 | spin_lock_init(&tp->indirect_lock); | 11736 | spin_lock_init(&tp->indirect_lock); |
11737 | INIT_WORK(&tp->reset_task, tg3_reset_task, tp); | 11737 | INIT_WORK(&tp->reset_task, tg3_reset_task); |
11738 | 11738 | ||
11739 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); | 11739 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); |
11740 | if (tp->regs == 0UL) { | 11740 | if (tp->regs == 0UL) { |
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index e14f5a00f65..f85f0025112 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -296,6 +296,7 @@ static void TLan_SetMulticastList( struct net_device *); | |||
296 | static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); | 296 | static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); |
297 | static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); | 297 | static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); |
298 | static void TLan_tx_timeout( struct net_device *dev); | 298 | static void TLan_tx_timeout( struct net_device *dev); |
299 | static void TLan_tx_timeout_work(struct work_struct *work); | ||
299 | static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); | 300 | static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); |
300 | 301 | ||
301 | static u32 TLan_HandleInvalid( struct net_device *, u16 ); | 302 | static u32 TLan_HandleInvalid( struct net_device *, u16 ); |
@@ -562,6 +563,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
562 | priv = netdev_priv(dev); | 563 | priv = netdev_priv(dev); |
563 | 564 | ||
564 | priv->pciDev = pdev; | 565 | priv->pciDev = pdev; |
566 | priv->dev = dev; | ||
565 | 567 | ||
566 | /* Is this a PCI device? */ | 568 | /* Is this a PCI device? */ |
567 | if (pdev) { | 569 | if (pdev) { |
@@ -634,7 +636,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
634 | 636 | ||
635 | /* This will be used when we get an adapter error from | 637 | /* This will be used when we get an adapter error from |
636 | * within our irq handler */ | 638 | * within our irq handler */ |
637 | INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); | 639 | INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); |
638 | 640 | ||
639 | spin_lock_init(&priv->lock); | 641 | spin_lock_init(&priv->lock); |
640 | 642 | ||
@@ -1040,6 +1042,25 @@ static void TLan_tx_timeout(struct net_device *dev) | |||
1040 | } | 1042 | } |
1041 | 1043 | ||
1042 | 1044 | ||
1045 | /*************************************************************** | ||
1046 | * TLan_tx_timeout_work | ||
1047 | * | ||
1048 | * Returns: nothing | ||
1049 | * | ||
1050 | * Params: | ||
1051 | * work work item of device which timed out | ||
1052 | * | ||
1053 | **************************************************************/ | ||
1054 | |||
1055 | static void TLan_tx_timeout_work(struct work_struct *work) | ||
1056 | { | ||
1057 | TLanPrivateInfo *priv = | ||
1058 | container_of(work, TLanPrivateInfo, tlan_tqueue); | ||
1059 | |||
1060 | TLan_tx_timeout(priv->dev); | ||
1061 | } | ||
1062 | |||
1063 | |||
1043 | 1064 | ||
1044 | /*************************************************************** | 1065 | /*************************************************************** |
1045 | * TLan_StartTx | 1066 | * TLan_StartTx |
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h index a44e2f2ef62..41ce0b66593 100644 --- a/drivers/net/tlan.h +++ b/drivers/net/tlan.h | |||
@@ -170,6 +170,7 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; | |||
170 | typedef struct tlan_private_tag { | 170 | typedef struct tlan_private_tag { |
171 | struct net_device *nextDevice; | 171 | struct net_device *nextDevice; |
172 | struct pci_dev *pciDev; | 172 | struct pci_dev *pciDev; |
173 | struct net_device *dev; | ||
173 | void *dmaStorage; | 174 | void *dmaStorage; |
174 | dma_addr_t dmaStorageDMA; | 175 | dma_addr_t dmaStorageDMA; |
175 | unsigned int dmaSize; | 176 | unsigned int dmaSize; |
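[Editor's note] tlan takes a different route: the existing net_device-based timeout function keeps its signature, and a small wrapper with the work_func_t prototype is added to bridge the two. A sketch of that wrapper pattern under hypothetical names:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct tlan_like {
		struct net_device *dev;		/* back-pointer added by the patch */
		struct work_struct tqueue;
	};

	/* The existing function keeps its net_device-based signature... */
	static void tlan_like_tx_timeout(struct net_device *dev)
	{
		/* ... original timeout handling ... */
	}

	/* ...and a thin wrapper adapts it to the work_func_t prototype. */
	static void tlan_like_tx_timeout_work(struct work_struct *work)
	{
		struct tlan_like *priv = container_of(work, struct tlan_like, tqueue);

		tlan_like_tx_timeout(priv->dev);
	}

	/* probe:  INIT_WORK(&priv->tqueue, tlan_like_tx_timeout_work); */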
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index fa3a2bb105a..942b839ccc5 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c | |||
@@ -26,10 +26,11 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; | |||
26 | 26 | ||
27 | /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list | 27 | /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list |
28 | of available transceivers. */ | 28 | of available transceivers. */ |
29 | void t21142_media_task(void *data) | 29 | void t21142_media_task(struct work_struct *work) |
30 | { | 30 | { |
31 | struct net_device *dev = data; | 31 | struct tulip_private *tp = |
32 | struct tulip_private *tp = netdev_priv(dev); | 32 | container_of(work, struct tulip_private, media_work); |
33 | struct net_device *dev = tp->dev; | ||
33 | void __iomem *ioaddr = tp->base_addr; | 34 | void __iomem *ioaddr = tp->base_addr; |
34 | int csr12 = ioread32(ioaddr + CSR12); | 35 | int csr12 = ioread32(ioaddr + CSR12); |
35 | int next_tick = 60*HZ; | 36 | int next_tick = 60*HZ; |
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c index 066e5d6bcbd..df326fe1cc8 100644 --- a/drivers/net/tulip/timer.c +++ b/drivers/net/tulip/timer.c | |||
@@ -18,10 +18,11 @@ | |||
18 | #include "tulip.h" | 18 | #include "tulip.h" |
19 | 19 | ||
20 | 20 | ||
21 | void tulip_media_task(void *data) | 21 | void tulip_media_task(struct work_struct *work) |
22 | { | 22 | { |
23 | struct net_device *dev = data; | 23 | struct tulip_private *tp = |
24 | struct tulip_private *tp = netdev_priv(dev); | 24 | container_of(work, struct tulip_private, media_work); |
25 | struct net_device *dev = tp->dev; | ||
25 | void __iomem *ioaddr = tp->base_addr; | 26 | void __iomem *ioaddr = tp->base_addr; |
26 | u32 csr12 = ioread32(ioaddr + CSR12); | 27 | u32 csr12 = ioread32(ioaddr + CSR12); |
27 | int next_tick = 2*HZ; | 28 | int next_tick = 2*HZ; |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index ad107f45c7b..25f25da7691 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -44,7 +44,7 @@ struct tulip_chip_table { | |||
44 | int valid_intrs; /* CSR7 interrupt enable settings */ | 44 | int valid_intrs; /* CSR7 interrupt enable settings */ |
45 | int flags; | 45 | int flags; |
46 | void (*media_timer) (unsigned long); | 46 | void (*media_timer) (unsigned long); |
47 | void (*media_task) (void *); | 47 | work_func_t media_task; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | 50 | ||
@@ -392,6 +392,7 @@ struct tulip_private { | |||
392 | int csr12_shadow; | 392 | int csr12_shadow; |
393 | int pad0; /* Used for 8-byte alignment */ | 393 | int pad0; /* Used for 8-byte alignment */ |
394 | struct work_struct media_work; | 394 | struct work_struct media_work; |
395 | struct net_device *dev; | ||
395 | }; | 396 | }; |
396 | 397 | ||
397 | 398 | ||
@@ -406,7 +407,7 @@ struct eeprom_fixup { | |||
406 | 407 | ||
407 | /* 21142.c */ | 408 | /* 21142.c */ |
408 | extern u16 t21142_csr14[]; | 409 | extern u16 t21142_csr14[]; |
409 | void t21142_media_task(void *data); | 410 | void t21142_media_task(struct work_struct *work); |
410 | void t21142_start_nway(struct net_device *dev); | 411 | void t21142_start_nway(struct net_device *dev); |
411 | void t21142_lnk_change(struct net_device *dev, int csr5); | 412 | void t21142_lnk_change(struct net_device *dev, int csr5); |
412 | 413 | ||
@@ -444,7 +445,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5); | |||
444 | void pnic_timer(unsigned long data); | 445 | void pnic_timer(unsigned long data); |
445 | 446 | ||
446 | /* timer.c */ | 447 | /* timer.c */ |
447 | void tulip_media_task(void *data); | 448 | void tulip_media_task(struct work_struct *work); |
448 | void mxic_timer(unsigned long data); | 449 | void mxic_timer(unsigned long data); |
449 | void comet_timer(unsigned long data); | 450 | void comet_timer(unsigned long data); |
450 | 451 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 0aee618f883..5a35354aa52 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -1367,6 +1367,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1367 | * it is zeroed and aligned in alloc_etherdev | 1367 | * it is zeroed and aligned in alloc_etherdev |
1368 | */ | 1368 | */ |
1369 | tp = netdev_priv(dev); | 1369 | tp = netdev_priv(dev); |
1370 | tp->dev = dev; | ||
1370 | 1371 | ||
1371 | tp->rx_ring = pci_alloc_consistent(pdev, | 1372 | tp->rx_ring = pci_alloc_consistent(pdev, |
1372 | sizeof(struct tulip_rx_desc) * RX_RING_SIZE + | 1373 | sizeof(struct tulip_rx_desc) * RX_RING_SIZE + |
@@ -1389,7 +1390,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1389 | tp->timer.data = (unsigned long)dev; | 1390 | tp->timer.data = (unsigned long)dev; |
1390 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; | 1391 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; |
1391 | 1392 | ||
1392 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); | 1393 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); |
1393 | 1394 | ||
1394 | dev->base_addr = (unsigned long)ioaddr; | 1395 | dev->base_addr = (unsigned long)ioaddr; |
1395 | 1396 | ||
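[Editor's note] In tulip the handler is not hard-coded at the INIT_WORK() call; it lives in the per-chip table, so the table field's type changes from void (*)(void *) to work_func_t. Roughly, with invented names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct chip_ops {
		/* was:  void (*media_task)(void *); */
		work_func_t media_task;
	};

	static void generic_media_task(struct work_struct *work)
	{
		/* recover the private structure with container_of(), as in timer.c */
	}

	static const struct chip_ops chip_table[] = {
		{ .media_task = generic_media_task },
	};

	/*
	 * Probe then hands the table entry straight to INIT_WORK():
	 *	INIT_WORK(&tp->media_work, chip_table[chip_id].media_task);
	 */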
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 931cbdf6d79..b2a23aed442 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c | |||
@@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty); | |||
125 | static int cpc_tty_chars_in_buffer(struct tty_struct *tty); | 125 | static int cpc_tty_chars_in_buffer(struct tty_struct *tty); |
126 | static void cpc_tty_flush_buffer(struct tty_struct *tty); | 126 | static void cpc_tty_flush_buffer(struct tty_struct *tty); |
127 | static void cpc_tty_hangup(struct tty_struct *tty); | 127 | static void cpc_tty_hangup(struct tty_struct *tty); |
128 | static void cpc_tty_rx_work(void *data); | 128 | static void cpc_tty_rx_work(struct work_struct *work); |
129 | static void cpc_tty_tx_work(void *data); | 129 | static void cpc_tty_tx_work(struct work_struct *work); |
130 | static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); | 130 | static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); |
131 | static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); | 131 | static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); |
132 | static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); | 132 | static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); |
@@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev) | |||
261 | cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; | 261 | cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; |
262 | cpc_tty->pc300dev = pc300dev; | 262 | cpc_tty->pc300dev = pc300dev; |
263 | 263 | ||
264 | INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); | 264 | INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work); |
265 | INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); | 265 | INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work); |
266 | 266 | ||
267 | cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; | 267 | cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; |
268 | 268 | ||
@@ -659,21 +659,23 @@ static void cpc_tty_hangup(struct tty_struct *tty) | |||
659 | * o call the line disc. read | 659 | * o call the line disc. read |
660 | * o free memory | 660 | * o free memory |
661 | */ | 661 | */ |
662 | static void cpc_tty_rx_work(void * data) | 662 | static void cpc_tty_rx_work(struct work_struct *work) |
663 | { | 663 | { |
664 | st_cpc_tty_area *cpc_tty; | ||
664 | unsigned long port; | 665 | unsigned long port; |
665 | int i, j; | 666 | int i, j; |
666 | st_cpc_tty_area *cpc_tty; | ||
667 | volatile st_cpc_rx_buf *buf; | 667 | volatile st_cpc_rx_buf *buf; |
668 | char flags=0,flg_rx=1; | 668 | char flags=0,flg_rx=1; |
669 | struct tty_ldisc *ld; | 669 | struct tty_ldisc *ld; |
670 | 670 | ||
671 | if (cpc_tty_cnt == 0) return; | 671 | if (cpc_tty_cnt == 0) return; |
672 | |||
673 | 672 | ||
674 | for (i=0; (i < 4) && flg_rx ; i++) { | 673 | for (i=0; (i < 4) && flg_rx ; i++) { |
675 | flg_rx = 0; | 674 | flg_rx = 0; |
676 | port = (unsigned long)data; | 675 | |
676 | cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work); | ||
677 | port = cpc_tty - cpc_tty_area; | ||
678 | |||
677 | for (j=0; j < CPC_TTY_NPORTS; j++) { | 679 | for (j=0; j < CPC_TTY_NPORTS; j++) { |
678 | cpc_tty = &cpc_tty_area[port]; | 680 | cpc_tty = &cpc_tty_area[port]; |
679 | 681 | ||
@@ -882,9 +884,10 @@ void cpc_tty_receive(pc300dev_t *pc300dev) | |||
882 | * o if need call line discipline wakeup | 884 | * o if need call line discipline wakeup |
883 | * o call wake_up_interruptible | 885 | * o call wake_up_interruptible |
884 | */ | 886 | */ |
885 | static void cpc_tty_tx_work(void *data) | 887 | static void cpc_tty_tx_work(struct work_struct *work) |
886 | { | 888 | { |
887 | st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; | 889 | st_cpc_tty_area *cpc_tty = |
890 | container_of(work, st_cpc_tty_area, tty_tx_work); | ||
888 | struct tty_struct *tty; | 891 | struct tty_struct *tty; |
889 | 892 | ||
890 | CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); | 893 | CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); |
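[Editor's note] pc300_tty used to smuggle a port number through the old data pointer. With container_of() the handler gets the per-port structure instead, and the index, if still needed, falls out of pointer arithmetic against the array base. A sketch, assuming a hypothetical static array of per-port state:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	#define NPORTS 4

	struct port_area {
		struct work_struct rx_work;
		/* ... per-port state ... */
	};

	static struct port_area port_area[NPORTS];

	static void port_rx_work(struct work_struct *work)
	{
		struct port_area *p = container_of(work, struct port_area, rx_work);
		unsigned long port = p - port_area;	/* the old "data" value */

		/* ... drain the receive queue for this port ... */
		(void)port;
	}

	/* init, per port:  INIT_WORK(&port_area[i].rx_work, port_rx_work); */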
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 94dfb92fab5..8286678513b 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h | |||
@@ -819,7 +819,7 @@ struct bcm43xx_private { | |||
819 | struct tasklet_struct isr_tasklet; | 819 | struct tasklet_struct isr_tasklet; |
820 | 820 | ||
821 | /* Periodic tasks */ | 821 | /* Periodic tasks */ |
822 | struct work_struct periodic_work; | 822 | struct delayed_work periodic_work; |
823 | unsigned int periodic_state; | 823 | unsigned int periodic_state; |
824 | 824 | ||
825 | struct work_struct restart_work; | 825 | struct work_struct restart_work; |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 5b3c27359a1..2ec2e5afce6 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -3215,9 +3215,10 @@ static void do_periodic_work(struct bcm43xx_private *bcm) | |||
3215 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); | 3215 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); |
3216 | } | 3216 | } |
3217 | 3217 | ||
3218 | static void bcm43xx_periodic_work_handler(void *d) | 3218 | static void bcm43xx_periodic_work_handler(struct work_struct *work) |
3219 | { | 3219 | { |
3220 | struct bcm43xx_private *bcm = d; | 3220 | struct bcm43xx_private *bcm = |
3221 | container_of(work, struct bcm43xx_private, periodic_work.work); | ||
3221 | struct net_device *net_dev = bcm->net_dev; | 3222 | struct net_device *net_dev = bcm->net_dev; |
3222 | unsigned long flags; | 3223 | unsigned long flags; |
3223 | u32 savedirqs = 0; | 3224 | u32 savedirqs = 0; |
@@ -3279,11 +3280,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) | |||
3279 | 3280 | ||
3280 | void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) | 3281 | void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) |
3281 | { | 3282 | { |
3282 | struct work_struct *work = &(bcm->periodic_work); | 3283 | struct delayed_work *work = &bcm->periodic_work; |
3283 | 3284 | ||
3284 | assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); | 3285 | assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); |
3285 | INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); | 3286 | INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler); |
3286 | schedule_work(work); | 3287 | schedule_delayed_work(work, 0); |
3287 | } | 3288 | } |
3288 | 3289 | ||
3289 | static void bcm43xx_security_init(struct bcm43xx_private *bcm) | 3290 | static void bcm43xx_security_init(struct bcm43xx_private *bcm) |
@@ -3635,7 +3636,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) | |||
3635 | bcm43xx_periodic_tasks_setup(bcm); | 3636 | bcm43xx_periodic_tasks_setup(bcm); |
3636 | 3637 | ||
3637 | /*FIXME: This should be handled by softmac instead. */ | 3638 | /*FIXME: This should be handled by softmac instead. */ |
3638 | schedule_work(&bcm->softmac->associnfo.work); | 3639 | schedule_delayed_work(&bcm->softmac->associnfo.work, 0); |
3639 | 3640 | ||
3640 | out: | 3641 | out: |
3641 | mutex_unlock(&(bcm)->mutex); | 3642 | mutex_unlock(&(bcm)->mutex); |
@@ -4182,9 +4183,10 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev) | |||
4182 | /* Hard-reset the chip. Do not call this directly. | 4183 | /* Hard-reset the chip. Do not call this directly. |
4183 | * Use bcm43xx_controller_restart() | 4184 | * Use bcm43xx_controller_restart() |
4184 | */ | 4185 | */ |
4185 | static void bcm43xx_chip_reset(void *_bcm) | 4186 | static void bcm43xx_chip_reset(struct work_struct *work) |
4186 | { | 4187 | { |
4187 | struct bcm43xx_private *bcm = _bcm; | 4188 | struct bcm43xx_private *bcm = |
4189 | container_of(work, struct bcm43xx_private, restart_work); | ||
4188 | struct bcm43xx_phyinfo *phy; | 4190 | struct bcm43xx_phyinfo *phy; |
4189 | int err = -ENODEV; | 4191 | int err = -ENODEV; |
4190 | 4192 | ||
@@ -4211,7 +4213,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason) | |||
4211 | if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) | 4213 | if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) |
4212 | return; | 4214 | return; |
4213 | printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); | 4215 | printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); |
4214 | INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); | 4216 | INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset); |
4215 | schedule_work(&bcm->restart_work); | 4217 | schedule_work(&bcm->restart_work); |
4216 | } | 4218 | } |
4217 | 4219 | ||
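[Editor's note] bcm43xx ends up with both flavours in one private structure: the self re-arming periodic task needs struct delayed_work, while the one-shot restart task stays a plain work_struct. A rough sketch of how the two are declared, initialised and kicked; names and intervals are invented for illustration:

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct wdev {
		struct delayed_work periodic_work;	/* re-arms itself with a delay */
		struct work_struct restart_work;	/* one-shot, never delayed */
	};

	static void wdev_periodic(struct work_struct *work)
	{
		struct wdev *w = container_of(work, struct wdev, periodic_work.work);

		/* ... periodic maintenance ... */
		schedule_delayed_work(&w->periodic_work, 15 * HZ);
	}

	static void wdev_restart(struct work_struct *work)
	{
		struct wdev *w = container_of(work, struct wdev, restart_work);

		/* ... hard reset of w ... */
		(void)w;
	}

	static void wdev_start(struct wdev *w)
	{
		INIT_DELAYED_WORK(&w->periodic_work, wdev_periodic);
		INIT_WORK(&w->restart_work, wdev_restart);

		/* run the periodic task immediately: a zero-jiffy delay */
		schedule_delayed_work(&w->periodic_work, 0);
	}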
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h index e663518bd57..e89c890d16f 100644 --- a/drivers/net/wireless/hostap/hostap.h +++ b/drivers/net/wireless/hostap/hostap.h | |||
@@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc); | |||
35 | struct net_device_stats *hostap_get_stats(struct net_device *dev); | 35 | struct net_device_stats *hostap_get_stats(struct net_device *dev); |
36 | void hostap_setup_dev(struct net_device *dev, local_info_t *local, | 36 | void hostap_setup_dev(struct net_device *dev, local_info_t *local, |
37 | int main_dev); | 37 | int main_dev); |
38 | void hostap_set_multicast_list_queue(void *data); | 38 | void hostap_set_multicast_list_queue(struct work_struct *work); |
39 | int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); | 39 | int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); |
40 | int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); | 40 | int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); |
41 | void hostap_cleanup(local_info_t *local); | 41 | void hostap_cleanup(local_info_t *local); |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index ba13125024c..08bc57a4b89 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs " | |||
49 | static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); | 49 | static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); |
50 | static void hostap_event_expired_sta(struct net_device *dev, | 50 | static void hostap_event_expired_sta(struct net_device *dev, |
51 | struct sta_info *sta); | 51 | struct sta_info *sta); |
52 | static void handle_add_proc_queue(void *data); | 52 | static void handle_add_proc_queue(struct work_struct *work); |
53 | 53 | ||
54 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 54 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
55 | static void handle_wds_oper_queue(void *data); | 55 | static void handle_wds_oper_queue(struct work_struct *work); |
56 | static void prism2_send_mgmt(struct net_device *dev, | 56 | static void prism2_send_mgmt(struct net_device *dev, |
57 | u16 type_subtype, char *body, | 57 | u16 type_subtype, char *body, |
58 | int body_len, u8 *addr, u16 tx_cb_idx); | 58 | int body_len, u8 *addr, u16 tx_cb_idx); |
@@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local) | |||
807 | INIT_LIST_HEAD(&ap->sta_list); | 807 | INIT_LIST_HEAD(&ap->sta_list); |
808 | 808 | ||
809 | /* Initialize task queue structure for AP management */ | 809 | /* Initialize task queue structure for AP management */ |
810 | INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); | 810 | INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); |
811 | 811 | ||
812 | ap->tx_callback_idx = | 812 | ap->tx_callback_idx = |
813 | hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); | 813 | hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); |
@@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local) | |||
815 | printk(KERN_WARNING "%s: failed to register TX callback for " | 815 | printk(KERN_WARNING "%s: failed to register TX callback for " |
816 | "AP\n", local->dev->name); | 816 | "AP\n", local->dev->name); |
817 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 817 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
818 | INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); | 818 | INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); |
819 | 819 | ||
820 | ap->tx_callback_auth = | 820 | ap->tx_callback_auth = |
821 | hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); | 821 | hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); |
@@ -1062,9 +1062,10 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, | |||
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | 1064 | ||
1065 | static void handle_add_proc_queue(void *data) | 1065 | static void handle_add_proc_queue(struct work_struct *work) |
1066 | { | 1066 | { |
1067 | struct ap_data *ap = (struct ap_data *) data; | 1067 | struct ap_data *ap = container_of(work, struct ap_data, |
1068 | add_sta_proc_queue); | ||
1068 | struct sta_info *sta; | 1069 | struct sta_info *sta; |
1069 | char name[20]; | 1070 | char name[20]; |
1070 | struct add_sta_proc_data *entry, *prev; | 1071 | struct add_sta_proc_data *entry, *prev; |
@@ -1952,9 +1953,11 @@ static void handle_pspoll(local_info_t *local, | |||
1952 | 1953 | ||
1953 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 1954 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
1954 | 1955 | ||
1955 | static void handle_wds_oper_queue(void *data) | 1956 | static void handle_wds_oper_queue(struct work_struct *work) |
1956 | { | 1957 | { |
1957 | local_info_t *local = data; | 1958 | struct ap_data *ap = container_of(work, struct ap_data, |
1959 | wds_oper_queue); | ||
1960 | local_info_t *local = ap->local; | ||
1958 | struct wds_oper_data *entry, *prev; | 1961 | struct wds_oper_data *entry, *prev; |
1959 | 1962 | ||
1960 | spin_lock_bh(&local->lock); | 1963 | spin_lock_bh(&local->lock); |
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index ed00ebb6e7f..c19e68636a1 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -1645,9 +1645,9 @@ static void prism2_schedule_reset(local_info_t *local) | |||
1645 | 1645 | ||
1646 | /* Called only as scheduled task after noticing card timeout in interrupt | 1646 | /* Called only as scheduled task after noticing card timeout in interrupt |
1647 | * context */ | 1647 | * context */ |
1648 | static void handle_reset_queue(void *data) | 1648 | static void handle_reset_queue(struct work_struct *work) |
1649 | { | 1649 | { |
1650 | local_info_t *local = (local_info_t *) data; | 1650 | local_info_t *local = container_of(work, local_info_t, reset_queue); |
1651 | 1651 | ||
1652 | printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); | 1652 | printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); |
1653 | prism2_hw_reset(local->dev); | 1653 | prism2_hw_reset(local->dev); |
@@ -2896,9 +2896,10 @@ static void hostap_passive_scan(unsigned long data) | |||
2896 | 2896 | ||
2897 | /* Called only as a scheduled task when communications quality values should | 2897 | /* Called only as a scheduled task when communications quality values should |
2898 | * be updated. */ | 2898 | * be updated. */ |
2899 | static void handle_comms_qual_update(void *data) | 2899 | static void handle_comms_qual_update(struct work_struct *work) |
2900 | { | 2900 | { |
2901 | local_info_t *local = data; | 2901 | local_info_t *local = |
2902 | container_of(work, local_info_t, comms_qual_update); | ||
2902 | prism2_update_comms_qual(local->dev); | 2903 | prism2_update_comms_qual(local->dev); |
2903 | } | 2904 | } |
2904 | 2905 | ||
@@ -3050,9 +3051,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set) | |||
3050 | } | 3051 | } |
3051 | 3052 | ||
3052 | 3053 | ||
3053 | static void handle_set_tim_queue(void *data) | 3054 | static void handle_set_tim_queue(struct work_struct *work) |
3054 | { | 3055 | { |
3055 | local_info_t *local = (local_info_t *) data; | 3056 | local_info_t *local = container_of(work, local_info_t, set_tim_queue); |
3056 | struct set_tim_data *entry; | 3057 | struct set_tim_data *entry; |
3057 | u16 val; | 3058 | u16 val; |
3058 | 3059 | ||
@@ -3209,15 +3210,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, | |||
3209 | local->scan_channel_mask = 0xffff; | 3210 | local->scan_channel_mask = 0xffff; |
3210 | 3211 | ||
3211 | /* Initialize task queue structures */ | 3212 | /* Initialize task queue structures */ |
3212 | INIT_WORK(&local->reset_queue, handle_reset_queue, local); | 3213 | INIT_WORK(&local->reset_queue, handle_reset_queue); |
3213 | INIT_WORK(&local->set_multicast_list_queue, | 3214 | INIT_WORK(&local->set_multicast_list_queue, |
3214 | hostap_set_multicast_list_queue, local->dev); | 3215 | hostap_set_multicast_list_queue); |
3215 | 3216 | ||
3216 | INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); | 3217 | INIT_WORK(&local->set_tim_queue, handle_set_tim_queue); |
3217 | INIT_LIST_HEAD(&local->set_tim_list); | 3218 | INIT_LIST_HEAD(&local->set_tim_list); |
3218 | spin_lock_init(&local->set_tim_lock); | 3219 | spin_lock_init(&local->set_tim_lock); |
3219 | 3220 | ||
3220 | INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local); | 3221 | INIT_WORK(&local->comms_qual_update, handle_comms_qual_update); |
3221 | 3222 | ||
3222 | /* Initialize tasklets for handling hardware IRQ related operations | 3223 | /* Initialize tasklets for handling hardware IRQ related operations |
3223 | * outside hw IRQ handler */ | 3224 | * outside hw IRQ handler */ |
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index 50f72d831cf..5fd2b1ad7f5 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c | |||
@@ -474,9 +474,9 @@ static void handle_info_queue_scanresults(local_info_t *local) | |||
474 | 474 | ||
475 | /* Called only as scheduled task after receiving info frames (used to avoid | 475 | /* Called only as scheduled task after receiving info frames (used to avoid |
476 | * pending too much time in HW IRQ handler). */ | 476 | * pending too much time in HW IRQ handler). */ |
477 | static void handle_info_queue(void *data) | 477 | static void handle_info_queue(struct work_struct *work) |
478 | { | 478 | { |
479 | local_info_t *local = (local_info_t *) data; | 479 | local_info_t *local = container_of(work, local_info_t, info_queue); |
480 | 480 | ||
481 | if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, | 481 | if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, |
482 | &local->pending_info)) | 482 | &local->pending_info)) |
@@ -493,7 +493,7 @@ void hostap_info_init(local_info_t *local) | |||
493 | { | 493 | { |
494 | skb_queue_head_init(&local->info_list); | 494 | skb_queue_head_init(&local->info_list); |
495 | #ifndef PRISM2_NO_STATION_MODES | 495 | #ifndef PRISM2_NO_STATION_MODES |
496 | INIT_WORK(&local->info_queue, handle_info_queue, local); | 496 | INIT_WORK(&local->info_queue, handle_info_queue); |
497 | #endif /* PRISM2_NO_STATION_MODES */ | 497 | #endif /* PRISM2_NO_STATION_MODES */ |
498 | } | 498 | } |
499 | 499 | ||
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 53374fcba77..0796be9d9e7 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p) | |||
767 | 767 | ||
768 | /* TODO: to be further implemented as soon as Prism2 fully supports | 768 | /* TODO: to be further implemented as soon as Prism2 fully supports |
769 | * GroupAddresses and correct documentation is available */ | 769 | * GroupAddresses and correct documentation is available */ |
770 | void hostap_set_multicast_list_queue(void *data) | 770 | void hostap_set_multicast_list_queue(struct work_struct *work) |
771 | { | 771 | { |
772 | struct net_device *dev = (struct net_device *) data; | 772 | local_info_t *local = |
773 | container_of(work, local_info_t, set_multicast_list_queue); | ||
774 | struct net_device *dev = local->dev; | ||
773 | struct hostap_interface *iface; | 775 | struct hostap_interface *iface; |
774 | local_info_t *local; | ||
775 | 776 | ||
776 | iface = netdev_priv(dev); | 777 | iface = netdev_priv(dev); |
777 | local = iface->local; | ||
778 | if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, | 778 | if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, |
779 | local->is_promisc)) { | 779 | local->is_promisc)) { |
780 | printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", | 780 | printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 79607b8b877..1bcd352a813 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv, | |||
316 | struct ipw2100_fw *fw); | 316 | struct ipw2100_fw *fw); |
317 | static int ipw2100_ucode_download(struct ipw2100_priv *priv, | 317 | static int ipw2100_ucode_download(struct ipw2100_priv *priv, |
318 | struct ipw2100_fw *fw); | 318 | struct ipw2100_fw *fw); |
319 | static void ipw2100_wx_event_work(struct ipw2100_priv *priv); | 319 | static void ipw2100_wx_event_work(struct work_struct *work); |
320 | static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); | 320 | static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); |
321 | static struct iw_handler_def ipw2100_wx_handler_def; | 321 | static struct iw_handler_def ipw2100_wx_handler_def; |
322 | 322 | ||
@@ -679,7 +679,8 @@ static void schedule_reset(struct ipw2100_priv *priv) | |||
679 | queue_delayed_work(priv->workqueue, &priv->reset_work, | 679 | queue_delayed_work(priv->workqueue, &priv->reset_work, |
680 | priv->reset_backoff * HZ); | 680 | priv->reset_backoff * HZ); |
681 | else | 681 | else |
682 | queue_work(priv->workqueue, &priv->reset_work); | 682 | queue_delayed_work(priv->workqueue, &priv->reset_work, |
683 | 0); | ||
683 | 684 | ||
684 | if (priv->reset_backoff < MAX_RESET_BACKOFF) | 685 | if (priv->reset_backoff < MAX_RESET_BACKOFF) |
685 | priv->reset_backoff++; | 686 | priv->reset_backoff++; |
@@ -1873,8 +1874,10 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1873 | netif_stop_queue(priv->net_dev); | 1874 | netif_stop_queue(priv->net_dev); |
1874 | } | 1875 | } |
1875 | 1876 | ||
1876 | static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | 1877 | static void ipw2100_reset_adapter(struct work_struct *work) |
1877 | { | 1878 | { |
1879 | struct ipw2100_priv *priv = | ||
1880 | container_of(work, struct ipw2100_priv, reset_work.work); | ||
1878 | unsigned long flags; | 1881 | unsigned long flags; |
1879 | union iwreq_data wrqu = { | 1882 | union iwreq_data wrqu = { |
1880 | .ap_addr = { | 1883 | .ap_addr = { |
@@ -2071,9 +2074,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) | |||
2071 | return; | 2074 | return; |
2072 | 2075 | ||
2073 | if (priv->status & STATUS_SECURITY_UPDATED) | 2076 | if (priv->status & STATUS_SECURITY_UPDATED) |
2074 | queue_work(priv->workqueue, &priv->security_work); | 2077 | queue_delayed_work(priv->workqueue, &priv->security_work, 0); |
2075 | 2078 | ||
2076 | queue_work(priv->workqueue, &priv->wx_event_work); | 2079 | queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); |
2077 | } | 2080 | } |
2078 | 2081 | ||
2079 | static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | 2082 | static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) |
@@ -5524,8 +5527,11 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) | |||
5524 | return err; | 5527 | return err; |
5525 | } | 5528 | } |
5526 | 5529 | ||
5527 | static void ipw2100_security_work(struct ipw2100_priv *priv) | 5530 | static void ipw2100_security_work(struct work_struct *work) |
5528 | { | 5531 | { |
5532 | struct ipw2100_priv *priv = | ||
5533 | container_of(work, struct ipw2100_priv, security_work.work); | ||
5534 | |||
5529 | /* If we happen to have reconnected before we get a chance to | 5535 | /* If we happen to have reconnected before we get a chance to |
5530 | * process this, then update the security settings--which causes | 5536 | * process this, then update the security settings--which causes |
5531 | * a disassociation to occur */ | 5537 | * a disassociation to occur */ |
@@ -5748,7 +5754,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5748 | 5754 | ||
5749 | priv->reset_backoff = 0; | 5755 | priv->reset_backoff = 0; |
5750 | mutex_unlock(&priv->action_mutex); | 5756 | mutex_unlock(&priv->action_mutex); |
5751 | ipw2100_reset_adapter(priv); | 5757 | ipw2100_reset_adapter(&priv->reset_work.work); |
5752 | return 0; | 5758 | return 0; |
5753 | 5759 | ||
5754 | done: | 5760 | done: |
@@ -5910,9 +5916,10 @@ static const struct ethtool_ops ipw2100_ethtool_ops = { | |||
5910 | .get_drvinfo = ipw_ethtool_get_drvinfo, | 5916 | .get_drvinfo = ipw_ethtool_get_drvinfo, |
5911 | }; | 5917 | }; |
5912 | 5918 | ||
5913 | static void ipw2100_hang_check(void *adapter) | 5919 | static void ipw2100_hang_check(struct work_struct *work) |
5914 | { | 5920 | { |
5915 | struct ipw2100_priv *priv = adapter; | 5921 | struct ipw2100_priv *priv = |
5922 | container_of(work, struct ipw2100_priv, hang_check.work); | ||
5916 | unsigned long flags; | 5923 | unsigned long flags; |
5917 | u32 rtc = 0xa5a5a5a5; | 5924 | u32 rtc = 0xa5a5a5a5; |
5918 | u32 len = sizeof(rtc); | 5925 | u32 len = sizeof(rtc); |
@@ -5952,9 +5959,10 @@ static void ipw2100_hang_check(void *adapter) | |||
5952 | spin_unlock_irqrestore(&priv->low_lock, flags); | 5959 | spin_unlock_irqrestore(&priv->low_lock, flags); |
5953 | } | 5960 | } |
5954 | 5961 | ||
5955 | static void ipw2100_rf_kill(void *adapter) | 5962 | static void ipw2100_rf_kill(struct work_struct *work) |
5956 | { | 5963 | { |
5957 | struct ipw2100_priv *priv = adapter; | 5964 | struct ipw2100_priv *priv = |
5965 | container_of(work, struct ipw2100_priv, rf_kill.work); | ||
5958 | unsigned long flags; | 5966 | unsigned long flags; |
5959 | 5967 | ||
5960 | spin_lock_irqsave(&priv->low_lock, flags); | 5968 | spin_lock_irqsave(&priv->low_lock, flags); |
@@ -6103,14 +6111,11 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, | |||
6103 | 6111 | ||
6104 | priv->workqueue = create_workqueue(DRV_NAME); | 6112 | priv->workqueue = create_workqueue(DRV_NAME); |
6105 | 6113 | ||
6106 | INIT_WORK(&priv->reset_work, | 6114 | INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); |
6107 | (void (*)(void *))ipw2100_reset_adapter, priv); | 6115 | INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); |
6108 | INIT_WORK(&priv->security_work, | 6116 | INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); |
6109 | (void (*)(void *))ipw2100_security_work, priv); | 6117 | INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); |
6110 | INIT_WORK(&priv->wx_event_work, | 6118 | INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); |
6111 | (void (*)(void *))ipw2100_wx_event_work, priv); | ||
6112 | INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv); | ||
6113 | INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv); | ||
6114 | 6119 | ||
6115 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | 6120 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) |
6116 | ipw2100_irq_tasklet, (unsigned long)priv); | 6121 | ipw2100_irq_tasklet, (unsigned long)priv); |
@@ -8281,8 +8286,10 @@ static struct iw_handler_def ipw2100_wx_handler_def = { | |||
8281 | .get_wireless_stats = ipw2100_wx_wireless_stats, | 8286 | .get_wireless_stats = ipw2100_wx_wireless_stats, |
8282 | }; | 8287 | }; |
8283 | 8288 | ||
8284 | static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | 8289 | static void ipw2100_wx_event_work(struct work_struct *work) |
8285 | { | 8290 | { |
8291 | struct ipw2100_priv *priv = | ||
8292 | container_of(work, struct ipw2100_priv, wx_event_work.work); | ||
8286 | union iwreq_data wrqu; | 8293 | union iwreq_data wrqu; |
8287 | int len = ETH_ALEN; | 8294 | int len = ETH_ALEN; |
8288 | 8295 | ||
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index 55b7227198d..de7d384d38a 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -583,11 +583,11 @@ struct ipw2100_priv { | |||
583 | struct tasklet_struct irq_tasklet; | 583 | struct tasklet_struct irq_tasklet; |
584 | 584 | ||
585 | struct workqueue_struct *workqueue; | 585 | struct workqueue_struct *workqueue; |
586 | struct work_struct reset_work; | 586 | struct delayed_work reset_work; |
587 | struct work_struct security_work; | 587 | struct delayed_work security_work; |
588 | struct work_struct wx_event_work; | 588 | struct delayed_work wx_event_work; |
589 | struct work_struct hang_check; | 589 | struct delayed_work hang_check; |
590 | struct work_struct rf_kill; | 590 | struct delayed_work rf_kill; |
591 | 591 | ||
592 | u32 interrupts; | 592 | u32 interrupts; |
593 | int tx_interrupts; | 593 | int tx_interrupts; |
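
For the ipw2100 items above, every handler that can also be queued with a timeout becomes a struct delayed_work: the handler reaches its private data through the embedded .work member, and call sites that used queue_work() switch to queue_delayed_work() with a zero delay. A sketch under assumed names, not the driver's:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	struct example_priv {
		struct workqueue_struct *wq;
		struct delayed_work reset_work;	/* was a plain work_struct */
	};

	static void example_reset(struct work_struct *work)
	{
		/* The argument is &reset_work.work, so container_of() goes
		 * through the .work member of the delayed_work. */
		struct example_priv *priv =
			container_of(work, struct example_priv, reset_work.work);

		/* ... reset the adapter ... */
	}

	static void example_setup(struct example_priv *priv)
	{
		INIT_DELAYED_WORK(&priv->reset_work, example_reset);
	}

	static void example_schedule_reset(struct example_priv *priv, int backoff)
	{
		if (backoff)
			queue_delayed_work(priv->wq, &priv->reset_work, backoff * HZ);
		else
			/* the old queue_work() call becomes a zero-delay queue */
			queue_delayed_work(priv->wq, &priv->reset_work, 0);
	}
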
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index c692d01a76c..e82e56bb85e 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); | |||
187 | static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); | 187 | static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); |
188 | static void ipw_rx_queue_replenish(void *); | 188 | static void ipw_rx_queue_replenish(void *); |
189 | static int ipw_up(struct ipw_priv *); | 189 | static int ipw_up(struct ipw_priv *); |
190 | static void ipw_bg_up(void *); | 190 | static void ipw_bg_up(struct work_struct *work); |
191 | static void ipw_down(struct ipw_priv *); | 191 | static void ipw_down(struct ipw_priv *); |
192 | static void ipw_bg_down(void *); | 192 | static void ipw_bg_down(struct work_struct *work); |
193 | static int ipw_config(struct ipw_priv *); | 193 | static int ipw_config(struct ipw_priv *); |
194 | static int init_supported_rates(struct ipw_priv *priv, | 194 | static int init_supported_rates(struct ipw_priv *priv, |
195 | struct ipw_supported_rates *prates); | 195 | struct ipw_supported_rates *prates); |
@@ -862,11 +862,12 @@ static void ipw_led_link_on(struct ipw_priv *priv) | |||
862 | spin_unlock_irqrestore(&priv->lock, flags); | 862 | spin_unlock_irqrestore(&priv->lock, flags); |
863 | } | 863 | } |
864 | 864 | ||
865 | static void ipw_bg_led_link_on(void *data) | 865 | static void ipw_bg_led_link_on(struct work_struct *work) |
866 | { | 866 | { |
867 | struct ipw_priv *priv = data; | 867 | struct ipw_priv *priv = |
868 | container_of(work, struct ipw_priv, led_link_on.work); | ||
868 | mutex_lock(&priv->mutex); | 869 | mutex_lock(&priv->mutex); |
869 | ipw_led_link_on(data); | 870 | ipw_led_link_on(priv); |
870 | mutex_unlock(&priv->mutex); | 871 | mutex_unlock(&priv->mutex); |
871 | } | 872 | } |
872 | 873 | ||
@@ -906,11 +907,12 @@ static void ipw_led_link_off(struct ipw_priv *priv) | |||
906 | spin_unlock_irqrestore(&priv->lock, flags); | 907 | spin_unlock_irqrestore(&priv->lock, flags); |
907 | } | 908 | } |
908 | 909 | ||
909 | static void ipw_bg_led_link_off(void *data) | 910 | static void ipw_bg_led_link_off(struct work_struct *work) |
910 | { | 911 | { |
911 | struct ipw_priv *priv = data; | 912 | struct ipw_priv *priv = |
913 | container_of(work, struct ipw_priv, led_link_off.work); | ||
912 | mutex_lock(&priv->mutex); | 914 | mutex_lock(&priv->mutex); |
913 | ipw_led_link_off(data); | 915 | ipw_led_link_off(priv); |
914 | mutex_unlock(&priv->mutex); | 916 | mutex_unlock(&priv->mutex); |
915 | } | 917 | } |
916 | 918 | ||
@@ -985,11 +987,12 @@ static void ipw_led_activity_off(struct ipw_priv *priv) | |||
985 | spin_unlock_irqrestore(&priv->lock, flags); | 987 | spin_unlock_irqrestore(&priv->lock, flags); |
986 | } | 988 | } |
987 | 989 | ||
988 | static void ipw_bg_led_activity_off(void *data) | 990 | static void ipw_bg_led_activity_off(struct work_struct *work) |
989 | { | 991 | { |
990 | struct ipw_priv *priv = data; | 992 | struct ipw_priv *priv = |
993 | container_of(work, struct ipw_priv, led_act_off.work); | ||
991 | mutex_lock(&priv->mutex); | 994 | mutex_lock(&priv->mutex); |
992 | ipw_led_activity_off(data); | 995 | ipw_led_activity_off(priv); |
993 | mutex_unlock(&priv->mutex); | 996 | mutex_unlock(&priv->mutex); |
994 | } | 997 | } |
995 | 998 | ||
@@ -2228,11 +2231,12 @@ static void ipw_adapter_restart(void *adapter) | |||
2228 | } | 2231 | } |
2229 | } | 2232 | } |
2230 | 2233 | ||
2231 | static void ipw_bg_adapter_restart(void *data) | 2234 | static void ipw_bg_adapter_restart(struct work_struct *work) |
2232 | { | 2235 | { |
2233 | struct ipw_priv *priv = data; | 2236 | struct ipw_priv *priv = |
2237 | container_of(work, struct ipw_priv, adapter_restart); | ||
2234 | mutex_lock(&priv->mutex); | 2238 | mutex_lock(&priv->mutex); |
2235 | ipw_adapter_restart(data); | 2239 | ipw_adapter_restart(priv); |
2236 | mutex_unlock(&priv->mutex); | 2240 | mutex_unlock(&priv->mutex); |
2237 | } | 2241 | } |
2238 | 2242 | ||
@@ -2249,11 +2253,12 @@ static void ipw_scan_check(void *data) | |||
2249 | } | 2253 | } |
2250 | } | 2254 | } |
2251 | 2255 | ||
2252 | static void ipw_bg_scan_check(void *data) | 2256 | static void ipw_bg_scan_check(struct work_struct *work) |
2253 | { | 2257 | { |
2254 | struct ipw_priv *priv = data; | 2258 | struct ipw_priv *priv = |
2259 | container_of(work, struct ipw_priv, scan_check.work); | ||
2255 | mutex_lock(&priv->mutex); | 2260 | mutex_lock(&priv->mutex); |
2256 | ipw_scan_check(data); | 2261 | ipw_scan_check(priv); |
2257 | mutex_unlock(&priv->mutex); | 2262 | mutex_unlock(&priv->mutex); |
2258 | } | 2263 | } |
2259 | 2264 | ||
@@ -3831,17 +3836,19 @@ static int ipw_disassociate(void *data) | |||
3831 | return 1; | 3836 | return 1; |
3832 | } | 3837 | } |
3833 | 3838 | ||
3834 | static void ipw_bg_disassociate(void *data) | 3839 | static void ipw_bg_disassociate(struct work_struct *work) |
3835 | { | 3840 | { |
3836 | struct ipw_priv *priv = data; | 3841 | struct ipw_priv *priv = |
3842 | container_of(work, struct ipw_priv, disassociate); | ||
3837 | mutex_lock(&priv->mutex); | 3843 | mutex_lock(&priv->mutex); |
3838 | ipw_disassociate(data); | 3844 | ipw_disassociate(priv); |
3839 | mutex_unlock(&priv->mutex); | 3845 | mutex_unlock(&priv->mutex); |
3840 | } | 3846 | } |
3841 | 3847 | ||
3842 | static void ipw_system_config(void *data) | 3848 | static void ipw_system_config(struct work_struct *work) |
3843 | { | 3849 | { |
3844 | struct ipw_priv *priv = data; | 3850 | struct ipw_priv *priv = |
3851 | container_of(work, struct ipw_priv, system_config); | ||
3845 | 3852 | ||
3846 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 3853 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
3847 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { | 3854 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { |
@@ -4208,11 +4215,12 @@ static void ipw_gather_stats(struct ipw_priv *priv) | |||
4208 | IPW_STATS_INTERVAL); | 4215 | IPW_STATS_INTERVAL); |
4209 | } | 4216 | } |
4210 | 4217 | ||
4211 | static void ipw_bg_gather_stats(void *data) | 4218 | static void ipw_bg_gather_stats(struct work_struct *work) |
4212 | { | 4219 | { |
4213 | struct ipw_priv *priv = data; | 4220 | struct ipw_priv *priv = |
4221 | container_of(work, struct ipw_priv, gather_stats.work); | ||
4214 | mutex_lock(&priv->mutex); | 4222 | mutex_lock(&priv->mutex); |
4215 | ipw_gather_stats(data); | 4223 | ipw_gather_stats(priv); |
4216 | mutex_unlock(&priv->mutex); | 4224 | mutex_unlock(&priv->mutex); |
4217 | } | 4225 | } |
4218 | 4226 | ||
@@ -4268,8 +4276,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4268 | if (!(priv->status & STATUS_ROAMING)) { | 4276 | if (!(priv->status & STATUS_ROAMING)) { |
4269 | priv->status |= STATUS_ROAMING; | 4277 | priv->status |= STATUS_ROAMING; |
4270 | if (!(priv->status & STATUS_SCANNING)) | 4278 | if (!(priv->status & STATUS_SCANNING)) |
4271 | queue_work(priv->workqueue, | 4279 | queue_delayed_work(priv->workqueue, |
4272 | &priv->request_scan); | 4280 | &priv->request_scan, 0); |
4273 | } | 4281 | } |
4274 | return; | 4282 | return; |
4275 | } | 4283 | } |
@@ -4607,8 +4615,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4607 | #ifdef CONFIG_IPW2200_MONITOR | 4615 | #ifdef CONFIG_IPW2200_MONITOR |
4608 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 4616 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
4609 | priv->status |= STATUS_SCAN_FORCED; | 4617 | priv->status |= STATUS_SCAN_FORCED; |
4610 | queue_work(priv->workqueue, | 4618 | queue_delayed_work(priv->workqueue, |
4611 | &priv->request_scan); | 4619 | &priv->request_scan, 0); |
4612 | break; | 4620 | break; |
4613 | } | 4621 | } |
4614 | priv->status &= ~STATUS_SCAN_FORCED; | 4622 | priv->status &= ~STATUS_SCAN_FORCED; |
@@ -4631,8 +4639,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4631 | /* Don't schedule if we aborted the scan */ | 4639 | /* Don't schedule if we aborted the scan */ |
4632 | priv->status &= ~STATUS_ROAMING; | 4640 | priv->status &= ~STATUS_ROAMING; |
4633 | } else if (priv->status & STATUS_SCAN_PENDING) | 4641 | } else if (priv->status & STATUS_SCAN_PENDING) |
4634 | queue_work(priv->workqueue, | 4642 | queue_delayed_work(priv->workqueue, |
4635 | &priv->request_scan); | 4643 | &priv->request_scan, 0); |
4636 | else if (priv->config & CFG_BACKGROUND_SCAN | 4644 | else if (priv->config & CFG_BACKGROUND_SCAN |
4637 | && priv->status & STATUS_ASSOCIATED) | 4645 | && priv->status & STATUS_ASSOCIATED) |
4638 | queue_delayed_work(priv->workqueue, | 4646 | queue_delayed_work(priv->workqueue, |
@@ -5055,11 +5063,12 @@ static void ipw_rx_queue_replenish(void *data) | |||
5055 | ipw_rx_queue_restock(priv); | 5063 | ipw_rx_queue_restock(priv); |
5056 | } | 5064 | } |
5057 | 5065 | ||
5058 | static void ipw_bg_rx_queue_replenish(void *data) | 5066 | static void ipw_bg_rx_queue_replenish(struct work_struct *work) |
5059 | { | 5067 | { |
5060 | struct ipw_priv *priv = data; | 5068 | struct ipw_priv *priv = |
5069 | container_of(work, struct ipw_priv, rx_replenish); | ||
5061 | mutex_lock(&priv->mutex); | 5070 | mutex_lock(&priv->mutex); |
5062 | ipw_rx_queue_replenish(data); | 5071 | ipw_rx_queue_replenish(priv); |
5063 | mutex_unlock(&priv->mutex); | 5072 | mutex_unlock(&priv->mutex); |
5064 | } | 5073 | } |
5065 | 5074 | ||
@@ -5489,9 +5498,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, | |||
5489 | return 1; | 5498 | return 1; |
5490 | } | 5499 | } |
5491 | 5500 | ||
5492 | static void ipw_merge_adhoc_network(void *data) | 5501 | static void ipw_merge_adhoc_network(struct work_struct *work) |
5493 | { | 5502 | { |
5494 | struct ipw_priv *priv = data; | 5503 | struct ipw_priv *priv = |
5504 | container_of(work, struct ipw_priv, merge_networks); | ||
5495 | struct ieee80211_network *network = NULL; | 5505 | struct ieee80211_network *network = NULL; |
5496 | struct ipw_network_match match = { | 5506 | struct ipw_network_match match = { |
5497 | .network = priv->assoc_network | 5507 | .network = priv->assoc_network |
@@ -5948,11 +5958,12 @@ static void ipw_adhoc_check(void *data) | |||
5948 | priv->assoc_request.beacon_interval); | 5958 | priv->assoc_request.beacon_interval); |
5949 | } | 5959 | } |
5950 | 5960 | ||
5951 | static void ipw_bg_adhoc_check(void *data) | 5961 | static void ipw_bg_adhoc_check(struct work_struct *work) |
5952 | { | 5962 | { |
5953 | struct ipw_priv *priv = data; | 5963 | struct ipw_priv *priv = |
5964 | container_of(work, struct ipw_priv, adhoc_check.work); | ||
5954 | mutex_lock(&priv->mutex); | 5965 | mutex_lock(&priv->mutex); |
5955 | ipw_adhoc_check(data); | 5966 | ipw_adhoc_check(priv); |
5956 | mutex_unlock(&priv->mutex); | 5967 | mutex_unlock(&priv->mutex); |
5957 | } | 5968 | } |
5958 | 5969 | ||
@@ -6299,19 +6310,26 @@ done: | |||
6299 | return err; | 6310 | return err; |
6300 | } | 6311 | } |
6301 | 6312 | ||
6302 | static int ipw_request_passive_scan(struct ipw_priv *priv) { | 6313 | static void ipw_request_passive_scan(struct work_struct *work) |
6303 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | 6314 | { |
6315 | struct ipw_priv *priv = | ||
6316 | container_of(work, struct ipw_priv, request_passive_scan); | ||
6317 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | ||
6304 | } | 6318 | } |
6305 | 6319 | ||
6306 | static int ipw_request_scan(struct ipw_priv *priv) { | 6320 | static void ipw_request_scan(struct work_struct *work) |
6307 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | 6321 | { |
6322 | struct ipw_priv *priv = | ||
6323 | container_of(work, struct ipw_priv, request_scan.work); | ||
6324 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | ||
6308 | } | 6325 | } |
6309 | 6326 | ||
6310 | static void ipw_bg_abort_scan(void *data) | 6327 | static void ipw_bg_abort_scan(struct work_struct *work) |
6311 | { | 6328 | { |
6312 | struct ipw_priv *priv = data; | 6329 | struct ipw_priv *priv = |
6330 | container_of(work, struct ipw_priv, abort_scan); | ||
6313 | mutex_lock(&priv->mutex); | 6331 | mutex_lock(&priv->mutex); |
6314 | ipw_abort_scan(data); | 6332 | ipw_abort_scan(priv); |
6315 | mutex_unlock(&priv->mutex); | 6333 | mutex_unlock(&priv->mutex); |
6316 | } | 6334 | } |
6317 | 6335 | ||
@@ -7084,9 +7102,10 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, | |||
7084 | /* | 7102 | /* |
7085 | * background support to run QoS activate functionality | 7103 | * background support to run QoS activate functionality |
7086 | */ | 7104 | */ |
7087 | static void ipw_bg_qos_activate(void *data) | 7105 | static void ipw_bg_qos_activate(struct work_struct *work) |
7088 | { | 7106 | { |
7089 | struct ipw_priv *priv = data; | 7107 | struct ipw_priv *priv = |
7108 | container_of(work, struct ipw_priv, qos_activate); | ||
7090 | 7109 | ||
7091 | if (priv == NULL) | 7110 | if (priv == NULL) |
7092 | return; | 7111 | return; |
@@ -7394,11 +7413,12 @@ static void ipw_roam(void *data) | |||
7394 | priv->status &= ~STATUS_ROAMING; | 7413 | priv->status &= ~STATUS_ROAMING; |
7395 | } | 7414 | } |
7396 | 7415 | ||
7397 | static void ipw_bg_roam(void *data) | 7416 | static void ipw_bg_roam(struct work_struct *work) |
7398 | { | 7417 | { |
7399 | struct ipw_priv *priv = data; | 7418 | struct ipw_priv *priv = |
7419 | container_of(work, struct ipw_priv, roam); | ||
7400 | mutex_lock(&priv->mutex); | 7420 | mutex_lock(&priv->mutex); |
7401 | ipw_roam(data); | 7421 | ipw_roam(priv); |
7402 | mutex_unlock(&priv->mutex); | 7422 | mutex_unlock(&priv->mutex); |
7403 | } | 7423 | } |
7404 | 7424 | ||
@@ -7479,8 +7499,8 @@ static int ipw_associate(void *data) | |||
7479 | &priv->request_scan, | 7499 | &priv->request_scan, |
7480 | SCAN_INTERVAL); | 7500 | SCAN_INTERVAL); |
7481 | else | 7501 | else |
7482 | queue_work(priv->workqueue, | 7502 | queue_delayed_work(priv->workqueue, |
7483 | &priv->request_scan); | 7503 | &priv->request_scan, 0); |
7484 | } | 7504 | } |
7485 | 7505 | ||
7486 | return 0; | 7506 | return 0; |
@@ -7491,11 +7511,12 @@ static int ipw_associate(void *data) | |||
7491 | return 1; | 7511 | return 1; |
7492 | } | 7512 | } |
7493 | 7513 | ||
7494 | static void ipw_bg_associate(void *data) | 7514 | static void ipw_bg_associate(struct work_struct *work) |
7495 | { | 7515 | { |
7496 | struct ipw_priv *priv = data; | 7516 | struct ipw_priv *priv = |
7517 | container_of(work, struct ipw_priv, associate); | ||
7497 | mutex_lock(&priv->mutex); | 7518 | mutex_lock(&priv->mutex); |
7498 | ipw_associate(data); | 7519 | ipw_associate(priv); |
7499 | mutex_unlock(&priv->mutex); | 7520 | mutex_unlock(&priv->mutex); |
7500 | } | 7521 | } |
7501 | 7522 | ||
@@ -9410,7 +9431,7 @@ static int ipw_wx_set_scan(struct net_device *dev, | |||
9410 | 9431 | ||
9411 | IPW_DEBUG_WX("Start scan\n"); | 9432 | IPW_DEBUG_WX("Start scan\n"); |
9412 | 9433 | ||
9413 | queue_work(priv->workqueue, &priv->request_scan); | 9434 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); |
9414 | 9435 | ||
9415 | return 0; | 9436 | return 0; |
9416 | } | 9437 | } |
@@ -10547,11 +10568,12 @@ static void ipw_rf_kill(void *adapter) | |||
10547 | spin_unlock_irqrestore(&priv->lock, flags); | 10568 | spin_unlock_irqrestore(&priv->lock, flags); |
10548 | } | 10569 | } |
10549 | 10570 | ||
10550 | static void ipw_bg_rf_kill(void *data) | 10571 | static void ipw_bg_rf_kill(struct work_struct *work) |
10551 | { | 10572 | { |
10552 | struct ipw_priv *priv = data; | 10573 | struct ipw_priv *priv = |
10574 | container_of(work, struct ipw_priv, rf_kill.work); | ||
10553 | mutex_lock(&priv->mutex); | 10575 | mutex_lock(&priv->mutex); |
10554 | ipw_rf_kill(data); | 10576 | ipw_rf_kill(priv); |
10555 | mutex_unlock(&priv->mutex); | 10577 | mutex_unlock(&priv->mutex); |
10556 | } | 10578 | } |
10557 | 10579 | ||
@@ -10582,11 +10604,12 @@ static void ipw_link_up(struct ipw_priv *priv) | |||
10582 | queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); | 10604 | queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); |
10583 | } | 10605 | } |
10584 | 10606 | ||
10585 | static void ipw_bg_link_up(void *data) | 10607 | static void ipw_bg_link_up(struct work_struct *work) |
10586 | { | 10608 | { |
10587 | struct ipw_priv *priv = data; | 10609 | struct ipw_priv *priv = |
10610 | container_of(work, struct ipw_priv, link_up); | ||
10588 | mutex_lock(&priv->mutex); | 10611 | mutex_lock(&priv->mutex); |
10589 | ipw_link_up(data); | 10612 | ipw_link_up(priv); |
10590 | mutex_unlock(&priv->mutex); | 10613 | mutex_unlock(&priv->mutex); |
10591 | } | 10614 | } |
10592 | 10615 | ||
@@ -10606,15 +10629,16 @@ static void ipw_link_down(struct ipw_priv *priv) | |||
10606 | 10629 | ||
10607 | if (!(priv->status & STATUS_EXIT_PENDING)) { | 10630 | if (!(priv->status & STATUS_EXIT_PENDING)) { |
10608 | /* Queue up another scan... */ | 10631 | /* Queue up another scan... */ |
10609 | queue_work(priv->workqueue, &priv->request_scan); | 10632 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); |
10610 | } | 10633 | } |
10611 | } | 10634 | } |
10612 | 10635 | ||
10613 | static void ipw_bg_link_down(void *data) | 10636 | static void ipw_bg_link_down(struct work_struct *work) |
10614 | { | 10637 | { |
10615 | struct ipw_priv *priv = data; | 10638 | struct ipw_priv *priv = |
10639 | container_of(work, struct ipw_priv, link_down); | ||
10616 | mutex_lock(&priv->mutex); | 10640 | mutex_lock(&priv->mutex); |
10617 | ipw_link_down(data); | 10641 | ipw_link_down(priv); |
10618 | mutex_unlock(&priv->mutex); | 10642 | mutex_unlock(&priv->mutex); |
10619 | } | 10643 | } |
10620 | 10644 | ||
@@ -10626,38 +10650,30 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10626 | init_waitqueue_head(&priv->wait_command_queue); | 10650 | init_waitqueue_head(&priv->wait_command_queue); |
10627 | init_waitqueue_head(&priv->wait_state); | 10651 | init_waitqueue_head(&priv->wait_state); |
10628 | 10652 | ||
10629 | INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); | 10653 | INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); |
10630 | INIT_WORK(&priv->associate, ipw_bg_associate, priv); | 10654 | INIT_WORK(&priv->associate, ipw_bg_associate); |
10631 | INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); | 10655 | INIT_WORK(&priv->disassociate, ipw_bg_disassociate); |
10632 | INIT_WORK(&priv->system_config, ipw_system_config, priv); | 10656 | INIT_WORK(&priv->system_config, ipw_system_config); |
10633 | INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv); | 10657 | INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); |
10634 | INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); | 10658 | INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); |
10635 | INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); | 10659 | INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); |
10636 | INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); | 10660 | INIT_WORK(&priv->up, ipw_bg_up); |
10637 | INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); | 10661 | INIT_WORK(&priv->down, ipw_bg_down); |
10638 | INIT_WORK(&priv->request_scan, | 10662 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); |
10639 | (void (*)(void *))ipw_request_scan, priv); | 10663 | INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); |
10640 | INIT_WORK(&priv->request_passive_scan, | 10664 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); |
10641 | (void (*)(void *))ipw_request_passive_scan, priv); | 10665 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); |
10642 | INIT_WORK(&priv->gather_stats, | 10666 | INIT_WORK(&priv->roam, ipw_bg_roam); |
10643 | (void (*)(void *))ipw_bg_gather_stats, priv); | 10667 | INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); |
10644 | INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); | 10668 | INIT_WORK(&priv->link_up, ipw_bg_link_up); |
10645 | INIT_WORK(&priv->roam, ipw_bg_roam, priv); | 10669 | INIT_WORK(&priv->link_down, ipw_bg_link_down); |
10646 | INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); | 10670 | INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); |
10647 | INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); | 10671 | INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); |
10648 | INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); | 10672 | INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); |
10649 | INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, | 10673 | INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); |
10650 | priv); | ||
10651 | INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off, | ||
10652 | priv); | ||
10653 | INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off, | ||
10654 | priv); | ||
10655 | INIT_WORK(&priv->merge_networks, | ||
10656 | (void (*)(void *))ipw_merge_adhoc_network, priv); | ||
10657 | 10674 | ||
10658 | #ifdef CONFIG_IPW2200_QOS | 10675 | #ifdef CONFIG_IPW2200_QOS |
10659 | INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, | 10676 | INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); |
10660 | priv); | ||
10661 | #endif /* CONFIG_IPW2200_QOS */ | 10677 | #endif /* CONFIG_IPW2200_QOS */ |
10662 | 10678 | ||
10663 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | 10679 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) |
@@ -11190,7 +11206,8 @@ static int ipw_up(struct ipw_priv *priv) | |||
11190 | 11206 | ||
11191 | /* If configure to try and auto-associate, kick | 11207 | /* If configure to try and auto-associate, kick |
11192 | * off a scan. */ | 11208 | * off a scan. */ |
11193 | queue_work(priv->workqueue, &priv->request_scan); | 11209 | queue_delayed_work(priv->workqueue, |
11210 | &priv->request_scan, 0); | ||
11194 | 11211 | ||
11195 | return 0; | 11212 | return 0; |
11196 | } | 11213 | } |
@@ -11211,11 +11228,12 @@ static int ipw_up(struct ipw_priv *priv) | |||
11211 | return -EIO; | 11228 | return -EIO; |
11212 | } | 11229 | } |
11213 | 11230 | ||
11214 | static void ipw_bg_up(void *data) | 11231 | static void ipw_bg_up(struct work_struct *work) |
11215 | { | 11232 | { |
11216 | struct ipw_priv *priv = data; | 11233 | struct ipw_priv *priv = |
11234 | container_of(work, struct ipw_priv, up); | ||
11217 | mutex_lock(&priv->mutex); | 11235 | mutex_lock(&priv->mutex); |
11218 | ipw_up(data); | 11236 | ipw_up(priv); |
11219 | mutex_unlock(&priv->mutex); | 11237 | mutex_unlock(&priv->mutex); |
11220 | } | 11238 | } |
11221 | 11239 | ||
@@ -11282,11 +11300,12 @@ static void ipw_down(struct ipw_priv *priv) | |||
11282 | ipw_led_radio_off(priv); | 11300 | ipw_led_radio_off(priv); |
11283 | } | 11301 | } |
11284 | 11302 | ||
11285 | static void ipw_bg_down(void *data) | 11303 | static void ipw_bg_down(struct work_struct *work) |
11286 | { | 11304 | { |
11287 | struct ipw_priv *priv = data; | 11305 | struct ipw_priv *priv = |
11306 | container_of(work, struct ipw_priv, down); | ||
11288 | mutex_lock(&priv->mutex); | 11307 | mutex_lock(&priv->mutex); |
11289 | ipw_down(data); | 11308 | ipw_down(priv); |
11290 | mutex_unlock(&priv->mutex); | 11309 | mutex_unlock(&priv->mutex); |
11291 | } | 11310 | } |
11292 | 11311 | ||
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index dad5eedefbf..626a240a87d 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1290,21 +1290,21 @@ struct ipw_priv { | |||
1290 | 1290 | ||
1291 | struct workqueue_struct *workqueue; | 1291 | struct workqueue_struct *workqueue; |
1292 | 1292 | ||
1293 | struct work_struct adhoc_check; | 1293 | struct delayed_work adhoc_check; |
1294 | struct work_struct associate; | 1294 | struct work_struct associate; |
1295 | struct work_struct disassociate; | 1295 | struct work_struct disassociate; |
1296 | struct work_struct system_config; | 1296 | struct work_struct system_config; |
1297 | struct work_struct rx_replenish; | 1297 | struct work_struct rx_replenish; |
1298 | struct work_struct request_scan; | 1298 | struct delayed_work request_scan; |
1299 | struct work_struct request_passive_scan; | 1299 | struct work_struct request_passive_scan; |
1300 | struct work_struct adapter_restart; | 1300 | struct work_struct adapter_restart; |
1301 | struct work_struct rf_kill; | 1301 | struct delayed_work rf_kill; |
1302 | struct work_struct up; | 1302 | struct work_struct up; |
1303 | struct work_struct down; | 1303 | struct work_struct down; |
1304 | struct work_struct gather_stats; | 1304 | struct delayed_work gather_stats; |
1305 | struct work_struct abort_scan; | 1305 | struct work_struct abort_scan; |
1306 | struct work_struct roam; | 1306 | struct work_struct roam; |
1307 | struct work_struct scan_check; | 1307 | struct delayed_work scan_check; |
1308 | struct work_struct link_up; | 1308 | struct work_struct link_up; |
1309 | struct work_struct link_down; | 1309 | struct work_struct link_down; |
1310 | 1310 | ||
@@ -1319,9 +1319,9 @@ struct ipw_priv { | |||
1319 | u32 led_ofdm_on; | 1319 | u32 led_ofdm_on; |
1320 | u32 led_ofdm_off; | 1320 | u32 led_ofdm_off; |
1321 | 1321 | ||
1322 | struct work_struct led_link_on; | 1322 | struct delayed_work led_link_on; |
1323 | struct work_struct led_link_off; | 1323 | struct delayed_work led_link_off; |
1324 | struct work_struct led_act_off; | 1324 | struct delayed_work led_act_off; |
1325 | struct work_struct merge_networks; | 1325 | struct work_struct merge_networks; |
1326 | 1326 | ||
1327 | struct ipw_cmd_log *cmdlog; | 1327 | struct ipw_cmd_log *cmdlog; |
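
In ipw2200 only the items that are ever queued with a timeout change type; the rest stay plain work_structs, so the two kinds of handler differ only in the container_of() expression. The driver's ipw_bg_*() wrappers keep their role of taking the mutex before calling the real worker, now passing the recovered priv instead of the old data pointer. A sketch with hypothetical names:

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct example_priv {
		struct mutex mutex;
		struct work_struct associate;	/* always queued immediately */
		struct delayed_work rf_kill;	/* sometimes queued with a delay */
	};

	static void example_bg_associate(struct work_struct *work)
	{
		struct example_priv *priv =
			container_of(work, struct example_priv, associate);

		mutex_lock(&priv->mutex);
		/* example_associate(priv); */
		mutex_unlock(&priv->mutex);
	}

	static void example_bg_rf_kill(struct work_struct *work)
	{
		/* delayed_work handlers go through the embedded .work member */
		struct example_priv *priv =
			container_of(work, struct example_priv, rf_kill.work);

		mutex_lock(&priv->mutex);
		/* example_rf_kill(priv); */
		mutex_unlock(&priv->mutex);
	}
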
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 336cabac13b..936c888e03e 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -980,9 +980,11 @@ static void print_linkstatus(struct net_device *dev, u16 status) | |||
980 | } | 980 | } |
981 | 981 | ||
982 | /* Search scan results for requested BSSID, join it if found */ | 982 | /* Search scan results for requested BSSID, join it if found */ |
983 | static void orinoco_join_ap(struct net_device *dev) | 983 | static void orinoco_join_ap(struct work_struct *work) |
984 | { | 984 | { |
985 | struct orinoco_private *priv = netdev_priv(dev); | 985 | struct orinoco_private *priv = |
986 | container_of(work, struct orinoco_private, join_work); | ||
987 | struct net_device *dev = priv->ndev; | ||
986 | struct hermes *hw = &priv->hw; | 988 | struct hermes *hw = &priv->hw; |
987 | int err; | 989 | int err; |
988 | unsigned long flags; | 990 | unsigned long flags; |
@@ -1055,9 +1057,11 @@ static void orinoco_join_ap(struct net_device *dev) | |||
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | /* Send new BSSID to userspace */ | 1059 | /* Send new BSSID to userspace */ |
1058 | static void orinoco_send_wevents(struct net_device *dev) | 1060 | static void orinoco_send_wevents(struct work_struct *work) |
1059 | { | 1061 | { |
1060 | struct orinoco_private *priv = netdev_priv(dev); | 1062 | struct orinoco_private *priv = |
1063 | container_of(work, struct orinoco_private, wevent_work); | ||
1064 | struct net_device *dev = priv->ndev; | ||
1061 | struct hermes *hw = &priv->hw; | 1065 | struct hermes *hw = &priv->hw; |
1062 | union iwreq_data wrqu; | 1066 | union iwreq_data wrqu; |
1063 | int err; | 1067 | int err; |
@@ -1864,9 +1868,11 @@ __orinoco_set_multicast_list(struct net_device *dev) | |||
1864 | 1868 | ||
1865 | /* This must be called from user context, without locks held - use | 1869 | /* This must be called from user context, without locks held - use |
1866 | * schedule_work() */ | 1870 | * schedule_work() */ |
1867 | static void orinoco_reset(struct net_device *dev) | 1871 | static void orinoco_reset(struct work_struct *work) |
1868 | { | 1872 | { |
1869 | struct orinoco_private *priv = netdev_priv(dev); | 1873 | struct orinoco_private *priv = |
1874 | container_of(work, struct orinoco_private, reset_work); | ||
1875 | struct net_device *dev = priv->ndev; | ||
1870 | struct hermes *hw = &priv->hw; | 1876 | struct hermes *hw = &priv->hw; |
1871 | int err; | 1877 | int err; |
1872 | unsigned long flags; | 1878 | unsigned long flags; |
@@ -2434,9 +2440,9 @@ struct net_device *alloc_orinocodev(int sizeof_card, | |||
2434 | priv->hw_unavailable = 1; /* orinoco_init() must clear this | 2440 | priv->hw_unavailable = 1; /* orinoco_init() must clear this |
2435 | * before anything else touches the | 2441 | * before anything else touches the |
2436 | * hardware */ | 2442 | * hardware */ |
2437 | INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); | 2443 | INIT_WORK(&priv->reset_work, orinoco_reset); |
2438 | INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); | 2444 | INIT_WORK(&priv->join_work, orinoco_join_ap); |
2439 | INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); | 2445 | INIT_WORK(&priv->wevent_work, orinoco_send_wevents); |
2440 | 2446 | ||
2441 | netif_carrier_off(dev); | 2447 | netif_carrier_off(dev); |
2442 | priv->last_linkstatus = 0xffff; | 2448 | priv->last_linkstatus = 0xffff; |
@@ -3608,7 +3614,7 @@ static int orinoco_ioctl_reset(struct net_device *dev, | |||
3608 | printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); | 3614 | printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); |
3609 | 3615 | ||
3610 | /* Firmware reset */ | 3616 | /* Firmware reset */ |
3611 | orinoco_reset(dev); | 3617 | orinoco_reset(&priv->reset_work); |
3612 | } else { | 3618 | } else { |
3613 | printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); | 3619 | printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); |
3614 | 3620 | ||
@@ -4154,7 +4160,7 @@ static int orinoco_ioctl_commit(struct net_device *dev, | |||
4154 | return 0; | 4160 | return 0; |
4155 | 4161 | ||
4156 | if (priv->broken_disableport) { | 4162 | if (priv->broken_disableport) { |
4157 | orinoco_reset(dev); | 4163 | orinoco_reset(&priv->reset_work); |
4158 | return 0; | 4164 | return 0; |
4159 | } | 4165 | } |
4160 | 4166 | ||
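
orinoco used to hand the net_device itself to INIT_WORK(); with the data argument gone, the private structure keeps the device in priv->ndev so the handlers can find it, and the ioctl paths that call orinoco_reset() synchronously now pass the work item instead of the device. Roughly, assuming priv->ndev is populated when the device is allocated:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct example_priv {
		struct net_device *ndev;	/* set when the device is created */
		struct work_struct reset_work;
	};

	static void example_reset(struct work_struct *work)
	{
		struct example_priv *priv =
			container_of(work, struct example_priv, reset_work);
		struct net_device *dev = priv->ndev;

		printk(KERN_DEBUG "%s: resetting\n", dev->name);
		/* ... reinitialize the firmware for dev ... */
	}

	/* Synchronous callers invoke the handler through the work item:
	 *	example_reset(&priv->reset_work);
	 * rather than passing dev directly. */
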
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 4a20e45de3c..a87eb51886c 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c | |||
@@ -157,8 +157,9 @@ prism54_mib_init(islpci_private *priv) | |||
157 | * schedule_work(), thus we can as well use sleeping semaphore | 157 | * schedule_work(), thus we can as well use sleeping semaphore |
158 | * locking */ | 158 | * locking */ |
159 | void | 159 | void |
160 | prism54_update_stats(islpci_private *priv) | 160 | prism54_update_stats(struct work_struct *work) |
161 | { | 161 | { |
162 | islpci_private *priv = container_of(work, islpci_private, stats_work); | ||
162 | char *data; | 163 | char *data; |
163 | int j; | 164 | int j; |
164 | struct obj_bss bss, *bss2; | 165 | struct obj_bss bss, *bss2; |
@@ -2493,9 +2494,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2493 | * interrupt context, no locks held. | 2494 | * interrupt context, no locks held. |
2494 | */ | 2495 | */ |
2495 | void | 2496 | void |
2496 | prism54_process_trap(void *data) | 2497 | prism54_process_trap(struct work_struct *work) |
2497 | { | 2498 | { |
2498 | struct islpci_mgmtframe *frame = data; | 2499 | struct islpci_mgmtframe *frame = |
2500 | container_of(work, struct islpci_mgmtframe, ws); | ||
2499 | struct net_device *ndev = frame->ndev; | 2501 | struct net_device *ndev = frame->ndev; |
2500 | enum oid_num_t n = mgt_oidtonum(frame->header->oid); | 2502 | enum oid_num_t n = mgt_oidtonum(frame->header->oid); |
2501 | 2503 | ||
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h index e8183d30c52..bcfbfb9281d 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/prism54/isl_ioctl.h | |||
@@ -31,12 +31,12 @@ | |||
31 | void prism54_mib_init(islpci_private *); | 31 | void prism54_mib_init(islpci_private *); |
32 | 32 | ||
33 | struct iw_statistics *prism54_get_wireless_stats(struct net_device *); | 33 | struct iw_statistics *prism54_get_wireless_stats(struct net_device *); |
34 | void prism54_update_stats(islpci_private *); | 34 | void prism54_update_stats(struct work_struct *); |
35 | 35 | ||
36 | void prism54_acl_init(struct islpci_acl *); | 36 | void prism54_acl_init(struct islpci_acl *); |
37 | void prism54_acl_clean(struct islpci_acl *); | 37 | void prism54_acl_clean(struct islpci_acl *); |
38 | 38 | ||
39 | void prism54_process_trap(void *); | 39 | void prism54_process_trap(struct work_struct *); |
40 | 40 | ||
41 | void prism54_wpa_bss_ie_init(islpci_private *priv); | 41 | void prism54_wpa_bss_ie_init(islpci_private *priv); |
42 | void prism54_wpa_bss_ie_clean(islpci_private *priv); | 42 | void prism54_wpa_bss_ie_clean(islpci_private *priv); |
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 1e0603ca436..f057fd9fcd7 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c | |||
@@ -860,11 +860,10 @@ islpci_setup(struct pci_dev *pdev) | |||
860 | priv->state_off = 1; | 860 | priv->state_off = 1; |
861 | 861 | ||
862 | /* initialize workqueue's */ | 862 | /* initialize workqueue's */ |
863 | INIT_WORK(&priv->stats_work, | 863 | INIT_WORK(&priv->stats_work, prism54_update_stats); |
864 | (void (*)(void *)) prism54_update_stats, priv); | ||
865 | priv->stats_timestamp = 0; | 864 | priv->stats_timestamp = 0; |
866 | 865 | ||
867 | INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); | 866 | INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake); |
868 | priv->reset_task_pending = 0; | 867 | priv->reset_task_pending = 0; |
869 | 868 | ||
870 | /* allocate various memory areas */ | 869 | /* allocate various memory areas */ |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 676d83813dc..b1122912ee2 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv) | |||
480 | } | 480 | } |
481 | 481 | ||
482 | void | 482 | void |
483 | islpci_do_reset_and_wake(void *data) | 483 | islpci_do_reset_and_wake(struct work_struct *work) |
484 | { | 484 | { |
485 | islpci_private *priv = data; | 485 | islpci_private *priv = container_of(work, islpci_private, reset_task); |
486 | 486 | ||
487 | islpci_reset(priv, 1); | 487 | islpci_reset(priv, 1); |
488 | priv->reset_task_pending = 0; | 488 | priv->reset_task_pending = 0; |
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h index 26789454067..5bf820defbd 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ b/drivers/net/wireless/prism54/islpci_eth.h | |||
@@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *); | |||
67 | int islpci_eth_transmit(struct sk_buff *, struct net_device *); | 67 | int islpci_eth_transmit(struct sk_buff *, struct net_device *); |
68 | int islpci_eth_receive(islpci_private *); | 68 | int islpci_eth_receive(islpci_private *); |
69 | void islpci_eth_tx_timeout(struct net_device *); | 69 | void islpci_eth_tx_timeout(struct net_device *); |
70 | void islpci_do_reset_and_wake(void *data); | 70 | void islpci_do_reset_and_wake(struct work_struct *); |
71 | 71 | ||
72 | #endif /* _ISL_GEN_H */ | 72 | #endif /* _ISL_GEN_H */ |
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index 036a875054c..2246f7930b4 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c | |||
@@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev) | |||
386 | 386 | ||
387 | /* Create work to handle trap out of interrupt | 387 | /* Create work to handle trap out of interrupt |
388 | * context. */ | 388 | * context. */ |
389 | INIT_WORK(&frame->ws, prism54_process_trap, frame); | 389 | INIT_WORK(&frame->ws, prism54_process_trap); |
390 | schedule_work(&frame->ws); | 390 | schedule_work(&frame->ws); |
391 | 391 | ||
392 | } else { | 392 | } else { |
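
When the payload is an object allocated per event, as with prism54's management frames, the work_struct is embedded in that object and the handler gets the object back with container_of(); the third argument that INIT_WORK() used to take simply disappears. A sketch with made-up names:

	#include <linux/workqueue.h>

	struct example_frame {
		struct work_struct ws;	/* embedded so the handler can find the frame */
		/* ... decoded frame payload ... */
	};

	static void example_process_trap(struct work_struct *work)
	{
		struct example_frame *frame =
			container_of(work, struct example_frame, ws);

		/* ... handle the trap described by frame ... */
	}

	static void example_receive(struct example_frame *frame)
	{
		INIT_WORK(&frame->ws, example_process_trap);
		schedule_work(&frame->ws);
	}
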
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 2696f95b927..f1573a9c233 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -32,8 +32,8 @@ | |||
32 | 32 | ||
33 | static void ieee_init(struct ieee80211_device *ieee); | 33 | static void ieee_init(struct ieee80211_device *ieee); |
34 | static void softmac_init(struct ieee80211softmac_device *sm); | 34 | static void softmac_init(struct ieee80211softmac_device *sm); |
35 | static void set_rts_cts_work(void *d); | 35 | static void set_rts_cts_work(struct work_struct *work); |
36 | static void set_basic_rates_work(void *d); | 36 | static void set_basic_rates_work(struct work_struct *work); |
37 | 37 | ||
38 | static void housekeeping_init(struct zd_mac *mac); | 38 | static void housekeeping_init(struct zd_mac *mac); |
39 | static void housekeeping_enable(struct zd_mac *mac); | 39 | static void housekeeping_enable(struct zd_mac *mac); |
@@ -48,8 +48,8 @@ int zd_mac_init(struct zd_mac *mac, | |||
48 | memset(mac, 0, sizeof(*mac)); | 48 | memset(mac, 0, sizeof(*mac)); |
49 | spin_lock_init(&mac->lock); | 49 | spin_lock_init(&mac->lock); |
50 | mac->netdev = netdev; | 50 | mac->netdev = netdev; |
51 | INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac); | 51 | INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); |
52 | INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac); | 52 | INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); |
53 | 53 | ||
54 | ieee_init(ieee); | 54 | ieee_init(ieee); |
55 | softmac_init(ieee80211_priv(netdev)); | 55 | softmac_init(ieee80211_priv(netdev)); |
@@ -366,9 +366,10 @@ static void try_enable_tx(struct zd_mac *mac) | |||
366 | spin_unlock_irqrestore(&mac->lock, flags); | 366 | spin_unlock_irqrestore(&mac->lock, flags); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void set_rts_cts_work(void *d) | 369 | static void set_rts_cts_work(struct work_struct *work) |
370 | { | 370 | { |
371 | struct zd_mac *mac = d; | 371 | struct zd_mac *mac = |
372 | container_of(work, struct zd_mac, set_rts_cts_work.work); | ||
372 | unsigned long flags; | 373 | unsigned long flags; |
373 | u8 rts_rate; | 374 | u8 rts_rate; |
374 | unsigned int short_preamble; | 375 | unsigned int short_preamble; |
@@ -387,9 +388,10 @@ static void set_rts_cts_work(void *d) | |||
387 | try_enable_tx(mac); | 388 | try_enable_tx(mac); |
388 | } | 389 | } |
389 | 390 | ||
390 | static void set_basic_rates_work(void *d) | 391 | static void set_basic_rates_work(struct work_struct *work) |
391 | { | 392 | { |
392 | struct zd_mac *mac = d; | 393 | struct zd_mac *mac = |
394 | container_of(work, struct zd_mac, set_basic_rates_work.work); | ||
393 | unsigned long flags; | 395 | unsigned long flags; |
394 | u16 basic_rates; | 396 | u16 basic_rates; |
395 | 397 | ||
@@ -467,12 +469,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes) | |||
467 | if (need_set_rts_cts && !mac->updating_rts_rate) { | 469 | if (need_set_rts_cts && !mac->updating_rts_rate) { |
468 | mac->updating_rts_rate = 1; | 470 | mac->updating_rts_rate = 1; |
469 | netif_stop_queue(mac->netdev); | 471 | netif_stop_queue(mac->netdev); |
470 | queue_work(zd_workqueue, &mac->set_rts_cts_work); | 472 | queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0); |
471 | } | 473 | } |
472 | if (need_set_rates && !mac->updating_basic_rates) { | 474 | if (need_set_rates && !mac->updating_basic_rates) { |
473 | mac->updating_basic_rates = 1; | 475 | mac->updating_basic_rates = 1; |
474 | netif_stop_queue(mac->netdev); | 476 | netif_stop_queue(mac->netdev); |
475 | queue_work(zd_workqueue, &mac->set_basic_rates_work); | 477 | queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work, |
478 | 0); | ||
476 | } | 479 | } |
477 | spin_unlock_irqrestore(&mac->lock, flags); | 480 | spin_unlock_irqrestore(&mac->lock, flags); |
478 | } | 481 | } |
@@ -1182,9 +1185,10 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) | |||
1182 | 1185 | ||
1183 | #define LINK_LED_WORK_DELAY HZ | 1186 | #define LINK_LED_WORK_DELAY HZ |
1184 | 1187 | ||
1185 | static void link_led_handler(void *p) | 1188 | static void link_led_handler(struct work_struct *work) |
1186 | { | 1189 | { |
1187 | struct zd_mac *mac = p; | 1190 | struct zd_mac *mac = |
1191 | container_of(work, struct zd_mac, housekeeping.link_led_work.work); | ||
1188 | struct zd_chip *chip = &mac->chip; | 1192 | struct zd_chip *chip = &mac->chip; |
1189 | struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); | 1193 | struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); |
1190 | int is_associated; | 1194 | int is_associated; |
@@ -1205,7 +1209,7 @@ static void link_led_handler(void *p) | |||
1205 | 1209 | ||
1206 | static void housekeeping_init(struct zd_mac *mac) | 1210 | static void housekeeping_init(struct zd_mac *mac) |
1207 | { | 1211 | { |
1208 | INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); | 1212 | INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler); |
1209 | } | 1213 | } |
1210 | 1214 | ||
1211 | static void housekeeping_enable(struct zd_mac *mac) | 1215 | static void housekeeping_enable(struct zd_mac *mac) |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 5dcfb251f02..d4e8b870409 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -119,7 +119,7 @@ struct rx_status { | |||
119 | #define ZD_RX_ERROR 0x80 | 119 | #define ZD_RX_ERROR 0x80 |
120 | 120 | ||
121 | struct housekeeping { | 121 | struct housekeeping { |
122 | struct work_struct link_led_work; | 122 | struct delayed_work link_led_work; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | #define ZD_MAC_STATS_BUFFER_SIZE 16 | 125 | #define ZD_MAC_STATS_BUFFER_SIZE 16 |
@@ -133,8 +133,8 @@ struct zd_mac { | |||
133 | struct iw_statistics iw_stats; | 133 | struct iw_statistics iw_stats; |
134 | 134 | ||
135 | struct housekeeping housekeeping; | 135 | struct housekeeping housekeeping; |
136 | struct work_struct set_rts_cts_work; | 136 | struct delayed_work set_rts_cts_work; |
137 | struct work_struct set_basic_rates_work; | 137 | struct delayed_work set_basic_rates_work; |
138 | 138 | ||
139 | unsigned int stats_count; | 139 | unsigned int stats_count; |
140 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 140 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
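
zd1211rw's link LED work becomes a delayed_work as well; LINK_LED_WORK_DELAY above suggests it is re-queued periodically. One plausible shape for such a periodic item, with illustrative names and with the re-arm step shown as an assumption for the sketch rather than taken from the driver:

	#include <linux/jiffies.h>
	#include <linux/workqueue.h>

	#define EXAMPLE_LED_DELAY HZ

	struct example_mac {
		struct workqueue_struct *wq;
		struct delayed_work link_led_work;
	};

	static void example_link_led(struct work_struct *work)
	{
		struct example_mac *mac =
			container_of(work, struct example_mac, link_led_work.work);

		/* ... reflect the association state on the LED ... */

		/* Re-arm the periodic work item (assumed behaviour). */
		queue_delayed_work(mac->wq, &mac->link_led_work, EXAMPLE_LED_DELAY);
	}

	static void example_housekeeping_init(struct example_mac *mac)
	{
		INIT_DELAYED_WORK(&mac->link_led_work, example_link_led);
	}
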
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index fc4bc9b94c7..a83c3db7d18 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; | 30 | struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; |
31 | 31 | ||
32 | static void wq_sync_buffer(void *); | 32 | static void wq_sync_buffer(struct work_struct *work); |
33 | 33 | ||
34 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) | 34 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) |
35 | static int work_enabled; | 35 | static int work_enabled; |
@@ -65,7 +65,7 @@ int alloc_cpu_buffers(void) | |||
65 | b->sample_received = 0; | 65 | b->sample_received = 0; |
66 | b->sample_lost_overflow = 0; | 66 | b->sample_lost_overflow = 0; |
67 | b->cpu = i; | 67 | b->cpu = i; |
68 | INIT_WORK(&b->work, wq_sync_buffer, b); | 68 | INIT_DELAYED_WORK(&b->work, wq_sync_buffer); |
69 | } | 69 | } |
70 | return 0; | 70 | return 0; |
71 | 71 | ||
@@ -282,9 +282,10 @@ void oprofile_add_trace(unsigned long pc) | |||
282 | * By using schedule_delayed_work_on and then schedule_delayed_work | 282 | * By using schedule_delayed_work_on and then schedule_delayed_work |
283 | * we guarantee this will stay on the correct cpu | 283 | * we guarantee this will stay on the correct cpu |
284 | */ | 284 | */ |
285 | static void wq_sync_buffer(void * data) | 285 | static void wq_sync_buffer(struct work_struct *work) |
286 | { | 286 | { |
287 | struct oprofile_cpu_buffer * b = data; | 287 | struct oprofile_cpu_buffer * b = |
288 | container_of(work, struct oprofile_cpu_buffer, work.work); | ||
288 | if (b->cpu != smp_processor_id()) { | 289 | if (b->cpu != smp_processor_id()) { |
289 | printk("WQ on CPU%d, prefer CPU%d\n", | 290 | printk("WQ on CPU%d, prefer CPU%d\n", |
290 | smp_processor_id(), b->cpu); | 291 | smp_processor_id(), b->cpu); |
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index 09abb80e057..49900d9e323 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h | |||
@@ -43,7 +43,7 @@ struct oprofile_cpu_buffer { | |||
43 | unsigned long sample_lost_overflow; | 43 | unsigned long sample_lost_overflow; |
44 | unsigned long backtrace_aborted; | 44 | unsigned long backtrace_aborted; |
45 | int cpu; | 45 | int cpu; |
46 | struct work_struct work; | 46 | struct delayed_work work; |
47 | } ____cacheline_aligned; | 47 | } ____cacheline_aligned; |
48 | 48 | ||
49 | extern struct oprofile_cpu_buffer cpu_buffer[]; | 49 | extern struct oprofile_cpu_buffer cpu_buffer[]; |
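
In the oprofile buffers the delayed_work member is itself called work, which is why the container_of() expression above reads work.work: the outer name is the handler argument, the inner .work is the embedded member. A sketch of the per-CPU setup, with names that are only illustrative:

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	struct example_cpu_buffer {
		int cpu;
		struct delayed_work work;	/* member itself named "work" */
	};

	static void example_sync(struct work_struct *work)
	{
		struct example_cpu_buffer *b =
			container_of(work, struct example_cpu_buffer, work.work);

		if (b->cpu != smp_processor_id())
			printk("example: running on CPU%d, expected CPU%d\n",
			       smp_processor_id(), b->cpu);

		/* ... flush the sample buffer for b->cpu ... */
	}

	static void example_init_buffers(struct example_cpu_buffer *bufs, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			bufs[i].cpu = i;
			INIT_DELAYED_WORK(&bufs[i].work, example_sync);
		}
	}
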
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ea2087c3414..50757695844 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -70,7 +70,7 @@ struct slot { | |||
70 | struct hotplug_slot *hotplug_slot; | 70 | struct hotplug_slot *hotplug_slot; |
71 | struct list_head slot_list; | 71 | struct list_head slot_list; |
72 | char name[SLOT_NAME_SIZE]; | 72 | char name[SLOT_NAME_SIZE]; |
73 | struct work_struct work; /* work for button event */ | 73 | struct delayed_work work; /* work for button event */ |
74 | struct mutex lock; | 74 | struct mutex lock; |
75 | }; | 75 | }; |
76 | 76 | ||
@@ -187,7 +187,7 @@ extern int shpchp_configure_device(struct slot *p_slot); | |||
187 | extern int shpchp_unconfigure_device(struct slot *p_slot); | 187 | extern int shpchp_unconfigure_device(struct slot *p_slot); |
188 | extern void shpchp_remove_ctrl_files(struct controller *ctrl); | 188 | extern void shpchp_remove_ctrl_files(struct controller *ctrl); |
189 | extern void cleanup_slots(struct controller *ctrl); | 189 | extern void cleanup_slots(struct controller *ctrl); |
190 | extern void queue_pushbutton_work(void *data); | 190 | extern void queue_pushbutton_work(struct work_struct *work); |
191 | 191 | ||
192 | 192 | ||
193 | #ifdef CONFIG_ACPI | 193 | #ifdef CONFIG_ACPI |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 235c18a2239..4eac85b3d90 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl) | |||
159 | goto error_info; | 159 | goto error_info; |
160 | 160 | ||
161 | slot->number = sun; | 161 | slot->number = sun; |
162 | INIT_WORK(&slot->work, queue_pushbutton_work, slot); | 162 | INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work); |
163 | 163 | ||
164 | /* register this slot with the hotplug pci core */ | 164 | /* register this slot with the hotplug pci core */ |
165 | hotplug_slot->private = slot; | 165 | hotplug_slot->private = slot; |
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index c39901dbff2..158ac783609 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include "../pci.h" | 36 | #include "../pci.h" |
37 | #include "shpchp.h" | 37 | #include "shpchp.h" |
38 | 38 | ||
39 | static void interrupt_event_handler(void *data); | 39 | static void interrupt_event_handler(struct work_struct *work); |
40 | static int shpchp_enable_slot(struct slot *p_slot); | 40 | static int shpchp_enable_slot(struct slot *p_slot); |
41 | static int shpchp_disable_slot(struct slot *p_slot); | 41 | static int shpchp_disable_slot(struct slot *p_slot); |
42 | 42 | ||
@@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
50 | 50 | ||
51 | info->event_type = event_type; | 51 | info->event_type = event_type; |
52 | info->p_slot = p_slot; | 52 | info->p_slot = p_slot; |
53 | INIT_WORK(&info->work, interrupt_event_handler, info); | 53 | INIT_WORK(&info->work, interrupt_event_handler); |
54 | 54 | ||
55 | schedule_work(&info->work); | 55 | schedule_work(&info->work); |
56 | 56 | ||
@@ -408,9 +408,10 @@ struct pushbutton_work_info { | |||
408 | * Handles all pending events and exits. | 408 | * Handles all pending events and exits. |
409 | * | 409 | * |
410 | */ | 410 | */ |
411 | static void shpchp_pushbutton_thread(void *data) | 411 | static void shpchp_pushbutton_thread(struct work_struct *work) |
412 | { | 412 | { |
413 | struct pushbutton_work_info *info = data; | 413 | struct pushbutton_work_info *info = |
414 | container_of(work, struct pushbutton_work_info, work); | ||
414 | struct slot *p_slot = info->p_slot; | 415 | struct slot *p_slot = info->p_slot; |
415 | 416 | ||
416 | mutex_lock(&p_slot->lock); | 417 | mutex_lock(&p_slot->lock); |
@@ -436,9 +437,9 @@ static void shpchp_pushbutton_thread(void *data) | |||
436 | kfree(info); | 437 | kfree(info); |
437 | } | 438 | } |
438 | 439 | ||
439 | void queue_pushbutton_work(void *data) | 440 | void queue_pushbutton_work(struct work_struct *work) |
440 | { | 441 | { |
441 | struct slot *p_slot = data; | 442 | struct slot *p_slot = container_of(work, struct slot, work.work); |
442 | struct pushbutton_work_info *info; | 443 | struct pushbutton_work_info *info; |
443 | 444 | ||
444 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 445 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
@@ -447,7 +448,7 @@ void queue_pushbutton_work(void *data) | |||
447 | return; | 448 | return; |
448 | } | 449 | } |
449 | info->p_slot = p_slot; | 450 | info->p_slot = p_slot; |
450 | INIT_WORK(&info->work, shpchp_pushbutton_thread, info); | 451 | INIT_WORK(&info->work, shpchp_pushbutton_thread); |
451 | 452 | ||
452 | mutex_lock(&p_slot->lock); | 453 | mutex_lock(&p_slot->lock); |
453 | switch (p_slot->state) { | 454 | switch (p_slot->state) { |
@@ -541,9 +542,9 @@ static void handle_button_press_event(struct slot *p_slot) | |||
541 | } | 542 | } |
542 | } | 543 | } |
543 | 544 | ||
544 | static void interrupt_event_handler(void *data) | 545 | static void interrupt_event_handler(struct work_struct *work) |
545 | { | 546 | { |
546 | struct event_info *info = data; | 547 | struct event_info *info = container_of(work, struct event_info, work); |
547 | struct slot *p_slot = info->p_slot; | 548 | struct slot *p_slot = info->p_slot; |
548 | 549 | ||
549 | mutex_lock(&p_slot->lock); | 550 | mutex_lock(&p_slot->lock); |
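
The shpchp hunks above show the core of this workqueue conversion: the handler now receives the struct work_struct itself, and the private data is recovered with container_of() from the object that embeds it, so INIT_WORK() loses its third argument. A minimal sketch of that pattern, using hypothetical names (struct hp_event, hp_event_handler) rather than the shpchp ones:

        #include <linux/kernel.h>
        #include <linux/workqueue.h>
        #include <linux/slab.h>

        /* hypothetical event carrier: the work item is embedded in the data */
        struct hp_event {
                struct work_struct work;
                int event_type;
        };

        static void hp_event_handler(struct work_struct *work)
        {
                /* recover the embedding object from the work pointer */
                struct hp_event *ev = container_of(work, struct hp_event, work);

                printk(KERN_INFO "handling event %d\n", ev->event_type);
                kfree(ev);                      /* one-shot item, freed by its handler */
        }

        static int hp_queue_event(int event_type)
        {
                struct hp_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

                if (!ev)
                        return -ENOMEM;
                ev->event_type = event_type;
                INIT_WORK(&ev->work, hp_event_handler);  /* no "data" argument any more */
                schedule_work(&ev->work);
                return 0;
        }
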
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 04c43ef529a..55866b6b26f 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) | |||
160 | rpc->e_lock = SPIN_LOCK_UNLOCKED; | 160 | rpc->e_lock = SPIN_LOCK_UNLOCKED; |
161 | 161 | ||
162 | rpc->rpd = dev; | 162 | rpc->rpd = dev; |
163 | INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); | 163 | INIT_WORK(&rpc->dpc_handler, aer_isr); |
164 | rpc->prod_idx = rpc->cons_idx = 0; | 164 | rpc->prod_idx = rpc->cons_idx = 0; |
165 | mutex_init(&rpc->rpc_mutex); | 165 | mutex_init(&rpc->rpc_mutex); |
166 | init_waitqueue_head(&rpc->wait_release); | 166 | init_waitqueue_head(&rpc->wait_release); |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index daf0cad88fc..3c0a58f64dd 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type; | |||
118 | extern void aer_enable_rootport(struct aer_rpc *rpc); | 118 | extern void aer_enable_rootport(struct aer_rpc *rpc); |
119 | extern void aer_delete_rootport(struct aer_rpc *rpc); | 119 | extern void aer_delete_rootport(struct aer_rpc *rpc); |
120 | extern int aer_init(struct pcie_device *dev); | 120 | extern int aer_init(struct pcie_device *dev); |
121 | extern void aer_isr(void *context); | 121 | extern void aer_isr(struct work_struct *work); |
122 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 122 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
123 | extern int aer_osc_setup(struct pci_dev *dev); | 123 | extern int aer_osc_setup(struct pci_dev *dev); |
124 | 124 | ||
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 1c7e660d653..08e13033ced 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
690 | 690 | ||
691 | /** | 691 | /** |
692 | * aer_isr - consume errors detected by root port | 692 | * aer_isr - consume errors detected by root port |
693 | * @context: pointer to a private data of pcie device | 693 | * @work: definition of this work item |
694 | * | 694 | * |
695 | * Invoked, as DPC, when root port records new detected error | 695 | * Invoked, as DPC, when root port records new detected error |
696 | **/ | 696 | **/ |
697 | void aer_isr(void *context) | 697 | void aer_isr(struct work_struct *work) |
698 | { | 698 | { |
699 | struct pcie_device *p_device = (struct pcie_device *) context; | 699 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); |
700 | struct aer_rpc *rpc = get_service_data(p_device); | 700 | struct pcie_device *p_device = rpc->rpd; |
701 | struct aer_err_source *e_src; | 701 | struct aer_err_source *e_src; |
702 | 702 | ||
703 | mutex_lock(&rpc->rpc_mutex); | 703 | mutex_lock(&rpc->rpc_mutex); |
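
In aer_isr() the old void *context was the pcie_device, not the structure that embeds the work item, so the converted handler takes container_of() to the aer_rpc and then reads the device back out of a field (rpc->rpd). That is the general recipe whenever the old data pointer differed from the embedding object: keep, or add, a back-pointer field. A hedged sketch with invented names (struct my_rpc, my_device):

        #include <linux/workqueue.h>

        struct my_device;                       /* opaque here */

        struct my_rpc {
                struct my_device *dev;          /* back-pointer kept in the rpc */
                struct work_struct dpc_handler;
        };

        static void my_isr(struct work_struct *work)
        {
                /* container_of() gives the rpc; the device comes from its field */
                struct my_rpc *rpc = container_of(work, struct my_rpc, dpc_handler);
                struct my_device *dev = rpc->dev;

                (void)dev;                      /* consume errors reported for dev ... */
        }

        static void my_rpc_init(struct my_rpc *rpc, struct my_device *dev)
        {
                rpc->dev = dev;                 /* was the INIT_WORK data argument */
                INIT_WORK(&rpc->dpc_handler, my_isr);
        }
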
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 45df12eda3c..7355eb455a8 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -675,9 +675,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s) | |||
675 | } | 675 | } |
676 | 676 | ||
677 | 677 | ||
678 | static void pcmcia_delayed_add_device(void *data) | 678 | static void pcmcia_delayed_add_device(struct work_struct *work) |
679 | { | 679 | { |
680 | struct pcmcia_socket *s = data; | 680 | struct pcmcia_socket *s = |
681 | container_of(work, struct pcmcia_socket, device_add); | ||
681 | ds_dbg(1, "adding additional device to %d\n", s->sock); | 682 | ds_dbg(1, "adding additional device to %d\n", s->sock); |
682 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); | 683 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); |
683 | s->pcmcia_state.device_add_pending = 0; | 684 | s->pcmcia_state.device_add_pending = 0; |
@@ -1349,7 +1350,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev, | |||
1349 | init_waitqueue_head(&socket->queue); | 1350 | init_waitqueue_head(&socket->queue); |
1350 | #endif | 1351 | #endif |
1351 | INIT_LIST_HEAD(&socket->devices_list); | 1352 | INIT_LIST_HEAD(&socket->devices_list); |
1352 | INIT_WORK(&socket->device_add, pcmcia_delayed_add_device, socket); | 1353 | INIT_WORK(&socket->device_add, pcmcia_delayed_add_device); |
1353 | memset(&socket->pcmcia_state, 0, sizeof(u8)); | 1354 | memset(&socket->pcmcia_state, 0, sizeof(u8)); |
1354 | socket->device_count = 0; | 1355 | socket->device_count = 0; |
1355 | 1356 | ||
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 814b9e1873f..828b329e08e 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -53,9 +53,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
53 | * Routine to poll RTC seconds field for change as often as possible, | 53 | * Routine to poll RTC seconds field for change as often as possible, |
54 | * after first RTC_UIE use timer to reduce polling | 54 | * after first RTC_UIE use timer to reduce polling |
55 | */ | 55 | */ |
56 | static void rtc_uie_task(void *data) | 56 | static void rtc_uie_task(struct work_struct *work) |
57 | { | 57 | { |
58 | struct rtc_device *rtc = data; | 58 | struct rtc_device *rtc = |
59 | container_of(work, struct rtc_device, uie_task); | ||
59 | struct rtc_time tm; | 60 | struct rtc_time tm; |
60 | int num = 0; | 61 | int num = 0; |
61 | int err; | 62 | int err; |
@@ -411,7 +412,7 @@ static int rtc_dev_add_device(struct class_device *class_dev, | |||
411 | spin_lock_init(&rtc->irq_lock); | 412 | spin_lock_init(&rtc->irq_lock); |
412 | init_waitqueue_head(&rtc->irq_queue); | 413 | init_waitqueue_head(&rtc->irq_queue); |
413 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | 414 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL |
414 | INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); | 415 | INIT_WORK(&rtc->uie_task, rtc_uie_task); |
415 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); | 416 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); |
416 | #endif | 417 | #endif |
417 | 418 | ||
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index a6aa9107288..bb3cb336054 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags) | |||
849 | hostdata->issue_queue = NULL; | 849 | hostdata->issue_queue = NULL; |
850 | hostdata->disconnected_queue = NULL; | 850 | hostdata->disconnected_queue = NULL; |
851 | 851 | ||
852 | INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); | 852 | INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); |
853 | 853 | ||
854 | #ifdef NCR5380_STATS | 854 | #ifdef NCR5380_STATS |
855 | for (i = 0; i < 8; ++i) { | 855 | for (i = 0; i < 8; ++i) { |
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1016 | 1016 | ||
1017 | /* Run the coroutine if it isn't already running. */ | 1017 | /* Run the coroutine if it isn't already running. */ |
1018 | /* Kick off command processing */ | 1018 | /* Kick off command processing */ |
1019 | schedule_work(&hostdata->coroutine); | 1019 | schedule_delayed_work(&hostdata->coroutine, 0); |
1020 | return 0; | 1020 | return 0; |
1021 | } | 1021 | } |
1022 | 1022 | ||
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1033 | * host lock and called routines may take the isa dma lock. | 1033 | * host lock and called routines may take the isa dma lock. |
1034 | */ | 1034 | */ |
1035 | 1035 | ||
1036 | static void NCR5380_main(void *p) | 1036 | static void NCR5380_main(struct work_struct *work) |
1037 | { | 1037 | { |
1038 | struct NCR5380_hostdata *hostdata = p; | 1038 | struct NCR5380_hostdata *hostdata = |
1039 | container_of(work, struct NCR5380_hostdata, coroutine.work); | ||
1039 | struct Scsi_Host *instance = hostdata->host; | 1040 | struct Scsi_Host *instance = hostdata->host; |
1040 | Scsi_Cmnd *tmp, *prev; | 1041 | Scsi_Cmnd *tmp, *prev; |
1041 | int done; | 1042 | int done; |
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
1221 | } /* if BASR_IRQ */ | 1222 | } /* if BASR_IRQ */ |
1222 | spin_unlock_irqrestore(instance->host_lock, flags); | 1223 | spin_unlock_irqrestore(instance->host_lock, flags); |
1223 | if(!done) | 1224 | if(!done) |
1224 | schedule_work(&hostdata->coroutine); | 1225 | schedule_delayed_work(&hostdata->coroutine, 0); |
1225 | } while (!done); | 1226 | } while (!done); |
1226 | return IRQ_HANDLED; | 1227 | return IRQ_HANDLED; |
1227 | } | 1228 | } |
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 1bc73de496b..713a108c02e 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
@@ -271,7 +271,7 @@ struct NCR5380_hostdata { | |||
271 | unsigned long time_expires; /* in jiffies, set prior to sleeping */ | 271 | unsigned long time_expires; /* in jiffies, set prior to sleeping */ |
272 | int select_time; /* timer in select for target response */ | 272 | int select_time; /* timer in select for target response */ |
273 | volatile Scsi_Cmnd *selecting; | 273 | volatile Scsi_Cmnd *selecting; |
274 | struct work_struct coroutine; /* our co-routine */ | 274 | struct delayed_work coroutine; /* our co-routine */ |
275 | #ifdef NCR5380_STATS | 275 | #ifdef NCR5380_STATS |
276 | unsigned timebase; /* Base for time calcs */ | 276 | unsigned timebase; /* Base for time calcs */ |
277 | long time_read[8]; /* time to do reads */ | 277 | long time_read[8]; /* time to do reads */ |
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance); | |||
298 | #ifndef DONT_USE_INTR | 298 | #ifndef DONT_USE_INTR |
299 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); | 299 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); |
300 | #endif | 300 | #endif |
301 | static void NCR5380_main(void *ptr); | 301 | static void NCR5380_main(struct work_struct *work); |
302 | static void NCR5380_print_options(struct Scsi_Host *instance); | 302 | static void NCR5380_print_options(struct Scsi_Host *instance); |
303 | #ifdef NDEBUG | 303 | #ifdef NDEBUG |
304 | static void NCR5380_print_phase(struct Scsi_Host *instance); | 304 | static void NCR5380_print_phase(struct Scsi_Host *instance); |
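
The NCR5380 hunks show the delayed-work half of the API change: a work item that is ever scheduled with a delay becomes a struct delayed_work, is set up with INIT_DELAYED_WORK(), and is queued with schedule_delayed_work() (a delay of 0 replaces the old schedule_work() call). Inside the handler, container_of() goes through the embedded .work member. A sketch under those assumptions, with hypothetical names:

        #include <linux/workqueue.h>

        struct my_hostdata {
                struct delayed_work coroutine;  /* was struct work_struct */
                int busy;
        };

        static void my_main(struct work_struct *work)
        {
                /* delayed_work embeds a work_struct member named "work" */
                struct my_hostdata *hd =
                        container_of(work, struct my_hostdata, coroutine.work);

                hd->busy = 0;
        }

        static void my_init(struct my_hostdata *hd)
        {
                INIT_DELAYED_WORK(&hd->coroutine, my_main);
        }

        static void my_kick(struct my_hostdata *hd)
        {
                /* run as soon as possible; a non-zero delay is in jiffies */
                schedule_delayed_work(&hd->coroutine, 0);
        }
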
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 306f46b85a5..0cec742d12e 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq; | |||
1443 | * Run service completions on the card with interrupts enabled. | 1443 | * Run service completions on the card with interrupts enabled. |
1444 | * | 1444 | * |
1445 | */ | 1445 | */ |
1446 | static void run(void) | 1446 | static void run(struct work_struct *work) |
1447 | { | 1447 | { |
1448 | struct aha152x_hostdata *hd; | 1448 | struct aha152x_hostdata *hd; |
1449 | 1449 | ||
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id) | |||
1499 | HOSTDATA(shpnt)->service=1; | 1499 | HOSTDATA(shpnt)->service=1; |
1500 | 1500 | ||
1501 | /* Poke the BH handler */ | 1501 | /* Poke the BH handler */ |
1502 | INIT_WORK(&aha152x_tq, (void *) run, NULL); | 1502 | INIT_WORK(&aha152x_tq, run); |
1503 | schedule_work(&aha152x_tq); | 1503 | schedule_work(&aha152x_tq); |
1504 | } | 1504 | } |
1505 | DO_UNLOCK(flags); | 1505 | DO_UNLOCK(flags); |
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index 14d5d8c2ee1..75ed6b0569d 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c | |||
@@ -414,9 +414,10 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) | |||
414 | } | 414 | } |
415 | 415 | ||
416 | /* hard reset a phy later */ | 416 | /* hard reset a phy later */ |
417 | static void do_phy_reset_later(void *data) | 417 | static void do_phy_reset_later(struct work_struct *work) |
418 | { | 418 | { |
419 | struct sas_phy *sas_phy = data; | 419 | struct sas_phy *sas_phy = |
420 | container_of(work, struct sas_phy, reset_work); | ||
420 | int error; | 421 | int error; |
421 | 422 | ||
422 | ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, | 423 | ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, |
@@ -430,7 +431,7 @@ static void do_phy_reset_later(void *data) | |||
430 | 431 | ||
431 | static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) | 432 | static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) |
432 | { | 433 | { |
433 | INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy); | 434 | INIT_WORK(&sas_phy->reset_work, do_phy_reset_later); |
434 | queue_work(shost->work_q, &sas_phy->reset_work); | 435 | queue_work(shost->work_q, &sas_phy->reset_work); |
435 | } | 436 | } |
436 | 437 | ||
@@ -442,7 +443,7 @@ static void task_kill_later(struct asd_ascb *ascb) | |||
442 | struct Scsi_Host *shost = sas_ha->core.shost; | 443 | struct Scsi_Host *shost = sas_ha->core.shost; |
443 | struct sas_task *task = ascb->uldd_task; | 444 | struct sas_task *task = ascb->uldd_task; |
444 | 445 | ||
445 | INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task); | 446 | INIT_WORK(&task->abort_work, sas_task_abort); |
446 | queue_work(shost->work_q, &task->abort_work); | 447 | queue_work(shost->work_q, &task->abort_work); |
447 | } | 448 | } |
448 | 449 | ||
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index e31f6122106..0464c182c57 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c | |||
@@ -36,7 +36,7 @@ typedef struct { | |||
36 | int base_hi; /* Hi Base address for ECP-ISA chipset */ | 36 | int base_hi; /* Hi Base address for ECP-ISA chipset */ |
37 | int mode; /* Transfer mode */ | 37 | int mode; /* Transfer mode */ |
38 | struct scsi_cmnd *cur_cmd; /* Current queued command */ | 38 | struct scsi_cmnd *cur_cmd; /* Current queued command */ |
39 | struct work_struct imm_tq; /* Polling interrupt stuff */ | 39 | struct delayed_work imm_tq; /* Polling interrupt stuff */ |
40 | unsigned long jstart; /* Jiffies at start */ | 40 | unsigned long jstart; /* Jiffies at start */ |
41 | unsigned failed:1; /* Failure flag */ | 41 | unsigned failed:1; /* Failure flag */ |
42 | unsigned dp:1; /* Data phase present */ | 42 | unsigned dp:1; /* Data phase present */ |
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd) | |||
733 | * the scheduler's task queue to generate a stream of call-backs and | 733 | * the scheduler's task queue to generate a stream of call-backs and |
734 | * complete the request when the drive is ready. | 734 | * complete the request when the drive is ready. |
735 | */ | 735 | */ |
736 | static void imm_interrupt(void *data) | 736 | static void imm_interrupt(struct work_struct *work) |
737 | { | 737 | { |
738 | imm_struct *dev = (imm_struct *) data; | 738 | imm_struct *dev = container_of(work, imm_struct, imm_tq.work); |
739 | struct scsi_cmnd *cmd = dev->cur_cmd; | 739 | struct scsi_cmnd *cmd = dev->cur_cmd; |
740 | struct Scsi_Host *host = cmd->device->host; | 740 | struct Scsi_Host *host = cmd->device->host; |
741 | unsigned long flags; | 741 | unsigned long flags; |
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data) | |||
745 | return; | 745 | return; |
746 | } | 746 | } |
747 | if (imm_engine(dev, cmd)) { | 747 | if (imm_engine(dev, cmd)) { |
748 | INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev); | ||
749 | schedule_delayed_work(&dev->imm_tq, 1); | 748 | schedule_delayed_work(&dev->imm_tq, 1); |
750 | return; | 749 | return; |
751 | } | 750 | } |
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, | |||
953 | cmd->result = DID_ERROR << 16; /* default return code */ | 952 | cmd->result = DID_ERROR << 16; /* default return code */ |
954 | cmd->SCp.phase = 0; /* bus free */ | 953 | cmd->SCp.phase = 0; /* bus free */ |
955 | 954 | ||
956 | INIT_WORK(&dev->imm_tq, imm_interrupt, dev); | 955 | schedule_delayed_work(&dev->imm_tq, 0); |
957 | schedule_work(&dev->imm_tq); | ||
958 | 956 | ||
959 | imm_pb_claim(dev); | 957 | imm_pb_claim(dev); |
960 | 958 | ||
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb) | |||
1225 | else | 1223 | else |
1226 | ports = 8; | 1224 | ports = 8; |
1227 | 1225 | ||
1228 | INIT_WORK(&dev->imm_tq, imm_interrupt, dev); | 1226 | INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); |
1229 | 1227 | ||
1230 | err = -ENOMEM; | 1228 | err = -ENOMEM; |
1231 | host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); | 1229 | host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); |
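
imm.c (and ppa.c further down) also drop the old habit of re-running INIT_WORK() right before every schedule: with the new API the handler binding never changes, so the delayed work is initialised once at attach time and later callers only reschedule it. Roughly, with invented names:

        #include <linux/workqueue.h>

        struct my_adapter {
                struct delayed_work poll;       /* polling "interrupt" work */
                int pending;
        };

        static void my_poll(struct work_struct *work)
        {
                struct my_adapter *dev =
                        container_of(work, struct my_adapter, poll.work);

                if (dev->pending)
                        schedule_delayed_work(&dev->poll, 1);   /* retry in 1 jiffy */
        }

        static void my_attach(struct my_adapter *dev)
        {
                INIT_DELAYED_WORK(&dev->poll, my_poll);         /* bind handler once */
        }

        static void my_queuecommand(struct my_adapter *dev)
        {
                dev->pending = 1;
                schedule_delayed_work(&dev->poll, 0);           /* no re-INIT needed */
        }
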
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2d83fbb806a..ccd4dafce8e 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -2307,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref) | |||
2307 | 2307 | ||
2308 | /** | 2308 | /** |
2309 | * ipr_worker_thread - Worker thread | 2309 | * ipr_worker_thread - Worker thread |
2310 | * @data: ioa config struct | 2310 | * @work: ioa config struct |
2311 | * | 2311 | * |
2312 | * Called at task level from a work thread. This function takes care | 2312 | * Called at task level from a work thread. This function takes care |
2313 | * of adding and removing device from the mid-layer as configuration | 2313 | * of adding and removing device from the mid-layer as configuration |
@@ -2316,13 +2316,14 @@ static void ipr_release_dump(struct kref *kref) | |||
2316 | * Return value: | 2316 | * Return value: |
2317 | * nothing | 2317 | * nothing |
2318 | **/ | 2318 | **/ |
2319 | static void ipr_worker_thread(void *data) | 2319 | static void ipr_worker_thread(struct work_struct *work) |
2320 | { | 2320 | { |
2321 | unsigned long lock_flags; | 2321 | unsigned long lock_flags; |
2322 | struct ipr_resource_entry *res; | 2322 | struct ipr_resource_entry *res; |
2323 | struct scsi_device *sdev; | 2323 | struct scsi_device *sdev; |
2324 | struct ipr_dump *dump; | 2324 | struct ipr_dump *dump; |
2325 | struct ipr_ioa_cfg *ioa_cfg = data; | 2325 | struct ipr_ioa_cfg *ioa_cfg = |
2326 | container_of(work, struct ipr_ioa_cfg, work_q); | ||
2326 | u8 bus, target, lun; | 2327 | u8 bus, target, lun; |
2327 | int did_work; | 2328 | int did_work; |
2328 | 2329 | ||
@@ -7121,7 +7122,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7121 | INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); | 7122 | INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); |
7122 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); | 7123 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); |
7123 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 7124 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
7124 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); | 7125 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
7125 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 7126 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
7126 | ioa_cfg->sdt_state = INACTIVE; | 7127 | ioa_cfg->sdt_state = INACTIVE; |
7127 | if (ipr_enable_cache) | 7128 | if (ipr_enable_cache) |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 5d886218948..e11b23c641e 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -719,9 +719,10 @@ again: | |||
719 | return rc; | 719 | return rc; |
720 | } | 720 | } |
721 | 721 | ||
722 | static void iscsi_xmitworker(void *data) | 722 | static void iscsi_xmitworker(struct work_struct *work) |
723 | { | 723 | { |
724 | struct iscsi_conn *conn = data; | 724 | struct iscsi_conn *conn = |
725 | container_of(work, struct iscsi_conn, xmitwork); | ||
725 | int rc; | 726 | int rc; |
726 | /* | 727 | /* |
727 | * serialize Xmit worker on a per-connection basis. | 728 | * serialize Xmit worker on a per-connection basis. |
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1512 | if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) | 1513 | if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) |
1513 | goto mgmtqueue_alloc_fail; | 1514 | goto mgmtqueue_alloc_fail; |
1514 | 1515 | ||
1515 | INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); | 1516 | INIT_WORK(&conn->xmitwork, iscsi_xmitworker); |
1516 | 1517 | ||
1517 | /* allocate login_mtask used for the login/text sequences */ | 1518 | /* allocate login_mtask used for the login/text sequences */ |
1518 | spin_lock_bh(&session->lock); | 1519 | spin_lock_bh(&session->lock); |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d977bd492d8..fb7df7b7581 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port) | |||
647 | * Discover process only interrogates devices in order to discover the | 647 | * Discover process only interrogates devices in order to discover the |
648 | * domain. | 648 | * domain. |
649 | */ | 649 | */ |
650 | static void sas_discover_domain(void *data) | 650 | static void sas_discover_domain(struct work_struct *work) |
651 | { | 651 | { |
652 | int error = 0; | 652 | int error = 0; |
653 | struct asd_sas_port *port = data; | 653 | struct sas_discovery_event *ev = |
654 | container_of(work, struct sas_discovery_event, work); | ||
655 | struct asd_sas_port *port = ev->port; | ||
654 | 656 | ||
655 | sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, | 657 | sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, |
656 | &port->disc.pending); | 658 | &port->disc.pending); |
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data) | |||
692 | current->pid, error); | 694 | current->pid, error); |
693 | } | 695 | } |
694 | 696 | ||
695 | static void sas_revalidate_domain(void *data) | 697 | static void sas_revalidate_domain(struct work_struct *work) |
696 | { | 698 | { |
697 | int res = 0; | 699 | int res = 0; |
698 | struct asd_sas_port *port = data; | 700 | struct sas_discovery_event *ev = |
701 | container_of(work, struct sas_discovery_event, work); | ||
702 | struct asd_sas_port *port = ev->port; | ||
699 | 703 | ||
700 | sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, | 704 | sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, |
701 | &port->disc.pending); | 705 | &port->disc.pending); |
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) | |||
722 | BUG_ON(ev >= DISC_NUM_EVENTS); | 726 | BUG_ON(ev >= DISC_NUM_EVENTS); |
723 | 727 | ||
724 | sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, | 728 | sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, |
725 | &disc->disc_work[ev], port->ha->core.shost); | 729 | &disc->disc_work[ev].work, port->ha->core.shost); |
726 | 730 | ||
727 | return 0; | 731 | return 0; |
728 | } | 732 | } |
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) | |||
737 | { | 741 | { |
738 | int i; | 742 | int i; |
739 | 743 | ||
740 | static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { | 744 | static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { |
741 | [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, | 745 | [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, |
742 | [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, | 746 | [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, |
743 | }; | 747 | }; |
744 | 748 | ||
745 | spin_lock_init(&disc->disc_event_lock); | 749 | spin_lock_init(&disc->disc_event_lock); |
746 | disc->pending = 0; | 750 | disc->pending = 0; |
747 | for (i = 0; i < DISC_NUM_EVENTS; i++) | 751 | for (i = 0; i < DISC_NUM_EVENTS; i++) { |
748 | INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); | 752 | INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); |
753 | disc->disc_work[i].port = port; | ||
754 | } | ||
749 | } | 755 | } |
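
libsas keeps arrays of work items indexed by event number, and the old code smuggled the port/ha/phy pointer through the INIT_WORK data argument. After the conversion each array element becomes a small wrapper pairing the work_struct with a back-pointer, and the handler tables become plain work_func_t arrays, as in sas_init_disc() above. A sketch of that shape, assuming hypothetical event and port types:

        #include <linux/workqueue.h>

        #define MY_NUM_EVENTS 2

        struct my_port;

        struct my_disc_event {
                struct work_struct work;
                struct my_port *port;           /* back-pointer, was INIT_WORK data */
        };

        static void my_discover_domain(struct work_struct *work)
        {
                struct my_disc_event *ev =
                        container_of(work, struct my_disc_event, work);

                (void)ev->port;
        }

        static void my_revalidate_domain(struct work_struct *work)
        {
                struct my_disc_event *ev =
                        container_of(work, struct my_disc_event, work);

                (void)ev->port;
        }

        static void my_init_disc(struct my_disc_event *evs, struct my_port *port)
        {
                static const work_func_t fns[MY_NUM_EVENTS] = {
                        my_discover_domain,
                        my_revalidate_domain,
                };
                int i;

                for (i = 0; i < MY_NUM_EVENTS; i++) {
                        INIT_WORK(&evs[i].work, fns[i]);
                        evs[i].port = port;     /* set the back-pointer explicitly */
                }
        }
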
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 19110ed1c89..d83392ee682 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c | |||
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) | |||
31 | BUG_ON(event >= HA_NUM_EVENTS); | 31 | BUG_ON(event >= HA_NUM_EVENTS); |
32 | 32 | ||
33 | sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, | 33 | sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, |
34 | &sas_ha->ha_events[event], sas_ha->core.shost); | 34 | &sas_ha->ha_events[event].work, sas_ha->core.shost); |
35 | } | 35 | } |
36 | 36 | ||
37 | static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) | 37 | static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) |
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) | |||
41 | BUG_ON(event >= PORT_NUM_EVENTS); | 41 | BUG_ON(event >= PORT_NUM_EVENTS); |
42 | 42 | ||
43 | sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, | 43 | sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, |
44 | &phy->port_events[event], ha->core.shost); | 44 | &phy->port_events[event].work, ha->core.shost); |
45 | } | 45 | } |
46 | 46 | ||
47 | static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) | 47 | static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) |
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) | |||
51 | BUG_ON(event >= PHY_NUM_EVENTS); | 51 | BUG_ON(event >= PHY_NUM_EVENTS); |
52 | 52 | ||
53 | sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, | 53 | sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, |
54 | &phy->phy_events[event], ha->core.shost); | 54 | &phy->phy_events[event].work, ha->core.shost); |
55 | } | 55 | } |
56 | 56 | ||
57 | int sas_init_events(struct sas_ha_struct *sas_ha) | 57 | int sas_init_events(struct sas_ha_struct *sas_ha) |
58 | { | 58 | { |
59 | static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { | 59 | static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = { |
60 | [HAE_RESET] = sas_hae_reset, | 60 | [HAE_RESET] = sas_hae_reset, |
61 | }; | 61 | }; |
62 | 62 | ||
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha) | |||
64 | 64 | ||
65 | spin_lock_init(&sas_ha->event_lock); | 65 | spin_lock_init(&sas_ha->event_lock); |
66 | 66 | ||
67 | for (i = 0; i < HA_NUM_EVENTS; i++) | 67 | for (i = 0; i < HA_NUM_EVENTS; i++) { |
68 | INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); | 68 | INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); |
69 | sas_ha->ha_events[i].ha = sas_ha; | ||
70 | } | ||
69 | 71 | ||
70 | sas_ha->notify_ha_event = notify_ha_event; | 72 | sas_ha->notify_ha_event = notify_ha_event; |
71 | sas_ha->notify_port_event = notify_port_event; | 73 | sas_ha->notify_port_event = notify_port_event; |
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 0fb347b4b1a..d65bc4e0f21 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c | |||
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) | |||
65 | 65 | ||
66 | /* ---------- HA events ---------- */ | 66 | /* ---------- HA events ---------- */ |
67 | 67 | ||
68 | void sas_hae_reset(void *data) | 68 | void sas_hae_reset(struct work_struct *work) |
69 | { | 69 | { |
70 | struct sas_ha_struct *ha = data; | 70 | struct sas_ha_event *ev = |
71 | container_of(work, struct sas_ha_event, work); | ||
72 | struct sas_ha_struct *ha = ev->ha; | ||
71 | 73 | ||
72 | sas_begin_event(HAE_RESET, &ha->event_lock, | 74 | sas_begin_event(HAE_RESET, &ha->event_lock, |
73 | &ha->pending); | 75 | &ha->pending); |
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index bffcee47492..137d7e496b6 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h | |||
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha); | |||
60 | 60 | ||
61 | void sas_deform_port(struct asd_sas_phy *phy); | 61 | void sas_deform_port(struct asd_sas_phy *phy); |
62 | 62 | ||
63 | void sas_porte_bytes_dmaed(void *); | 63 | void sas_porte_bytes_dmaed(struct work_struct *work); |
64 | void sas_porte_broadcast_rcvd(void *); | 64 | void sas_porte_broadcast_rcvd(struct work_struct *work); |
65 | void sas_porte_link_reset_err(void *); | 65 | void sas_porte_link_reset_err(struct work_struct *work); |
66 | void sas_porte_timer_event(void *); | 66 | void sas_porte_timer_event(struct work_struct *work); |
67 | void sas_porte_hard_reset(void *); | 67 | void sas_porte_hard_reset(struct work_struct *work); |
68 | 68 | ||
69 | int sas_notify_lldd_dev_found(struct domain_device *); | 69 | int sas_notify_lldd_dev_found(struct domain_device *); |
70 | void sas_notify_lldd_dev_gone(struct domain_device *); | 70 | void sas_notify_lldd_dev_gone(struct domain_device *); |
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy); | |||
75 | 75 | ||
76 | struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); | 76 | struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); |
77 | 77 | ||
78 | void sas_hae_reset(void *); | 78 | void sas_hae_reset(struct work_struct *work); |
79 | 79 | ||
80 | static inline void sas_queue_event(int event, spinlock_t *lock, | 80 | static inline void sas_queue_event(int event, spinlock_t *lock, |
81 | unsigned long *pending, | 81 | unsigned long *pending, |
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 9340cdbae4a..b459c4b635b 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c | |||
@@ -30,9 +30,11 @@ | |||
30 | 30 | ||
31 | /* ---------- Phy events ---------- */ | 31 | /* ---------- Phy events ---------- */ |
32 | 32 | ||
33 | static void sas_phye_loss_of_signal(void *data) | 33 | static void sas_phye_loss_of_signal(struct work_struct *work) |
34 | { | 34 | { |
35 | struct asd_sas_phy *phy = data; | 35 | struct asd_sas_event *ev = |
36 | container_of(work, struct asd_sas_event, work); | ||
37 | struct asd_sas_phy *phy = ev->phy; | ||
36 | 38 | ||
37 | sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, | 39 | sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, |
38 | &phy->phy_events_pending); | 40 | &phy->phy_events_pending); |
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data) | |||
40 | sas_deform_port(phy); | 42 | sas_deform_port(phy); |
41 | } | 43 | } |
42 | 44 | ||
43 | static void sas_phye_oob_done(void *data) | 45 | static void sas_phye_oob_done(struct work_struct *work) |
44 | { | 46 | { |
45 | struct asd_sas_phy *phy = data; | 47 | struct asd_sas_event *ev = |
48 | container_of(work, struct asd_sas_event, work); | ||
49 | struct asd_sas_phy *phy = ev->phy; | ||
46 | 50 | ||
47 | sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, | 51 | sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, |
48 | &phy->phy_events_pending); | 52 | &phy->phy_events_pending); |
49 | phy->error = 0; | 53 | phy->error = 0; |
50 | } | 54 | } |
51 | 55 | ||
52 | static void sas_phye_oob_error(void *data) | 56 | static void sas_phye_oob_error(struct work_struct *work) |
53 | { | 57 | { |
54 | struct asd_sas_phy *phy = data; | 58 | struct asd_sas_event *ev = |
59 | container_of(work, struct asd_sas_event, work); | ||
60 | struct asd_sas_phy *phy = ev->phy; | ||
55 | struct sas_ha_struct *sas_ha = phy->ha; | 61 | struct sas_ha_struct *sas_ha = phy->ha; |
56 | struct asd_sas_port *port = phy->port; | 62 | struct asd_sas_port *port = phy->port; |
57 | struct sas_internal *i = | 63 | struct sas_internal *i = |
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data) | |||
80 | } | 86 | } |
81 | } | 87 | } |
82 | 88 | ||
83 | static void sas_phye_spinup_hold(void *data) | 89 | static void sas_phye_spinup_hold(struct work_struct *work) |
84 | { | 90 | { |
85 | struct asd_sas_phy *phy = data; | 91 | struct asd_sas_event *ev = |
92 | container_of(work, struct asd_sas_event, work); | ||
93 | struct asd_sas_phy *phy = ev->phy; | ||
86 | struct sas_ha_struct *sas_ha = phy->ha; | 94 | struct sas_ha_struct *sas_ha = phy->ha; |
87 | struct sas_internal *i = | 95 | struct sas_internal *i = |
88 | to_sas_internal(sas_ha->core.shost->transportt); | 96 | to_sas_internal(sas_ha->core.shost->transportt); |
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
100 | { | 108 | { |
101 | int i; | 109 | int i; |
102 | 110 | ||
103 | static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { | 111 | static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { |
104 | [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, | 112 | [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, |
105 | [PHYE_OOB_DONE] = sas_phye_oob_done, | 113 | [PHYE_OOB_DONE] = sas_phye_oob_done, |
106 | [PHYE_OOB_ERROR] = sas_phye_oob_error, | 114 | [PHYE_OOB_ERROR] = sas_phye_oob_error, |
107 | [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, | 115 | [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, |
108 | }; | 116 | }; |
109 | 117 | ||
110 | static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { | 118 | static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { |
111 | [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, | 119 | [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, |
112 | [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, | 120 | [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, |
113 | [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, | 121 | [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, |
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
122 | 130 | ||
123 | phy->error = 0; | 131 | phy->error = 0; |
124 | INIT_LIST_HEAD(&phy->port_phy_el); | 132 | INIT_LIST_HEAD(&phy->port_phy_el); |
125 | for (k = 0; k < PORT_NUM_EVENTS; k++) | 133 | for (k = 0; k < PORT_NUM_EVENTS; k++) { |
126 | INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], | 134 | INIT_WORK(&phy->port_events[k].work, |
127 | phy); | 135 | sas_port_event_fns[k]); |
136 | phy->port_events[k].phy = phy; | ||
137 | } | ||
138 | |||
139 | for (k = 0; k < PHY_NUM_EVENTS; k++) { | ||
140 | INIT_WORK(&phy->phy_events[k].work, | ||
141 | sas_phy_event_fns[k]); | ||
142 | phy->phy_events[k].phy = phy; | ||
143 | } | ||
128 | 144 | ||
129 | for (k = 0; k < PHY_NUM_EVENTS; k++) | ||
130 | INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], | ||
131 | phy); | ||
132 | phy->port = NULL; | 145 | phy->port = NULL; |
133 | phy->ha = sas_ha; | 146 | phy->ha = sas_ha; |
134 | spin_lock_init(&phy->frame_rcvd_lock); | 147 | spin_lock_init(&phy->frame_rcvd_lock); |
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 253cdcf306a..971c37ceecb 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy) | |||
181 | 181 | ||
182 | /* ---------- SAS port events ---------- */ | 182 | /* ---------- SAS port events ---------- */ |
183 | 183 | ||
184 | void sas_porte_bytes_dmaed(void *data) | 184 | void sas_porte_bytes_dmaed(struct work_struct *work) |
185 | { | 185 | { |
186 | struct asd_sas_phy *phy = data; | 186 | struct asd_sas_event *ev = |
187 | container_of(work, struct asd_sas_event, work); | ||
188 | struct asd_sas_phy *phy = ev->phy; | ||
187 | 189 | ||
188 | sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, | 190 | sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, |
189 | &phy->port_events_pending); | 191 | &phy->port_events_pending); |
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data) | |||
191 | sas_form_port(phy); | 193 | sas_form_port(phy); |
192 | } | 194 | } |
193 | 195 | ||
194 | void sas_porte_broadcast_rcvd(void *data) | 196 | void sas_porte_broadcast_rcvd(struct work_struct *work) |
195 | { | 197 | { |
198 | struct asd_sas_event *ev = | ||
199 | container_of(work, struct asd_sas_event, work); | ||
200 | struct asd_sas_phy *phy = ev->phy; | ||
196 | unsigned long flags; | 201 | unsigned long flags; |
197 | u32 prim; | 202 | u32 prim; |
198 | struct asd_sas_phy *phy = data; | ||
199 | 203 | ||
200 | sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, | 204 | sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, |
201 | &phy->port_events_pending); | 205 | &phy->port_events_pending); |
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data) | |||
208 | sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); | 212 | sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); |
209 | } | 213 | } |
210 | 214 | ||
211 | void sas_porte_link_reset_err(void *data) | 215 | void sas_porte_link_reset_err(struct work_struct *work) |
212 | { | 216 | { |
213 | struct asd_sas_phy *phy = data; | 217 | struct asd_sas_event *ev = |
218 | container_of(work, struct asd_sas_event, work); | ||
219 | struct asd_sas_phy *phy = ev->phy; | ||
214 | 220 | ||
215 | sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, | 221 | sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, |
216 | &phy->port_events_pending); | 222 | &phy->port_events_pending); |
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data) | |||
218 | sas_deform_port(phy); | 224 | sas_deform_port(phy); |
219 | } | 225 | } |
220 | 226 | ||
221 | void sas_porte_timer_event(void *data) | 227 | void sas_porte_timer_event(struct work_struct *work) |
222 | { | 228 | { |
223 | struct asd_sas_phy *phy = data; | 229 | struct asd_sas_event *ev = |
230 | container_of(work, struct asd_sas_event, work); | ||
231 | struct asd_sas_phy *phy = ev->phy; | ||
224 | 232 | ||
225 | sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, | 233 | sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, |
226 | &phy->port_events_pending); | 234 | &phy->port_events_pending); |
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data) | |||
228 | sas_deform_port(phy); | 236 | sas_deform_port(phy); |
229 | } | 237 | } |
230 | 238 | ||
231 | void sas_porte_hard_reset(void *data) | 239 | void sas_porte_hard_reset(struct work_struct *work) |
232 | { | 240 | { |
233 | struct asd_sas_phy *phy = data; | 241 | struct asd_sas_event *ev = |
242 | container_of(work, struct asd_sas_event, work); | ||
243 | struct asd_sas_phy *phy = ev->phy; | ||
234 | 244 | ||
235 | sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, | 245 | sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, |
236 | &phy->port_events_pending); | 246 | &phy->port_events_pending); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index e064aac06b9..22672d54aa2 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -846,8 +846,10 @@ static int do_sas_task_abort(struct sas_task *task) | |||
846 | return -EAGAIN; | 846 | return -EAGAIN; |
847 | } | 847 | } |
848 | 848 | ||
849 | void sas_task_abort(struct sas_task *task) | 849 | void sas_task_abort(struct work_struct *work) |
850 | { | 850 | { |
851 | struct sas_task *task = | ||
852 | container_of(work, struct sas_task, abort_work); | ||
851 | int i; | 853 | int i; |
852 | 854 | ||
853 | for (i = 0; i < 5; i++) | 855 | for (i = 0; i < 5; i++) |
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 89a2a9f11e4..584ba4d6e03 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c | |||
@@ -31,7 +31,7 @@ typedef struct { | |||
31 | int base; /* Actual port address */ | 31 | int base; /* Actual port address */ |
32 | int mode; /* Transfer mode */ | 32 | int mode; /* Transfer mode */ |
33 | struct scsi_cmnd *cur_cmd; /* Current queued command */ | 33 | struct scsi_cmnd *cur_cmd; /* Current queued command */ |
34 | struct work_struct ppa_tq; /* Polling interrupt stuff */ | 34 | struct delayed_work ppa_tq; /* Polling interrupt stuff */ |
35 | unsigned long jstart; /* Jiffies at start */ | 35 | unsigned long jstart; /* Jiffies at start */ |
36 | unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ | 36 | unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ |
37 | unsigned int failed:1; /* Failure flag */ | 37 | unsigned int failed:1; /* Failure flag */ |
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd) | |||
627 | * the scheduler's task queue to generate a stream of call-backs and | 627 | * the scheduler's task queue to generate a stream of call-backs and |
628 | * complete the request when the drive is ready. | 628 | * complete the request when the drive is ready. |
629 | */ | 629 | */ |
630 | static void ppa_interrupt(void *data) | 630 | static void ppa_interrupt(struct work_struct *work) |
631 | { | 631 | { |
632 | ppa_struct *dev = (ppa_struct *) data; | 632 | ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); |
633 | struct scsi_cmnd *cmd = dev->cur_cmd; | 633 | struct scsi_cmnd *cmd = dev->cur_cmd; |
634 | 634 | ||
635 | if (!cmd) { | 635 | if (!cmd) { |
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data) | |||
637 | return; | 637 | return; |
638 | } | 638 | } |
639 | if (ppa_engine(dev, cmd)) { | 639 | if (ppa_engine(dev, cmd)) { |
640 | dev->ppa_tq.data = (void *) dev; | ||
641 | schedule_delayed_work(&dev->ppa_tq, 1); | 640 | schedule_delayed_work(&dev->ppa_tq, 1); |
642 | return; | 641 | return; |
643 | } | 642 | } |
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, | |||
822 | cmd->result = DID_ERROR << 16; /* default return code */ | 821 | cmd->result = DID_ERROR << 16; /* default return code */ |
823 | cmd->SCp.phase = 0; /* bus free */ | 822 | cmd->SCp.phase = 0; /* bus free */ |
824 | 823 | ||
825 | dev->ppa_tq.data = dev; | 824 | schedule_delayed_work(&dev->ppa_tq, 0); |
826 | schedule_work(&dev->ppa_tq); | ||
827 | 825 | ||
828 | ppa_pb_claim(dev); | 826 | ppa_pb_claim(dev); |
829 | 827 | ||
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb) | |||
1086 | else | 1084 | else |
1087 | ports = 8; | 1085 | ports = 8; |
1088 | 1086 | ||
1089 | INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); | 1087 | INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); |
1090 | 1088 | ||
1091 | err = -ENOMEM; | 1089 | err = -ENOMEM; |
1092 | host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); | 1090 | host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index db9d88e7bee..969c9e43102 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -961,9 +961,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
961 | * the mid-level tries to sleep when it reaches the driver threshold | 961 | * the mid-level tries to sleep when it reaches the driver threshold |
962 | * "host->can_queue". This can cause a panic if we were in our interrupt code. | 962 | * "host->can_queue". This can cause a panic if we were in our interrupt code. |
963 | **/ | 963 | **/ |
964 | static void qla4xxx_do_dpc(void *data) | 964 | static void qla4xxx_do_dpc(struct work_struct *work) |
965 | { | 965 | { |
966 | struct scsi_qla_host *ha = (struct scsi_qla_host *) data; | 966 | struct scsi_qla_host *ha = |
967 | container_of(work, struct scsi_qla_host, dpc_work); | ||
967 | struct ddb_entry *ddb_entry, *dtemp; | 968 | struct ddb_entry *ddb_entry, *dtemp; |
968 | 969 | ||
969 | DEBUG2(printk("scsi%ld: %s: DPC handler waking up." | 970 | DEBUG2(printk("scsi%ld: %s: DPC handler waking up." |
@@ -1253,7 +1254,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1253 | ret = -ENODEV; | 1254 | ret = -ENODEV; |
1254 | goto probe_failed; | 1255 | goto probe_failed; |
1255 | } | 1256 | } |
1256 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); | 1257 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); |
1257 | 1258 | ||
1258 | ret = request_irq(pdev->irq, qla4xxx_intr_handler, | 1259 | ret = request_irq(pdev->irq, qla4xxx_intr_handler, |
1259 | SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); | 1260 | SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 4d656148bd6..14e635aa44c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -437,9 +437,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, | |||
437 | goto retry; | 437 | goto retry; |
438 | } | 438 | } |
439 | 439 | ||
440 | static void scsi_target_reap_usercontext(void *data) | 440 | static void scsi_target_reap_usercontext(struct work_struct *work) |
441 | { | 441 | { |
442 | struct scsi_target *starget = data; | 442 | struct scsi_target *starget = |
443 | container_of(work, struct scsi_target, ew.work); | ||
443 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 444 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
444 | unsigned long flags; | 445 | unsigned long flags; |
445 | 446 | ||
@@ -475,7 +476,7 @@ void scsi_target_reap(struct scsi_target *starget) | |||
475 | starget->state = STARGET_DEL; | 476 | starget->state = STARGET_DEL; |
476 | spin_unlock_irqrestore(shost->host_lock, flags); | 477 | spin_unlock_irqrestore(shost->host_lock, flags); |
477 | execute_in_process_context(scsi_target_reap_usercontext, | 478 | execute_in_process_context(scsi_target_reap_usercontext, |
478 | starget, &starget->ew); | 479 | &starget->ew); |
479 | return; | 480 | return; |
480 | 481 | ||
481 | } | 482 | } |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index e1a91665d1c..259c90cfa36 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev) | |||
218 | put_device(&sdev->sdev_gendev); | 218 | put_device(&sdev->sdev_gendev); |
219 | } | 219 | } |
220 | 220 | ||
221 | static void scsi_device_dev_release_usercontext(void *data) | 221 | static void scsi_device_dev_release_usercontext(struct work_struct *work) |
222 | { | 222 | { |
223 | struct device *dev = data; | ||
224 | struct scsi_device *sdev; | 223 | struct scsi_device *sdev; |
225 | struct device *parent; | 224 | struct device *parent; |
226 | struct scsi_target *starget; | 225 | struct scsi_target *starget; |
227 | unsigned long flags; | 226 | unsigned long flags; |
228 | 227 | ||
229 | parent = dev->parent; | 228 | sdev = container_of(work, struct scsi_device, ew.work); |
230 | sdev = to_scsi_device(dev); | 229 | |
230 | parent = sdev->sdev_gendev.parent; | ||
231 | starget = to_scsi_target(parent); | 231 | starget = to_scsi_target(parent); |
232 | 232 | ||
233 | spin_lock_irqsave(sdev->host->host_lock, flags); | 233 | spin_lock_irqsave(sdev->host->host_lock, flags); |
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data) | |||
258 | static void scsi_device_dev_release(struct device *dev) | 258 | static void scsi_device_dev_release(struct device *dev) |
259 | { | 259 | { |
260 | struct scsi_device *sdp = to_scsi_device(dev); | 260 | struct scsi_device *sdp = to_scsi_device(dev); |
261 | execute_in_process_context(scsi_device_dev_release_usercontext, dev, | 261 | execute_in_process_context(scsi_device_dev_release_usercontext, |
262 | &sdp->ew); | 262 | &sdp->ew); |
263 | } | 263 | } |
264 | 264 | ||
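
The scsi_scan.c and scsi_sysfs.c hunks show the knock-on change to execute_in_process_context(): it no longer takes a separate data pointer, so the callback recovers its object from the struct execute_work embedded in that object (via the ew.work member). A hedged sketch with made-up names:

        #include <linux/workqueue.h>
        #include <linux/device.h>

        struct my_target {
                struct device dev;
                struct execute_work ew;         /* embeds a work_struct named "work" */
        };

        static void my_target_release_usercontext(struct work_struct *work)
        {
                struct my_target *t = container_of(work, struct my_target, ew.work);

                put_device(&t->dev);
        }

        static void my_target_release(struct my_target *t)
        {
                /* runs the callback directly when already in process context,
                 * otherwise queues t->ew; no data argument any more */
                execute_in_process_context(my_target_release_usercontext, &t->ew);
        }
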
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 39da5cd6fb6..386dbae17b4 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -185,10 +185,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd) | |||
185 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | 185 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void scsi_tgt_cmd_destroy(void *data) | 188 | static void scsi_tgt_cmd_destroy(struct work_struct *work) |
189 | { | 189 | { |
190 | struct scsi_cmnd *cmd = data; | 190 | struct scsi_tgt_cmd *tcmd = |
191 | struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; | 191 | container_of(work, struct scsi_tgt_cmd, work); |
192 | struct scsi_cmnd *cmd = tcmd->rq->special; | ||
192 | 193 | ||
193 | dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, | 194 | dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, |
194 | rq_data_dir(cmd->request)); | 195 | rq_data_dir(cmd->request)); |
@@ -214,6 +215,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd, | |||
214 | struct list_head *head; | 215 | struct list_head *head; |
215 | 216 | ||
216 | tcmd->tag = tag; | 217 | tcmd->tag = tag; |
218 | INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); | ||
217 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | 219 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); |
218 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; | 220 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; |
219 | list_add(&tcmd->hash_list, head); | 221 | list_add(&tcmd->hash_list, head); |
@@ -303,7 +305,7 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost) | |||
303 | cmd = tcmd->rq->special; | 305 | cmd = tcmd->rq->special; |
304 | 306 | ||
305 | shost->hostt->eh_abort_handler(cmd); | 307 | shost->hostt->eh_abort_handler(cmd); |
306 | scsi_tgt_cmd_destroy(cmd); | 308 | scsi_tgt_cmd_destroy(&tcmd->work); |
307 | } | 309 | } |
308 | } | 310 | } |
309 | EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); | 311 | EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); |
@@ -347,7 +349,6 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) | |||
347 | dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); | 349 | dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); |
348 | 350 | ||
349 | scsi_tgt_uspace_send_status(cmd, tcmd->tag); | 351 | scsi_tgt_uspace_send_status(cmd, tcmd->tag); |
350 | INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd); | ||
351 | queue_work(scsi_tgtd, &tcmd->work); | 352 | queue_work(scsi_tgtd, &tcmd->work); |
352 | } | 353 | } |
353 | 354 | ||
@@ -549,13 +550,15 @@ static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, | |||
549 | 550 | ||
550 | static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | 551 | static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) |
551 | { | 552 | { |
553 | struct scsi_tgt_cmd *tcmd; | ||
552 | int err; | 554 | int err; |
553 | 555 | ||
554 | err = shost->hostt->eh_abort_handler(cmd); | 556 | err = shost->hostt->eh_abort_handler(cmd); |
555 | if (err) | 557 | if (err) |
556 | eprintk("fail to abort %p\n", cmd); | 558 | eprintk("fail to abort %p\n", cmd); |
557 | 559 | ||
558 | scsi_tgt_cmd_destroy(cmd); | 560 | tcmd = cmd->request->end_io_data; |
561 | scsi_tgt_cmd_destroy(&tcmd->work); | ||
559 | return err; | 562 | return err; |
560 | } | 563 | } |
561 | 564 | ||
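
scsi_tgt_lib.c also illustrates what happens when a work handler was sometimes called directly: since the function now takes the work_struct, synchronous callers pass &tcmd->work instead of the command, and INIT_WORK() moves to command setup so the completion path only queues the already-bound item. A sketch, with hypothetical names:

        #include <linux/workqueue.h>

        struct my_cmd {
                struct work_struct work;
                int tag;
        };

        static void my_cmd_destroy(struct work_struct *work)
        {
                struct my_cmd *cmd = container_of(work, struct my_cmd, work);

                (void)cmd->tag;                 /* tear the command down */
        }

        static void my_cmd_init(struct my_cmd *cmd, int tag)
        {
                cmd->tag = tag;
                INIT_WORK(&cmd->work, my_cmd_destroy);  /* bound once, at init */
        }

        static void my_cmd_done(struct workqueue_struct *wq, struct my_cmd *cmd)
        {
                queue_work(wq, &cmd->work);             /* asynchronous path */
        }

        static void my_cmd_abort(struct my_cmd *cmd)
        {
                my_cmd_destroy(&cmd->work);             /* synchronous path, same entry */
        }
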
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 38c215a78f6..3571ce8934e 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) | |||
241 | #define FC_MGMTSRVR_PORTID 0x00000a | 241 | #define FC_MGMTSRVR_PORTID 0x00000a |
242 | 242 | ||
243 | 243 | ||
244 | static void fc_timeout_deleted_rport(void *data); | 244 | static void fc_timeout_deleted_rport(struct work_struct *work); |
245 | static void fc_timeout_fail_rport_io(void *data); | 245 | static void fc_timeout_fail_rport_io(struct work_struct *work); |
246 | static void fc_scsi_scan_rport(void *data); | 246 | static void fc_scsi_scan_rport(struct work_struct *work); |
247 | 247 | ||
248 | /* | 248 | /* |
249 | * Attribute counts pre object type... | 249 | * Attribute counts pre object type... |
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost) | |||
1613 | * 1 on success / 0 already queued / < 0 for error | 1613 | * 1 on success / 0 already queued / < 0 for error |
1614 | **/ | 1614 | **/ |
1615 | static int | 1615 | static int |
1616 | fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, | 1616 | fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, |
1617 | unsigned long delay) | 1617 | unsigned long delay) |
1618 | { | 1618 | { |
1619 | if (unlikely(!fc_host_devloss_work_q(shost))) { | 1619 | if (unlikely(!fc_host_devloss_work_q(shost))) { |
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, | |||
1625 | return -EINVAL; | 1625 | return -EINVAL; |
1626 | } | 1626 | } |
1627 | 1627 | ||
1628 | if (delay == 0) | ||
1629 | return queue_work(fc_host_devloss_work_q(shost), work); | ||
1630 | |||
1631 | return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); | 1628 | return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); |
1632 | } | 1629 | } |
1633 | 1630 | ||
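
The fc_queue_devloss_work() change relies on queue_delayed_work() accepting a zero delay, so the old "delay == 0 ? queue_work() : queue_delayed_work()" split collapses into a single call. Something like the following, with assumed names:

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        /* queue dwork on wq; a zero delay means "as soon as possible" */
        static int my_queue_devloss_work(struct workqueue_struct *wq,
                                         struct delayed_work *dwork,
                                         unsigned long delay)
        {
                if (unlikely(!wq))
                        return -EINVAL;

                return queue_delayed_work(wq, dwork, delay);
        }
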
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host); | |||
1712 | * fc_starget_delete - called to delete the scsi decendents of an rport | 1709 | * fc_starget_delete - called to delete the scsi decendents of an rport |
1713 | * (target and all sdevs) | 1710 | * (target and all sdevs) |
1714 | * | 1711 | * |
1715 | * @data: remote port to be operated on. | 1712 | * @work: remote port to be operated on. |
1716 | **/ | 1713 | **/ |
1717 | static void | 1714 | static void |
1718 | fc_starget_delete(void *data) | 1715 | fc_starget_delete(struct work_struct *work) |
1719 | { | 1716 | { |
1720 | struct fc_rport *rport = (struct fc_rport *)data; | 1717 | struct fc_rport *rport = |
1718 | container_of(work, struct fc_rport, stgt_delete_work); | ||
1721 | struct Scsi_Host *shost = rport_to_shost(rport); | 1719 | struct Scsi_Host *shost = rport_to_shost(rport); |
1722 | unsigned long flags; | 1720 | unsigned long flags; |
1723 | struct fc_internal *i = to_fc_internal(shost->transportt); | 1721 | struct fc_internal *i = to_fc_internal(shost->transportt); |
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data) | |||
1751 | /** | 1749 | /** |
1752 | * fc_rport_final_delete - finish rport termination and delete it. | 1750 | * fc_rport_final_delete - finish rport termination and delete it. |
1753 | * | 1751 | * |
1754 | * @data: remote port to be deleted. | 1752 | * @work: remote port to be deleted. |
1755 | **/ | 1753 | **/ |
1756 | static void | 1754 | static void |
1757 | fc_rport_final_delete(void *data) | 1755 | fc_rport_final_delete(struct work_struct *work) |
1758 | { | 1756 | { |
1759 | struct fc_rport *rport = (struct fc_rport *)data; | 1757 | struct fc_rport *rport = |
1758 | container_of(work, struct fc_rport, rport_delete_work); | ||
1760 | struct device *dev = &rport->dev; | 1759 | struct device *dev = &rport->dev; |
1761 | struct Scsi_Host *shost = rport_to_shost(rport); | 1760 | struct Scsi_Host *shost = rport_to_shost(rport); |
1762 | struct fc_internal *i = to_fc_internal(shost->transportt); | 1761 | struct fc_internal *i = to_fc_internal(shost->transportt); |
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data) | |||
1770 | 1769 | ||
1771 | /* Delete SCSI target and sdevs */ | 1770 | /* Delete SCSI target and sdevs */ |
1772 | if (rport->scsi_target_id != -1) | 1771 | if (rport->scsi_target_id != -1) |
1773 | fc_starget_delete(data); | 1772 | fc_starget_delete(&rport->stgt_delete_work); |
1774 | else if (i->f->dev_loss_tmo_callbk) | 1773 | else if (i->f->dev_loss_tmo_callbk) |
1775 | i->f->dev_loss_tmo_callbk(rport); | 1774 | i->f->dev_loss_tmo_callbk(rport); |
1776 | else if (i->f->terminate_rport_io) | 1775 | else if (i->f->terminate_rport_io) |
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel, | |||
1829 | rport->channel = channel; | 1828 | rport->channel = channel; |
1830 | rport->fast_io_fail_tmo = -1; | 1829 | rport->fast_io_fail_tmo = -1; |
1831 | 1830 | ||
1832 | INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); | 1831 | INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); |
1833 | INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); | 1832 | INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); |
1834 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); | 1833 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); |
1835 | INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); | 1834 | INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); |
1836 | INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); | 1835 | INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); |
1837 | 1836 | ||
1838 | spin_lock_irqsave(shost->host_lock, flags); | 1837 | spin_lock_irqsave(shost->host_lock, flags); |
1839 | 1838 | ||
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
1963 | } | 1962 | } |
1964 | 1963 | ||
1965 | if (match) { | 1964 | if (match) { |
1966 | struct work_struct *work = | 1965 | struct delayed_work *work = |
1967 | &rport->dev_loss_work; | 1966 | &rport->dev_loss_work; |
1968 | 1967 | ||
1969 | memcpy(&rport->node_name, &ids->node_name, | 1968 | memcpy(&rport->node_name, &ids->node_name, |
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg); | |||
2267 | * was a SCSI target (thus was blocked), and failed | 2266 | * was a SCSI target (thus was blocked), and failed |
2268 | * to return in the allotted time. | 2267 | * to return in the allotted time. |
2269 | * | 2268 | * |
2270 | * @data: rport target that failed to reappear in the allotted time. | 2269 | * @work: rport target that failed to reappear in the allotted time. |
2271 | **/ | 2270 | **/ |
2272 | static void | 2271 | static void |
2273 | fc_timeout_deleted_rport(void *data) | 2272 | fc_timeout_deleted_rport(struct work_struct *work) |
2274 | { | 2273 | { |
2275 | struct fc_rport *rport = (struct fc_rport *)data; | 2274 | struct fc_rport *rport = |
2275 | container_of(work, struct fc_rport, dev_loss_work.work); | ||
2276 | struct Scsi_Host *shost = rport_to_shost(rport); | 2276 | struct Scsi_Host *shost = rport_to_shost(rport); |
2277 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | 2277 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); |
2278 | unsigned long flags; | 2278 | unsigned long flags; |
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void *data) | |||
2366 | * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a | 2366 | * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a |
2367 | * disconnected SCSI target. | 2367 | * disconnected SCSI target. |
2368 | * | 2368 | * |
2369 | * @data: rport to terminate io on. | 2369 | * @work: rport to terminate io on. |
2370 | * | 2370 | * |
2371 | * Notes: Only requests the failure of the io, not that all are flushed | 2371 | * Notes: Only requests the failure of the io, not that all are flushed |
2372 | * prior to returning. | 2372 | * prior to returning. |
2373 | **/ | 2373 | **/ |
2374 | static void | 2374 | static void |
2375 | fc_timeout_fail_rport_io(void *data) | 2375 | fc_timeout_fail_rport_io(struct work_struct *work) |
2376 | { | 2376 | { |
2377 | struct fc_rport *rport = (struct fc_rport *)data; | 2377 | struct fc_rport *rport = |
2378 | container_of(work, struct fc_rport, fail_io_work.work); | ||
2378 | struct Scsi_Host *shost = rport_to_shost(rport); | 2379 | struct Scsi_Host *shost = rport_to_shost(rport); |
2379 | struct fc_internal *i = to_fc_internal(shost->transportt); | 2380 | struct fc_internal *i = to_fc_internal(shost->transportt); |
2380 | 2381 | ||
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void *data) | |||
2387 | /** | 2388 | /** |
2388 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. | 2389 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. |
2389 | * | 2390 | * |
2390 | * @data: remote port to be scanned. | 2391 | * @work: remote port to be scanned. |
2391 | **/ | 2392 | **/ |
2392 | static void | 2393 | static void |
2393 | fc_scsi_scan_rport(void *data) | 2394 | fc_scsi_scan_rport(struct work_struct *work) |
2394 | { | 2395 | { |
2395 | struct fc_rport *rport = (struct fc_rport *)data; | 2396 | struct fc_rport *rport = |
2397 | container_of(work, struct fc_rport, scan_work); | ||
2396 | struct Scsi_Host *shost = rport_to_shost(rport); | 2398 | struct Scsi_Host *shost = rport_to_shost(rport); |
2397 | unsigned long flags; | 2399 | unsigned long flags; |
2398 | 2400 | ||
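The scsi_transport_fc hunks above all follow the same conversion shape: the handler's void * argument becomes a struct work_struct *, and the containing object is recovered with container_of(). For a delayed_work the handler is passed the embedded work member, so the container_of() path goes through the .work field. A minimal sketch of the two shapes; the fc_demo_* names are illustrative, not from the patch:

struct fc_demo_port {
	struct work_struct	scan_work;	/* plain work item */
	struct delayed_work	dev_loss_work;	/* delayed work item */
};

/* plain work: the handler receives the work_struct itself */
static void fc_demo_scan(struct work_struct *work)
{
	struct fc_demo_port *port =
		container_of(work, struct fc_demo_port, scan_work);
	/* ... use port ... */
}

/* delayed work: the handler receives &dwork->work, so go through .work */
static void fc_demo_dev_loss(struct work_struct *work)
{
	struct fc_demo_port *port =
		container_of(work, struct fc_demo_port, dev_loss_work.work);
	/* ... use port ... */
}

static void fc_demo_init(struct fc_demo_port *port)
{
	INIT_WORK(&port->scan_work, fc_demo_scan);
	INIT_DELAYED_WORK(&port->dev_loss_work, fc_demo_dev_loss);
}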
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9b25124a989..9c22f134271 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, | |||
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void session_recovery_timedout(void *data) | 237 | static void session_recovery_timedout(struct work_struct *work) |
238 | { | 238 | { |
239 | struct iscsi_cls_session *session = data; | 239 | struct iscsi_cls_session *session = |
240 | container_of(work, struct iscsi_cls_session, | ||
241 | recovery_work.work); | ||
240 | 242 | ||
241 | dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " | 243 | dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " |
242 | "out after %d secs\n", session->recovery_tmo); | 244 | "out after %d secs\n", session->recovery_tmo); |
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, | |||
276 | 278 | ||
277 | session->transport = transport; | 279 | session->transport = transport; |
278 | session->recovery_tmo = 120; | 280 | session->recovery_tmo = 120; |
279 | INIT_WORK(&session->recovery_work, session_recovery_timedout, session); | 281 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); |
280 | INIT_LIST_HEAD(&session->host_list); | 282 | INIT_LIST_HEAD(&session->host_list); |
281 | INIT_LIST_HEAD(&session->sess_list); | 283 | INIT_LIST_HEAD(&session->sess_list); |
282 | 284 | ||
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 9f070f0d0f2..3fded483146 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -964,9 +964,10 @@ struct work_queue_wrapper { | |||
964 | }; | 964 | }; |
965 | 965 | ||
966 | static void | 966 | static void |
967 | spi_dv_device_work_wrapper(void *data) | 967 | spi_dv_device_work_wrapper(struct work_struct *work) |
968 | { | 968 | { |
969 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 969 | struct work_queue_wrapper *wqw = |
970 | container_of(work, struct work_queue_wrapper, work); | ||
970 | struct scsi_device *sdev = wqw->sdev; | 971 | struct scsi_device *sdev = wqw->sdev; |
971 | 972 | ||
972 | kfree(wqw); | 973 | kfree(wqw); |
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev) | |||
1006 | return; | 1007 | return; |
1007 | } | 1008 | } |
1008 | 1009 | ||
1009 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); | 1010 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); |
1010 | wqw->sdev = sdev; | 1011 | wqw->sdev = sdev; |
1011 | 1012 | ||
1012 | schedule_work(&wqw->work); | 1013 | schedule_work(&wqw->work); |
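For one-shot work with no long-lived parent object, the spi transport keeps a small heap-allocated wrapper around the work item; after the conversion the handler recovers the wrapper with container_of() and frees it itself. A sketch of that shape under assumed names (demo_wrapper, demo_handler and the payload field are placeholders, not the driver's identifiers):

struct demo_wrapper {
	struct work_struct	work;
	int			payload;	/* whatever the job needs */
};

static void demo_handler(struct work_struct *work)
{
	struct demo_wrapper *wqw =
		container_of(work, struct demo_wrapper, work);
	int payload = wqw->payload;

	kfree(wqw);			/* wrapper is no longer needed */
	/* ... act on payload ... */
}

static int demo_schedule(int payload)
{
	struct demo_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_KERNEL);

	if (!wqw)
		return -ENOMEM;
	wqw->payload = payload;
	INIT_WORK(&wqw->work, demo_handler);
	schedule_work(&wqw->work);
	return 0;
}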
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 72025df5561..494d9b85648 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -148,7 +148,7 @@ struct chip_data { | |||
148 | void (*cs_control)(u32 command); | 148 | void (*cs_control)(u32 command); |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static void pump_messages(void *data); | 151 | static void pump_messages(struct work_struct *work); |
152 | 152 | ||
153 | static int flush(struct driver_data *drv_data) | 153 | static int flush(struct driver_data *drv_data) |
154 | { | 154 | { |
@@ -884,9 +884,10 @@ static void pump_transfers(unsigned long data) | |||
884 | } | 884 | } |
885 | } | 885 | } |
886 | 886 | ||
887 | static void pump_messages(void *data) | 887 | static void pump_messages(struct work_struct *work) |
888 | { | 888 | { |
889 | struct driver_data *drv_data = data; | 889 | struct driver_data *drv_data = |
890 | container_of(work, struct driver_data, pump_messages); | ||
890 | unsigned long flags; | 891 | unsigned long flags; |
891 | 892 | ||
892 | /* Lock queue and check for queue work */ | 893 | /* Lock queue and check for queue work */ |
@@ -1098,7 +1099,7 @@ static int init_queue(struct driver_data *drv_data) | |||
1098 | tasklet_init(&drv_data->pump_transfers, | 1099 | tasklet_init(&drv_data->pump_transfers, |
1099 | pump_transfers, (unsigned long)drv_data); | 1100 | pump_transfers, (unsigned long)drv_data); |
1100 | 1101 | ||
1101 | INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); | 1102 | INIT_WORK(&drv_data->pump_messages, pump_messages); |
1102 | drv_data->workqueue = create_singlethread_workqueue( | 1103 | drv_data->workqueue = create_singlethread_workqueue( |
1103 | drv_data->master->cdev.dev->bus_id); | 1104 | drv_data->master->cdev.dev->bus_id); |
1104 | if (drv_data->workqueue == NULL) | 1105 | if (drv_data->workqueue == NULL) |
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index a23862ef72b..08c1c57c612 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -265,9 +265,10 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
265 | * Drivers can provide word-at-a-time i/o primitives, or provide | 265 | * Drivers can provide word-at-a-time i/o primitives, or provide |
266 | * transfer-at-a-time ones to leverage dma or fifo hardware. | 266 | * transfer-at-a-time ones to leverage dma or fifo hardware. |
267 | */ | 267 | */ |
268 | static void bitbang_work(void *_bitbang) | 268 | static void bitbang_work(struct work_struct *work) |
269 | { | 269 | { |
270 | struct spi_bitbang *bitbang = _bitbang; | 270 | struct spi_bitbang *bitbang = |
271 | container_of(work, struct spi_bitbang, work); | ||
271 | unsigned long flags; | 272 | unsigned long flags; |
272 | 273 | ||
273 | spin_lock_irqsave(&bitbang->lock, flags); | 274 | spin_lock_irqsave(&bitbang->lock, flags); |
@@ -456,7 +457,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
456 | if (!bitbang->master || !bitbang->chipselect) | 457 | if (!bitbang->master || !bitbang->chipselect) |
457 | return -EINVAL; | 458 | return -EINVAL; |
458 | 459 | ||
459 | INIT_WORK(&bitbang->work, bitbang_work, bitbang); | 460 | INIT_WORK(&bitbang->work, bitbang_work); |
460 | spin_lock_init(&bitbang->lock); | 461 | spin_lock_init(&bitbang->lock); |
461 | INIT_LIST_HEAD(&bitbang->queue); | 462 | INIT_LIST_HEAD(&bitbang->queue); |
462 | 463 | ||
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index e6565633ba0..3dfa3e40e14 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -158,7 +158,7 @@ struct cxacru_data { | |||
158 | const struct cxacru_modem_type *modem_type; | 158 | const struct cxacru_modem_type *modem_type; |
159 | 159 | ||
160 | int line_status; | 160 | int line_status; |
161 | struct work_struct poll_work; | 161 | struct delayed_work poll_work; |
162 | 162 | ||
163 | /* control handles */ | 163 | /* control handles */ |
164 | struct mutex cm_serialize; | 164 | struct mutex cm_serialize; |
@@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance) | |||
347 | return 0; | 347 | return 0; |
348 | } | 348 | } |
349 | 349 | ||
350 | static void cxacru_poll_status(struct cxacru_data *instance); | 350 | static void cxacru_poll_status(struct work_struct *work); |
351 | 351 | ||
352 | static int cxacru_atm_start(struct usbatm_data *usbatm_instance, | 352 | static int cxacru_atm_start(struct usbatm_data *usbatm_instance, |
353 | struct atm_dev *atm_dev) | 353 | struct atm_dev *atm_dev) |
@@ -376,12 +376,14 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance, | |||
376 | } | 376 | } |
377 | 377 | ||
378 | /* Start status polling */ | 378 | /* Start status polling */ |
379 | cxacru_poll_status(instance); | 379 | cxacru_poll_status(&instance->poll_work.work); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
383 | static void cxacru_poll_status(struct cxacru_data *instance) | 383 | static void cxacru_poll_status(struct work_struct *work) |
384 | { | 384 | { |
385 | struct cxacru_data *instance = | ||
386 | container_of(work, struct cxacru_data, poll_work.work); | ||
385 | u32 buf[CXINF_MAX] = {}; | 387 | u32 buf[CXINF_MAX] = {}; |
386 | struct usbatm_data *usbatm = instance->usbatm; | 388 | struct usbatm_data *usbatm = instance->usbatm; |
387 | struct atm_dev *atm_dev = usbatm->atm_dev; | 389 | struct atm_dev *atm_dev = usbatm->atm_dev; |
@@ -720,7 +722,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
720 | 722 | ||
721 | mutex_init(&instance->cm_serialize); | 723 | mutex_init(&instance->cm_serialize); |
722 | 724 | ||
723 | INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); | 725 | INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); |
724 | 726 | ||
725 | usbatm_instance->driver_data = instance; | 727 | usbatm_instance->driver_data = instance; |
726 | 728 | ||
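Where a driver previously called its poll routine directly to start polling, the converted code either hands the handler &instance->poll_work.work (as cxacru does above) or simply queues the delayed work with a zero delay. A hedged sketch of the second option, with demo_* names standing in for the driver's own:

struct demo_data {
	struct delayed_work	poll_work;
};

static void demo_poll(struct work_struct *work)
{
	struct demo_data *inst =
		container_of(work, struct demo_data, poll_work.work);

	/* ... read device status ... */
	schedule_delayed_work(&inst->poll_work, msecs_to_jiffies(5000));
}

static void demo_start(struct demo_data *inst)
{
	INIT_DELAYED_WORK(&inst->poll_work, demo_poll);
	schedule_delayed_work(&inst->poll_work, 0);	/* run immediately */
}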
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c index a823486495c..8ed6c75adf0 100644 --- a/drivers/usb/atm/speedtch.c +++ b/drivers/usb/atm/speedtch.c | |||
@@ -142,7 +142,7 @@ struct speedtch_instance_data { | |||
142 | 142 | ||
143 | struct speedtch_params params; /* set in probe, constant afterwards */ | 143 | struct speedtch_params params; /* set in probe, constant afterwards */ |
144 | 144 | ||
145 | struct work_struct status_checker; | 145 | struct delayed_work status_checker; |
146 | 146 | ||
147 | unsigned char last_status; | 147 | unsigned char last_status; |
148 | 148 | ||
@@ -498,8 +498,11 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance) | |||
498 | return ret; | 498 | return ret; |
499 | } | 499 | } |
500 | 500 | ||
501 | static void speedtch_check_status(struct speedtch_instance_data *instance) | 501 | static void speedtch_check_status(struct work_struct *work) |
502 | { | 502 | { |
503 | struct speedtch_instance_data *instance = | ||
504 | container_of(work, struct speedtch_instance_data, | ||
505 | status_checker.work); | ||
503 | struct usbatm_data *usbatm = instance->usbatm; | 506 | struct usbatm_data *usbatm = instance->usbatm; |
504 | struct atm_dev *atm_dev = usbatm->atm_dev; | 507 | struct atm_dev *atm_dev = usbatm->atm_dev; |
505 | unsigned char *buf = instance->scratch_buffer; | 508 | unsigned char *buf = instance->scratch_buffer; |
@@ -576,7 +579,7 @@ static void speedtch_status_poll(unsigned long data) | |||
576 | { | 579 | { |
577 | struct speedtch_instance_data *instance = (void *)data; | 580 | struct speedtch_instance_data *instance = (void *)data; |
578 | 581 | ||
579 | schedule_work(&instance->status_checker); | 582 | schedule_delayed_work(&instance->status_checker, 0); |
580 | 583 | ||
581 | /* The following check is racy, but the race is harmless */ | 584 | /* The following check is racy, but the race is harmless */ |
582 | if (instance->poll_delay < MAX_POLL_DELAY) | 585 | if (instance->poll_delay < MAX_POLL_DELAY) |
@@ -596,7 +599,7 @@ static void speedtch_resubmit_int(unsigned long data) | |||
596 | if (int_urb) { | 599 | if (int_urb) { |
597 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); | 600 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); |
598 | if (!ret) | 601 | if (!ret) |
599 | schedule_work(&instance->status_checker); | 602 | schedule_delayed_work(&instance->status_checker, 0); |
600 | else { | 603 | else { |
601 | atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); | 604 | atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); |
602 | mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); | 605 | mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); |
@@ -640,7 +643,7 @@ static void speedtch_handle_int(struct urb *int_urb) | |||
640 | 643 | ||
641 | if ((int_urb = instance->int_urb)) { | 644 | if ((int_urb = instance->int_urb)) { |
642 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); | 645 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); |
643 | schedule_work(&instance->status_checker); | 646 | schedule_delayed_work(&instance->status_checker, 0); |
644 | if (ret < 0) { | 647 | if (ret < 0) { |
645 | atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); | 648 | atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); |
646 | goto fail; | 649 | goto fail; |
@@ -855,7 +858,7 @@ static int speedtch_bind(struct usbatm_data *usbatm, | |||
855 | 858 | ||
856 | usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); | 859 | usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); |
857 | 860 | ||
858 | INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); | 861 | INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status); |
859 | 862 | ||
860 | instance->status_checker.timer.function = speedtch_status_poll; | 863 | instance->status_checker.timer.function = speedtch_status_poll; |
861 | instance->status_checker.timer.data = (unsigned long)instance; | 864 | instance->status_checker.timer.data = (unsigned long)instance; |
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index c137c041f7a..f2d196fa1e8 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -655,9 +655,9 @@ static int request_dsp(struct uea_softc *sc) | |||
655 | /* | 655 | /* |
656 | * The uea_load_page() function must be called within a process context | 656 | * The uea_load_page() function must be called within a process context |
657 | */ | 657 | */ |
658 | static void uea_load_page(void *xsc) | 658 | static void uea_load_page(struct work_struct *work) |
659 | { | 659 | { |
660 | struct uea_softc *sc = xsc; | 660 | struct uea_softc *sc = container_of(work, struct uea_softc, task); |
661 | u16 pageno = sc->pageno; | 661 | u16 pageno = sc->pageno; |
662 | u16 ovl = sc->ovl; | 662 | u16 ovl = sc->ovl; |
663 | struct block_info bi; | 663 | struct block_info bi; |
@@ -1348,7 +1348,7 @@ static int uea_boot(struct uea_softc *sc) | |||
1348 | 1348 | ||
1349 | uea_enters(INS_TO_USBDEV(sc)); | 1349 | uea_enters(INS_TO_USBDEV(sc)); |
1350 | 1350 | ||
1351 | INIT_WORK(&sc->task, uea_load_page, sc); | 1351 | INIT_WORK(&sc->task, uea_load_page); |
1352 | init_waitqueue_head(&sc->sync_q); | 1352 | init_waitqueue_head(&sc->sync_q); |
1353 | init_waitqueue_head(&sc->cmv_ack_wait); | 1353 | init_waitqueue_head(&sc->cmv_ack_wait); |
1354 | 1354 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index ec3438dc8ee..7f1fa956dcd 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb) | |||
421 | schedule_work(&acm->work); | 421 | schedule_work(&acm->work); |
422 | } | 422 | } |
423 | 423 | ||
424 | static void acm_softint(void *private) | 424 | static void acm_softint(struct work_struct *work) |
425 | { | 425 | { |
426 | struct acm *acm = private; | 426 | struct acm *acm = container_of(work, struct acm, work); |
427 | dbg("Entering acm_softint."); | 427 | dbg("Entering acm_softint."); |
428 | 428 | ||
429 | if (!ACM_READY(acm)) | 429 | if (!ACM_READY(acm)) |
@@ -927,7 +927,7 @@ skip_normal_probe: | |||
927 | acm->rx_buflimit = num_rx_buf; | 927 | acm->rx_buflimit = num_rx_buf; |
928 | acm->urb_task.func = acm_rx_tasklet; | 928 | acm->urb_task.func = acm_rx_tasklet; |
929 | acm->urb_task.data = (unsigned long) acm; | 929 | acm->urb_task.data = (unsigned long) acm; |
930 | INIT_WORK(&acm->work, acm_softint, acm); | 930 | INIT_WORK(&acm->work, acm_softint); |
931 | spin_lock_init(&acm->throttle_lock); | 931 | spin_lock_init(&acm->throttle_lock); |
932 | spin_lock_init(&acm->write_lock); | 932 | spin_lock_init(&acm->write_lock); |
933 | spin_lock_init(&acm->read_lock); | 933 | spin_lock_init(&acm->read_lock); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 0ce393eb3c4..9be41ed1f9a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -68,7 +68,7 @@ struct usb_hub { | |||
68 | 68 | ||
69 | unsigned has_indicators:1; | 69 | unsigned has_indicators:1; |
70 | u8 indicator[USB_MAXCHILDREN]; | 70 | u8 indicator[USB_MAXCHILDREN]; |
71 | struct work_struct leds; | 71 | struct delayed_work leds; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | 74 | ||
@@ -218,9 +218,10 @@ static void set_port_led( | |||
218 | 218 | ||
219 | #define LED_CYCLE_PERIOD ((2*HZ)/3) | 219 | #define LED_CYCLE_PERIOD ((2*HZ)/3) |
220 | 220 | ||
221 | static void led_work (void *__hub) | 221 | static void led_work (struct work_struct *work) |
222 | { | 222 | { |
223 | struct usb_hub *hub = __hub; | 223 | struct usb_hub *hub = |
224 | container_of(work, struct usb_hub, leds.work); | ||
224 | struct usb_device *hdev = hub->hdev; | 225 | struct usb_device *hdev = hub->hdev; |
225 | unsigned i; | 226 | unsigned i; |
226 | unsigned changed = 0; | 227 | unsigned changed = 0; |
@@ -405,9 +406,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) | |||
405 | * talking to TTs must queue control transfers (not just bulk and iso), so | 406 | * talking to TTs must queue control transfers (not just bulk and iso), so |
406 | * both can talk to the same hub concurrently. | 407 | * both can talk to the same hub concurrently. |
407 | */ | 408 | */ |
408 | static void hub_tt_kevent (void *arg) | 409 | static void hub_tt_kevent (struct work_struct *work) |
409 | { | 410 | { |
410 | struct usb_hub *hub = arg; | 411 | struct usb_hub *hub = |
412 | container_of(work, struct usb_hub, tt.kevent); | ||
411 | unsigned long flags; | 413 | unsigned long flags; |
412 | 414 | ||
413 | spin_lock_irqsave (&hub->tt.lock, flags); | 415 | spin_lock_irqsave (&hub->tt.lock, flags); |
@@ -694,7 +696,7 @@ static int hub_configure(struct usb_hub *hub, | |||
694 | 696 | ||
695 | spin_lock_init (&hub->tt.lock); | 697 | spin_lock_init (&hub->tt.lock); |
696 | INIT_LIST_HEAD (&hub->tt.clear_list); | 698 | INIT_LIST_HEAD (&hub->tt.clear_list); |
697 | INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); | 699 | INIT_WORK (&hub->tt.kevent, hub_tt_kevent); |
698 | switch (hdev->descriptor.bDeviceProtocol) { | 700 | switch (hdev->descriptor.bDeviceProtocol) { |
699 | case 0: | 701 | case 0: |
700 | break; | 702 | break; |
@@ -938,7 +940,7 @@ descriptor_error: | |||
938 | INIT_LIST_HEAD(&hub->event_list); | 940 | INIT_LIST_HEAD(&hub->event_list); |
939 | hub->intfdev = &intf->dev; | 941 | hub->intfdev = &intf->dev; |
940 | hub->hdev = hdev; | 942 | hub->hdev = hdev; |
941 | INIT_WORK(&hub->leds, led_work, hub); | 943 | INIT_DELAYED_WORK(&hub->leds, led_work); |
942 | 944 | ||
943 | usb_set_intfdata (intf, hub); | 945 | usb_set_intfdata (intf, hub); |
944 | intf->needs_remote_wakeup = 1; | 946 | intf->needs_remote_wakeup = 1; |
@@ -2381,7 +2383,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) | |||
2381 | /* hub LEDs are probably harder to miss than syslog */ | 2383 | /* hub LEDs are probably harder to miss than syslog */ |
2382 | if (hub->has_indicators) { | 2384 | if (hub->has_indicators) { |
2383 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; | 2385 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; |
2384 | schedule_work (&hub->leds); | 2386 | schedule_delayed_work (&hub->leds, 0); |
2385 | } | 2387 | } |
2386 | } | 2388 | } |
2387 | kfree(qual); | 2389 | kfree(qual); |
@@ -2555,7 +2557,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2555 | if (hub->has_indicators) { | 2557 | if (hub->has_indicators) { |
2556 | hub->indicator[port1-1] = | 2558 | hub->indicator[port1-1] = |
2557 | INDICATOR_AMBER_BLINK; | 2559 | INDICATOR_AMBER_BLINK; |
2558 | schedule_work (&hub->leds); | 2560 | schedule_delayed_work (&hub->leds, 0); |
2559 | } | 2561 | } |
2560 | status = -ENOTCONN; /* Don't retry */ | 2562 | status = -ENOTCONN; /* Don't retry */ |
2561 | goto loop_disable; | 2563 | goto loop_disable; |
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 29b0fa9ff9d..7390b67c609 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1501,9 +1501,10 @@ struct set_config_request { | |||
1501 | }; | 1501 | }; |
1502 | 1502 | ||
1503 | /* Worker routine for usb_driver_set_configuration() */ | 1503 | /* Worker routine for usb_driver_set_configuration() */ |
1504 | static void driver_set_config_work(void *_req) | 1504 | static void driver_set_config_work(struct work_struct *work) |
1505 | { | 1505 | { |
1506 | struct set_config_request *req = _req; | 1506 | struct set_config_request *req = |
1507 | container_of(work, struct set_config_request, work); | ||
1507 | 1508 | ||
1508 | usb_lock_device(req->udev); | 1509 | usb_lock_device(req->udev); |
1509 | usb_set_configuration(req->udev, req->config); | 1510 | usb_set_configuration(req->udev, req->config); |
@@ -1541,7 +1542,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config) | |||
1541 | return -ENOMEM; | 1542 | return -ENOMEM; |
1542 | req->udev = udev; | 1543 | req->udev = udev; |
1543 | req->config = config; | 1544 | req->config = config; |
1544 | INIT_WORK(&req->work, driver_set_config_work, req); | 1545 | INIT_WORK(&req->work, driver_set_config_work); |
1545 | 1546 | ||
1546 | usb_get_dev(udev); | 1547 | usb_get_dev(udev); |
1547 | if (!schedule_work(&req->work)) { | 1548 | if (!schedule_work(&req->work)) { |
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 81cb52564e6..02426d0b9a3 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -203,9 +203,10 @@ static void ksuspend_usb_cleanup(void) | |||
203 | #ifdef CONFIG_USB_SUSPEND | 203 | #ifdef CONFIG_USB_SUSPEND |
204 | 204 | ||
205 | /* usb_autosuspend_work - callback routine to autosuspend a USB device */ | 205 | /* usb_autosuspend_work - callback routine to autosuspend a USB device */ |
206 | static void usb_autosuspend_work(void *_udev) | 206 | static void usb_autosuspend_work(struct work_struct *work) |
207 | { | 207 | { |
208 | struct usb_device *udev = _udev; | 208 | struct usb_device *udev = |
209 | container_of(work, struct usb_device, autosuspend.work); | ||
209 | 210 | ||
210 | usb_pm_lock(udev); | 211 | usb_pm_lock(udev); |
211 | udev->auto_pm = 1; | 212 | udev->auto_pm = 1; |
@@ -215,7 +216,7 @@ static void usb_autosuspend_work(void *_udev) | |||
215 | 216 | ||
216 | #else | 217 | #else |
217 | 218 | ||
218 | static void usb_autosuspend_work(void *_udev) | 219 | static void usb_autosuspend_work(struct work_struct *work) |
219 | {} | 220 | {} |
220 | 221 | ||
221 | #endif /* CONFIG_USB_SUSPEND */ | 222 | #endif /* CONFIG_USB_SUSPEND */ |
@@ -304,7 +305,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) | |||
304 | 305 | ||
305 | #ifdef CONFIG_PM | 306 | #ifdef CONFIG_PM |
306 | mutex_init(&dev->pm_mutex); | 307 | mutex_init(&dev->pm_mutex); |
307 | INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); | 308 | INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); |
308 | #endif | 309 | #endif |
309 | return dev; | 310 | return dev; |
310 | } | 311 | } |
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 3bd1dfe565c..d15bf22b9a0 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags) | |||
1833 | spin_unlock_irqrestore(&dev->req_lock, flags); | 1833 | spin_unlock_irqrestore(&dev->req_lock, flags); |
1834 | } | 1834 | } |
1835 | 1835 | ||
1836 | static void eth_work (void *_dev) | 1836 | static void eth_work (struct work_struct *work) |
1837 | { | 1837 | { |
1838 | struct eth_dev *dev = _dev; | 1838 | struct eth_dev *dev = container_of(work, struct eth_dev, work); |
1839 | 1839 | ||
1840 | if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { | 1840 | if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { |
1841 | if (netif_running (dev->net)) | 1841 | if (netif_running (dev->net)) |
@@ -2398,7 +2398,7 @@ autoconf_fail: | |||
2398 | dev = netdev_priv(net); | 2398 | dev = netdev_priv(net); |
2399 | spin_lock_init (&dev->lock); | 2399 | spin_lock_init (&dev->lock); |
2400 | spin_lock_init (&dev->req_lock); | 2400 | spin_lock_init (&dev->req_lock); |
2401 | INIT_WORK (&dev->work, eth_work, dev); | 2401 | INIT_WORK (&dev->work, eth_work); |
2402 | INIT_LIST_HEAD (&dev->tx_reqs); | 2402 | INIT_LIST_HEAD (&dev->tx_reqs); |
2403 | INIT_LIST_HEAD (&dev->rx_reqs); | 2403 | INIT_LIST_HEAD (&dev->rx_reqs); |
2404 | 2404 | ||
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index ef54e310bfc..a9d7119e317 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
@@ -163,7 +163,7 @@ struct u132_endp { | |||
163 | u16 queue_next; | 163 | u16 queue_next; |
164 | struct urb *urb_list[ENDP_QUEUE_SIZE]; | 164 | struct urb *urb_list[ENDP_QUEUE_SIZE]; |
165 | struct list_head urb_more; | 165 | struct list_head urb_more; |
166 | struct work_struct scheduler; | 166 | struct delayed_work scheduler; |
167 | }; | 167 | }; |
168 | struct u132_ring { | 168 | struct u132_ring { |
169 | unsigned in_use:1; | 169 | unsigned in_use:1; |
@@ -171,7 +171,7 @@ struct u132_ring { | |||
171 | u8 number; | 171 | u8 number; |
172 | struct u132 *u132; | 172 | struct u132 *u132; |
173 | struct u132_endp *curr_endp; | 173 | struct u132_endp *curr_endp; |
174 | struct work_struct scheduler; | 174 | struct delayed_work scheduler; |
175 | }; | 175 | }; |
176 | #define OHCI_QUIRK_AMD756 0x01 | 176 | #define OHCI_QUIRK_AMD756 0x01 |
177 | #define OHCI_QUIRK_SUPERIO 0x02 | 177 | #define OHCI_QUIRK_SUPERIO 0x02 |
@@ -198,7 +198,7 @@ struct u132 { | |||
198 | u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; | 198 | u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; |
199 | int flags; | 199 | int flags; |
200 | unsigned long next_statechange; | 200 | unsigned long next_statechange; |
201 | struct work_struct monitor; | 201 | struct delayed_work monitor; |
202 | int num_endpoints; | 202 | int num_endpoints; |
203 | struct u132_addr addr[MAX_U132_ADDRS]; | 203 | struct u132_addr addr[MAX_U132_ADDRS]; |
204 | struct u132_udev udev[MAX_U132_UDEVS]; | 204 | struct u132_udev udev[MAX_U132_UDEVS]; |
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, | |||
310 | if (delta > 0) { | 310 | if (delta > 0) { |
311 | if (queue_delayed_work(workqueue, &ring->scheduler, delta)) | 311 | if (queue_delayed_work(workqueue, &ring->scheduler, delta)) |
312 | return; | 312 | return; |
313 | } else if (queue_work(workqueue, &ring->scheduler)) | 313 | } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) |
314 | return; | 314 | return; |
315 | kref_put(&u132->kref, u132_hcd_delete); | 315 | kref_put(&u132->kref, u132_hcd_delete); |
316 | return; | 316 | return; |
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132, | |||
389 | static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, | 389 | static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, |
390 | unsigned int delta) | 390 | unsigned int delta) |
391 | { | 391 | { |
392 | if (delta > 0) { | 392 | if (queue_delayed_work(workqueue, &endp->scheduler, delta)) |
393 | if (queue_delayed_work(workqueue, &endp->scheduler, delta)) | 393 | kref_get(&endp->kref); |
394 | kref_get(&endp->kref); | ||
395 | } else if (queue_work(workqueue, &endp->scheduler)) | ||
396 | kref_get(&endp->kref); | ||
397 | return; | ||
398 | } | 394 | } |
399 | 395 | ||
400 | static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) | 396 | static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) |
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132) | |||
410 | 406 | ||
411 | static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) | 407 | static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) |
412 | { | 408 | { |
413 | if (delta > 0) { | 409 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) |
414 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) { | 410 | kref_get(&u132->kref); |
415 | kref_get(&u132->kref); | ||
416 | } | ||
417 | } else if (queue_work(workqueue, &u132->monitor)) | ||
418 | kref_get(&u132->kref); | ||
419 | return; | ||
420 | } | 411 | } |
421 | 412 | ||
422 | static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) | 413 | static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) |
423 | { | 414 | { |
424 | if (delta > 0) { | 415 | if (!queue_delayed_work(workqueue, &u132->monitor, delta)) |
425 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) | 416 | kref_put(&u132->kref, u132_hcd_delete); |
426 | return; | ||
427 | } else if (queue_work(workqueue, &u132->monitor)) | ||
428 | return; | ||
429 | kref_put(&u132->kref, u132_hcd_delete); | ||
430 | return; | ||
431 | } | 417 | } |
432 | 418 | ||
433 | static void u132_monitor_cancel_work(struct u132 *u132) | 419 | static void u132_monitor_cancel_work(struct u132 *u132) |
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132) | |||
489 | return 0; | 475 | return 0; |
490 | } | 476 | } |
491 | 477 | ||
492 | static void u132_hcd_monitor_work(void *data) | 478 | static void u132_hcd_monitor_work(struct work_struct *work) |
493 | { | 479 | { |
494 | struct u132 *u132 = data; | 480 | struct u132 *u132 = container_of(work, struct u132, monitor.work); |
495 | if (u132->going > 1) { | 481 | if (u132->going > 1) { |
496 | dev_err(&u132->platform_dev->dev, "device has been removed %d\n" | 482 | dev_err(&u132->platform_dev->dev, "device has been removed %d\n" |
497 | , u132->going); | 483 | , u132->going); |
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf, | |||
1315 | } | 1301 | } |
1316 | } | 1302 | } |
1317 | 1303 | ||
1318 | static void u132_hcd_ring_work_scheduler(void *data); | ||
1319 | static void u132_hcd_endp_work_scheduler(void *data); | ||
1320 | /* | 1304 | /* |
1321 | * this work function is only executed from the work queue | 1305 | * this work function is only executed from the work queue |
1322 | * | 1306 | * |
1323 | */ | 1307 | */ |
1324 | static void u132_hcd_ring_work_scheduler(void *data) | 1308 | static void u132_hcd_ring_work_scheduler(struct work_struct *work) |
1325 | { | 1309 | { |
1326 | struct u132_ring *ring = data; | 1310 | struct u132_ring *ring = |
1311 | container_of(work, struct u132_ring, scheduler.work); | ||
1327 | struct u132 *u132 = ring->u132; | 1312 | struct u132 *u132 = ring->u132; |
1328 | down(&u132->scheduler_lock); | 1313 | down(&u132->scheduler_lock); |
1329 | if (ring->in_use) { | 1314 | if (ring->in_use) { |
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data) | |||
1382 | } | 1367 | } |
1383 | } | 1368 | } |
1384 | 1369 | ||
1385 | static void u132_hcd_endp_work_scheduler(void *data) | 1370 | static void u132_hcd_endp_work_scheduler(struct work_struct *work) |
1386 | { | 1371 | { |
1387 | struct u132_ring *ring; | 1372 | struct u132_ring *ring; |
1388 | struct u132_endp *endp = data; | 1373 | struct u132_endp *endp = |
1374 | container_of(work, struct u132_endp, scheduler.work); | ||
1389 | struct u132 *u132 = endp->u132; | 1375 | struct u132 *u132 = endp->u132; |
1390 | down(&u132->scheduler_lock); | 1376 | down(&u132->scheduler_lock); |
1391 | ring = endp->ring; | 1377 | ring = endp->ring; |
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132, | |||
1943 | if (!endp) { | 1929 | if (!endp) { |
1944 | return -ENOMEM; | 1930 | return -ENOMEM; |
1945 | } | 1931 | } |
1946 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 1932 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
1947 | spin_lock_init(&endp->queue_lock.slock); | 1933 | spin_lock_init(&endp->queue_lock.slock); |
1948 | INIT_LIST_HEAD(&endp->urb_more); | 1934 | INIT_LIST_HEAD(&endp->urb_more); |
1949 | ring = endp->ring = &u132->ring[0]; | 1935 | ring = endp->ring = &u132->ring[0]; |
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132, | |||
2032 | if (!endp) { | 2018 | if (!endp) { |
2033 | return -ENOMEM; | 2019 | return -ENOMEM; |
2034 | } | 2020 | } |
2035 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 2021 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
2036 | spin_lock_init(&endp->queue_lock.slock); | 2022 | spin_lock_init(&endp->queue_lock.slock); |
2037 | INIT_LIST_HEAD(&endp->urb_more); | 2023 | INIT_LIST_HEAD(&endp->urb_more); |
2038 | endp->dequeueing = 0; | 2024 | endp->dequeueing = 0; |
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132, | |||
2117 | if (!endp) { | 2103 | if (!endp) { |
2118 | return -ENOMEM; | 2104 | return -ENOMEM; |
2119 | } | 2105 | } |
2120 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 2106 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
2121 | spin_lock_init(&endp->queue_lock.slock); | 2107 | spin_lock_init(&endp->queue_lock.slock); |
2122 | INIT_LIST_HEAD(&endp->urb_more); | 2108 | INIT_LIST_HEAD(&endp->urb_more); |
2123 | ring = endp->ring = &u132->ring[0]; | 2109 | ring = endp->ring = &u132->ring[0]; |
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev) | |||
3096 | ring->number = rings + 1; | 3082 | ring->number = rings + 1; |
3097 | ring->length = 0; | 3083 | ring->length = 0; |
3098 | ring->curr_endp = NULL; | 3084 | ring->curr_endp = NULL; |
3099 | INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, | 3085 | INIT_DELAYED_WORK(&ring->scheduler, |
3100 | (void *)ring); | 3086 | u132_hcd_ring_work_scheduler); |
3101 | } down(&u132->sw_lock); | 3087 | } down(&u132->sw_lock); |
3102 | INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); | 3088 | INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); |
3103 | while (ports-- > 0) { | 3089 | while (ports-- > 0) { |
3104 | struct u132_port *port = &u132->port[ports]; | 3090 | struct u132_port *port = &u132->port[ports]; |
3105 | port->u132 = u132; | 3091 | port->u132 = u132; |
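The u132-hcd queueing helpers above also lose their delta > 0 special case: once the field is a delayed_work, queue_delayed_work() with a delay of 0 queues the item immediately, so the old queue_work()/queue_delayed_work() branching collapses into one call. Roughly, under assumed demo_* names (the kref handling mirrors the pattern in the hunk, not a specific driver function):

/* before: two paths depending on delta
 *	if (delta > 0) queue_delayed_work(wq, &obj->dwork, delta);
 *	else           queue_work(wq, &obj->work);
 */

/* after: one path; a delay of 0 means "queue now" */
static void demo_queue(struct workqueue_struct *wq,
		       struct delayed_work *dwork, unsigned long delta,
		       struct kref *ref)
{
	if (queue_delayed_work(wq, dwork, delta))
		kref_get(ref);		/* queued work now holds a reference */
}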
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index a49644b7c58..4295bab4f1e 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c | |||
@@ -969,9 +969,10 @@ static void hid_retry_timeout(unsigned long _hid) | |||
969 | } | 969 | } |
970 | 970 | ||
971 | /* Workqueue routine to reset the device or clear a halt */ | 971 | /* Workqueue routine to reset the device or clear a halt */ |
972 | static void hid_reset(void *_hid) | 972 | static void hid_reset(struct work_struct *work) |
973 | { | 973 | { |
974 | struct hid_device *hid = (struct hid_device *) _hid; | 974 | struct hid_device *hid = |
975 | container_of(work, struct hid_device, reset_work); | ||
975 | int rc_lock, rc = 0; | 976 | int rc_lock, rc = 0; |
976 | 977 | ||
977 | if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { | 978 | if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { |
@@ -2043,7 +2044,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) | |||
2043 | 2044 | ||
2044 | init_waitqueue_head(&hid->wait); | 2045 | init_waitqueue_head(&hid->wait); |
2045 | 2046 | ||
2046 | INIT_WORK(&hid->reset_work, hid_reset, hid); | 2047 | INIT_WORK(&hid->reset_work, hid_reset); |
2047 | setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); | 2048 | setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); |
2048 | 2049 | ||
2049 | spin_lock_init(&hid->inlock); | 2050 | spin_lock_init(&hid->inlock); |
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index ba30ca6a14a..02cbb7fff24 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c | |||
@@ -76,7 +76,7 @@ struct appledisplay { | |||
76 | char *urbdata; /* interrupt URB data buffer */ | 76 | char *urbdata; /* interrupt URB data buffer */ |
77 | char *msgdata; /* control message data buffer */ | 77 | char *msgdata; /* control message data buffer */ |
78 | 78 | ||
79 | struct work_struct work; | 79 | struct delayed_work work; |
80 | int button_pressed; | 80 | int button_pressed; |
81 | spinlock_t lock; | 81 | spinlock_t lock; |
82 | }; | 82 | }; |
@@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb) | |||
117 | case ACD_BTN_BRIGHT_UP: | 117 | case ACD_BTN_BRIGHT_UP: |
118 | case ACD_BTN_BRIGHT_DOWN: | 118 | case ACD_BTN_BRIGHT_DOWN: |
119 | pdata->button_pressed = 1; | 119 | pdata->button_pressed = 1; |
120 | queue_work(wq, &pdata->work); | 120 | queue_delayed_work(wq, &pdata->work, 0); |
121 | break; | 121 | break; |
122 | case ACD_BTN_NONE: | 122 | case ACD_BTN_NONE: |
123 | default: | 123 | default: |
@@ -184,9 +184,10 @@ static struct backlight_properties appledisplay_bl_data = { | |||
184 | .max_brightness = 0xFF | 184 | .max_brightness = 0xFF |
185 | }; | 185 | }; |
186 | 186 | ||
187 | static void appledisplay_work(void *private) | 187 | static void appledisplay_work(struct work_struct *work) |
188 | { | 188 | { |
189 | struct appledisplay *pdata = private; | 189 | struct appledisplay *pdata = |
190 | container_of(work, struct appledisplay, work.work); | ||
190 | int retval; | 191 | int retval; |
191 | 192 | ||
192 | up(&pdata->bd->sem); | 193 | up(&pdata->bd->sem); |
@@ -238,7 +239,7 @@ static int appledisplay_probe(struct usb_interface *iface, | |||
238 | pdata->udev = udev; | 239 | pdata->udev = udev; |
239 | 240 | ||
240 | spin_lock_init(&pdata->lock); | 241 | spin_lock_init(&pdata->lock); |
241 | INIT_WORK(&pdata->work, appledisplay_work, pdata); | 242 | INIT_DELAYED_WORK(&pdata->work, appledisplay_work); |
242 | 243 | ||
243 | /* Allocate buffer for control messages */ | 244 | /* Allocate buffer for control messages */ |
244 | pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); | 245 | pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); |
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index cb0ba3107d7..18b1925032a 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -156,9 +156,9 @@ struct usb_ftdi { | |||
156 | struct usb_device *udev; | 156 | struct usb_device *udev; |
157 | struct usb_interface *interface; | 157 | struct usb_interface *interface; |
158 | struct usb_class_driver *class; | 158 | struct usb_class_driver *class; |
159 | struct work_struct status_work; | 159 | struct delayed_work status_work; |
160 | struct work_struct command_work; | 160 | struct delayed_work command_work; |
161 | struct work_struct respond_work; | 161 | struct delayed_work respond_work; |
162 | struct u132_platform_data platform_data; | 162 | struct u132_platform_data platform_data; |
163 | struct resource resources[0]; | 163 | struct resource resources[0]; |
164 | struct platform_device platform_dev; | 164 | struct platform_device platform_dev; |
@@ -210,23 +210,14 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi) | |||
210 | 210 | ||
211 | static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) | 211 | static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) |
212 | { | 212 | { |
213 | if (delta > 0) { | 213 | if (!queue_delayed_work(status_queue, &ftdi->status_work, delta)) |
214 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) | 214 | kref_put(&ftdi->kref, ftdi_elan_delete); |
215 | return; | ||
216 | } else if (queue_work(status_queue, &ftdi->status_work)) | ||
217 | return; | ||
218 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
219 | return; | ||
220 | } | 215 | } |
221 | 216 | ||
222 | static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 217 | static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
223 | { | 218 | { |
224 | if (delta > 0) { | 219 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) |
225 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) | 220 | kref_get(&ftdi->kref); |
226 | kref_get(&ftdi->kref); | ||
227 | } else if (queue_work(status_queue, &ftdi->status_work)) | ||
228 | kref_get(&ftdi->kref); | ||
229 | return; | ||
230 | } | 221 | } |
231 | 222 | ||
232 | static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) | 223 | static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) |
@@ -237,25 +228,14 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) | |||
237 | 228 | ||
238 | static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) | 229 | static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) |
239 | { | 230 | { |
240 | if (delta > 0) { | 231 | if (!queue_delayed_work(command_queue, &ftdi->command_work, delta)) |
241 | if (queue_delayed_work(command_queue, &ftdi->command_work, | 232 | kref_put(&ftdi->kref, ftdi_elan_delete); |
242 | delta)) | ||
243 | return; | ||
244 | } else if (queue_work(command_queue, &ftdi->command_work)) | ||
245 | return; | ||
246 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
247 | return; | ||
248 | } | 233 | } |
249 | 234 | ||
250 | static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 235 | static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
251 | { | 236 | { |
252 | if (delta > 0) { | 237 | if (queue_delayed_work(command_queue, &ftdi->command_work, delta)) |
253 | if (queue_delayed_work(command_queue, &ftdi->command_work, | 238 | kref_get(&ftdi->kref); |
254 | delta)) | ||
255 | kref_get(&ftdi->kref); | ||
256 | } else if (queue_work(command_queue, &ftdi->command_work)) | ||
257 | kref_get(&ftdi->kref); | ||
258 | return; | ||
259 | } | 239 | } |
260 | 240 | ||
261 | static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) | 241 | static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) |
@@ -267,25 +247,14 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) | |||
267 | static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, | 247 | static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, |
268 | unsigned int delta) | 248 | unsigned int delta) |
269 | { | 249 | { |
270 | if (delta > 0) { | 250 | if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) |
271 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, | 251 | kref_put(&ftdi->kref, ftdi_elan_delete); |
272 | delta)) | ||
273 | return; | ||
274 | } else if (queue_work(respond_queue, &ftdi->respond_work)) | ||
275 | return; | ||
276 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
277 | return; | ||
278 | } | 252 | } |
279 | 253 | ||
280 | static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 254 | static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
281 | { | 255 | { |
282 | if (delta > 0) { | 256 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) |
283 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, | 257 | kref_get(&ftdi->kref); |
284 | delta)) | ||
285 | kref_get(&ftdi->kref); | ||
286 | } else if (queue_work(respond_queue, &ftdi->respond_work)) | ||
287 | kref_get(&ftdi->kref); | ||
288 | return; | ||
289 | } | 258 | } |
290 | 259 | ||
291 | static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) | 260 | static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) |
@@ -475,9 +444,11 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) | |||
475 | return; | 444 | return; |
476 | } | 445 | } |
477 | 446 | ||
478 | static void ftdi_elan_command_work(void *data) | 447 | static void ftdi_elan_command_work(struct work_struct *work) |
479 | { | 448 | { |
480 | struct usb_ftdi *ftdi = data; | 449 | struct usb_ftdi *ftdi = |
450 | container_of(work, struct usb_ftdi, command_work.work); | ||
451 | |||
481 | if (ftdi->disconnected > 0) { | 452 | if (ftdi->disconnected > 0) { |
482 | ftdi_elan_put_kref(ftdi); | 453 | ftdi_elan_put_kref(ftdi); |
483 | return; | 454 | return; |
@@ -500,9 +471,10 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi) | |||
500 | return; | 471 | return; |
501 | } | 472 | } |
502 | 473 | ||
503 | static void ftdi_elan_respond_work(void *data) | 474 | static void ftdi_elan_respond_work(struct work_struct *work) |
504 | { | 475 | { |
505 | struct usb_ftdi *ftdi = data; | 476 | struct usb_ftdi *ftdi = |
477 | container_of(work, struct usb_ftdi, respond_work.work); | ||
506 | if (ftdi->disconnected > 0) { | 478 | if (ftdi->disconnected > 0) { |
507 | ftdi_elan_put_kref(ftdi); | 479 | ftdi_elan_put_kref(ftdi); |
508 | return; | 480 | return; |
@@ -534,9 +506,10 @@ static void ftdi_elan_respond_work(void *data) | |||
534 | * after the FTDI has been synchronized | 506 | * after the FTDI has been synchronized |
535 | * | 507 | * |
536 | */ | 508 | */ |
537 | static void ftdi_elan_status_work(void *data) | 509 | static void ftdi_elan_status_work(struct work_struct *work) |
538 | { | 510 | { |
539 | struct usb_ftdi *ftdi = data; | 511 | struct usb_ftdi *ftdi = |
512 | container_of(work, struct usb_ftdi, status_work.work); | ||
540 | int work_delay_in_msec = 0; | 513 | int work_delay_in_msec = 0; |
541 | if (ftdi->disconnected > 0) { | 514 | if (ftdi->disconnected > 0) { |
542 | ftdi_elan_put_kref(ftdi); | 515 | ftdi_elan_put_kref(ftdi); |
@@ -2677,12 +2650,9 @@ static int ftdi_elan_probe(struct usb_interface *interface, | |||
2677 | ftdi->class = NULL; | 2650 | ftdi->class = NULL; |
2678 | dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" | 2651 | dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" |
2679 | "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); | 2652 | "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); |
2680 | INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, | 2653 | INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work); |
2681 | (void *)ftdi); | 2654 | INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work); |
2682 | INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, | 2655 | INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work); |
2683 | (void *)ftdi); | ||
2684 | INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work, | ||
2685 | (void *)ftdi); | ||
2686 | ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); | 2656 | ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); |
2687 | return 0; | 2657 | return 0; |
2688 | } else { | 2658 | } else { |
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c index 9110793f81d..9659c79e187 100644 --- a/drivers/usb/misc/phidgetkit.c +++ b/drivers/usb/misc/phidgetkit.c | |||
@@ -81,8 +81,8 @@ struct interfacekit { | |||
81 | unsigned char *data; | 81 | unsigned char *data; |
82 | dma_addr_t data_dma; | 82 | dma_addr_t data_dma; |
83 | 83 | ||
84 | struct work_struct do_notify; | 84 | struct delayed_work do_notify; |
85 | struct work_struct do_resubmit; | 85 | struct delayed_work do_resubmit; |
86 | unsigned long input_events; | 86 | unsigned long input_events; |
87 | unsigned long sensor_events; | 87 | unsigned long sensor_events; |
88 | }; | 88 | }; |
@@ -374,7 +374,7 @@ static void interfacekit_irq(struct urb *urb) | |||
374 | } | 374 | } |
375 | 375 | ||
376 | if (kit->input_events || kit->sensor_events) | 376 | if (kit->input_events || kit->sensor_events) |
377 | schedule_work(&kit->do_notify); | 377 | schedule_delayed_work(&kit->do_notify, 0); |
378 | 378 | ||
379 | resubmit: | 379 | resubmit: |
380 | status = usb_submit_urb(urb, SLAB_ATOMIC); | 380 | status = usb_submit_urb(urb, SLAB_ATOMIC); |
@@ -384,9 +384,10 @@ resubmit: | |||
384 | kit->udev->devpath, status); | 384 | kit->udev->devpath, status); |
385 | } | 385 | } |
386 | 386 | ||
387 | static void do_notify(void *data) | 387 | static void do_notify(struct work_struct *work) |
388 | { | 388 | { |
389 | struct interfacekit *kit = data; | 389 | struct interfacekit *kit = |
390 | container_of(work, struct interfacekit, do_notify.work); | ||
390 | int i; | 391 | int i; |
391 | char sysfs_file[8]; | 392 | char sysfs_file[8]; |
392 | 393 | ||
@@ -405,9 +406,11 @@ static void do_notify(void *data) | |||
405 | } | 406 | } |
406 | } | 407 | } |
407 | 408 | ||
408 | static void do_resubmit(void *data) | 409 | static void do_resubmit(struct work_struct *work) |
409 | { | 410 | { |
410 | set_outputs(data); | 411 | struct interfacekit *kit = |
412 | container_of(work, struct interfacekit, do_resubmit.work); | ||
413 | set_outputs(kit); | ||
411 | } | 414 | } |
412 | 415 | ||
413 | #define show_set_output(value) \ | 416 | #define show_set_output(value) \ |
@@ -575,8 +578,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic | |||
575 | 578 | ||
576 | kit->udev = usb_get_dev(dev); | 579 | kit->udev = usb_get_dev(dev); |
577 | kit->intf = intf; | 580 | kit->intf = intf; |
578 | INIT_WORK(&kit->do_notify, do_notify, kit); | 581 | INIT_DELAYED_WORK(&kit->do_notify, do_notify); |
579 | INIT_WORK(&kit->do_resubmit, do_resubmit, kit); | 582 | INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit); |
580 | usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, | 583 | usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, |
581 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, | 584 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, |
582 | interfacekit_irq, kit, endpoint->bInterval); | 585 | interfacekit_irq, kit, endpoint->bInterval); |
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c index c3469b0a67c..2bb4fa572bb 100644 --- a/drivers/usb/misc/phidgetmotorcontrol.c +++ b/drivers/usb/misc/phidgetmotorcontrol.c | |||
@@ -41,7 +41,7 @@ struct motorcontrol { | |||
41 | unsigned char *data; | 41 | unsigned char *data; |
42 | dma_addr_t data_dma; | 42 | dma_addr_t data_dma; |
43 | 43 | ||
44 | struct work_struct do_notify; | 44 | struct delayed_work do_notify; |
45 | unsigned long input_events; | 45 | unsigned long input_events; |
46 | unsigned long speed_events; | 46 | unsigned long speed_events; |
47 | unsigned long exceed_events; | 47 | unsigned long exceed_events; |
@@ -148,7 +148,7 @@ static void motorcontrol_irq(struct urb *urb) | |||
148 | set_bit(1, &mc->exceed_events); | 148 | set_bit(1, &mc->exceed_events); |
149 | 149 | ||
150 | if (mc->input_events || mc->exceed_events || mc->speed_events) | 150 | if (mc->input_events || mc->exceed_events || mc->speed_events) |
151 | schedule_work(&mc->do_notify); | 151 | schedule_delayed_work(&mc->do_notify, 0); |
152 | 152 | ||
153 | resubmit: | 153 | resubmit: |
154 | status = usb_submit_urb(urb, SLAB_ATOMIC); | 154 | status = usb_submit_urb(urb, SLAB_ATOMIC); |
@@ -159,9 +159,10 @@ resubmit: | |||
159 | mc->udev->devpath, status); | 159 | mc->udev->devpath, status); |
160 | } | 160 | } |
161 | 161 | ||
162 | static void do_notify(void *data) | 162 | static void do_notify(struct work_struct *work) |
163 | { | 163 | { |
164 | struct motorcontrol *mc = data; | 164 | struct motorcontrol *mc = |
165 | container_of(work, struct motorcontrol, do_notify.work); | ||
165 | int i; | 166 | int i; |
166 | char sysfs_file[8]; | 167 | char sysfs_file[8]; |
167 | 168 | ||
@@ -348,7 +349,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic | |||
348 | mc->udev = usb_get_dev(dev); | 349 | mc->udev = usb_get_dev(dev); |
349 | mc->intf = intf; | 350 | mc->intf = intf; |
350 | mc->acceleration[0] = mc->acceleration[1] = 10; | 351 | mc->acceleration[0] = mc->acceleration[1] = 10; |
351 | INIT_WORK(&mc->do_notify, do_notify, mc); | 352 | INIT_DELAYED_WORK(&mc->do_notify, do_notify); |
352 | usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, | 353 | usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, |
353 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, | 354 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, |
354 | motorcontrol_irq, mc, endpoint->bInterval); | 355 | motorcontrol_irq, mc, endpoint->bInterval); |
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index 7c906a43e49..fa78326d0bf 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c | |||
@@ -222,7 +222,7 @@ struct kaweth_device | |||
222 | int suspend_lowmem_ctrl; | 222 | int suspend_lowmem_ctrl; |
223 | int linkstate; | 223 | int linkstate; |
224 | int opened; | 224 | int opened; |
225 | struct work_struct lowmem_work; | 225 | struct delayed_work lowmem_work; |
226 | 226 | ||
227 | struct usb_device *dev; | 227 | struct usb_device *dev; |
228 | struct net_device *net; | 228 | struct net_device *net; |
@@ -530,9 +530,10 @@ resubmit: | |||
530 | kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); | 530 | kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); |
531 | } | 531 | } |
532 | 532 | ||
533 | static void kaweth_resubmit_tl(void *d) | 533 | static void kaweth_resubmit_tl(struct work_struct *work) |
534 | { | 534 | { |
535 | struct kaweth_device *kaweth = (struct kaweth_device *)d; | 535 | struct kaweth_device *kaweth = |
536 | container_of(work, struct kaweth_device, lowmem_work.work); | ||
536 | 537 | ||
537 | if (IS_BLOCKED(kaweth->status)) | 538 | if (IS_BLOCKED(kaweth->status)) |
538 | return; | 539 | return; |
@@ -1126,7 +1127,7 @@ err_fw: | |||
1126 | 1127 | ||
1127 | /* kaweth is zeroed as part of alloc_netdev */ | 1128 | /* kaweth is zeroed as part of alloc_netdev */ |
1128 | 1129 | ||
1129 | INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); | 1130 | INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); |
1130 | 1131 | ||
1131 | SET_MODULE_OWNER(netdev); | 1132 | SET_MODULE_OWNER(netdev); |
1132 | 1133 | ||
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index 69eb0db399d..b5690b3834e 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c | |||
@@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus) | |||
1281 | static struct workqueue_struct *pegasus_workqueue = NULL; | 1281 | static struct workqueue_struct *pegasus_workqueue = NULL; |
1282 | #define CARRIER_CHECK_DELAY (2 * HZ) | 1282 | #define CARRIER_CHECK_DELAY (2 * HZ) |
1283 | 1283 | ||
1284 | static void check_carrier(void *data) | 1284 | static void check_carrier(struct work_struct *work) |
1285 | { | 1285 | { |
1286 | pegasus_t *pegasus = data; | 1286 | pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); |
1287 | set_carrier(pegasus->net); | 1287 | set_carrier(pegasus->net); |
1288 | if (!(pegasus->flags & PEGASUS_UNPLUG)) { | 1288 | if (!(pegasus->flags & PEGASUS_UNPLUG)) { |
1289 | queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, | 1289 | queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, |
@@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf, | |||
1319 | 1319 | ||
1320 | tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); | 1320 | tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); |
1321 | 1321 | ||
1322 | INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); | 1322 | INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); |
1323 | 1323 | ||
1324 | pegasus->intf = intf; | 1324 | pegasus->intf = intf; |
1325 | pegasus->usb = dev; | 1325 | pegasus->usb = dev; |
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h index 006438069b6..98f6898cae1 100644 --- a/drivers/usb/net/pegasus.h +++ b/drivers/usb/net/pegasus.h | |||
@@ -95,7 +95,7 @@ typedef struct pegasus { | |||
95 | int dev_index; | 95 | int dev_index; |
96 | int intr_interval; | 96 | int intr_interval; |
97 | struct tasklet_struct rx_tl; | 97 | struct tasklet_struct rx_tl; |
98 | struct work_struct carrier_check; | 98 | struct delayed_work carrier_check; |
99 | struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; | 99 | struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; |
100 | struct sk_buff *rx_pool[RX_SKBS]; | 100 | struct sk_buff *rx_pool[RX_SKBS]; |
101 | struct sk_buff *rx_skb; | 101 | struct sk_buff *rx_skb; |
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index 7672e11c94c..327f9755567 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c | |||
@@ -782,9 +782,10 @@ static struct ethtool_ops usbnet_ethtool_ops = { | |||
782 | * especially now that control transfers can be queued. | 782 | * especially now that control transfers can be queued. |
783 | */ | 783 | */ |
784 | static void | 784 | static void |
785 | kevent (void *data) | 785 | kevent (struct work_struct *work) |
786 | { | 786 | { |
787 | struct usbnet *dev = data; | 787 | struct usbnet *dev = |
788 | container_of(work, struct usbnet, kevent); | ||
788 | int status; | 789 | int status; |
789 | 790 | ||
790 | /* usb_clear_halt() needs a thread context */ | 791 | /* usb_clear_halt() needs a thread context */ |
@@ -1146,7 +1147,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1146 | skb_queue_head_init (&dev->done); | 1147 | skb_queue_head_init (&dev->done); |
1147 | dev->bh.func = usbnet_bh; | 1148 | dev->bh.func = usbnet_bh; |
1148 | dev->bh.data = (unsigned long) dev; | 1149 | dev->bh.data = (unsigned long) dev; |
1149 | INIT_WORK (&dev->kevent, kevent, dev); | 1150 | INIT_WORK (&dev->kevent, kevent); |
1150 | dev->delay.function = usbnet_bh; | 1151 | dev->delay.function = usbnet_bh; |
1151 | dev->delay.data = (unsigned long) dev; | 1152 | dev->delay.data = (unsigned long) dev; |
1152 | init_timer (&dev->delay); | 1153 | init_timer (&dev->delay); |
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c index b1b5707bc99..86bcf63b6ba 100644 --- a/drivers/usb/serial/aircable.c +++ b/drivers/usb/serial/aircable.c | |||
@@ -92,6 +92,7 @@ struct aircable_private { | |||
92 | struct circ_buf *rx_buf; /* read buffer */ | 92 | struct circ_buf *rx_buf; /* read buffer */ |
93 | int rx_flags; /* for throttilng */ | 93 | int rx_flags; /* for throttilng */ |
94 | struct work_struct rx_work; /* work cue for the receiving line */ | 94 | struct work_struct rx_work; /* work cue for the receiving line */ |
95 | struct usb_serial_port *port; /* USB port with which associated */ | ||
95 | }; | 96 | }; |
96 | 97 | ||
97 | /* Private methods */ | 98 | /* Private methods */ |
@@ -251,10 +252,11 @@ static void aircable_send(struct usb_serial_port *port) | |||
251 | schedule_work(&port->work); | 252 | schedule_work(&port->work); |
252 | } | 253 | } |
253 | 254 | ||
254 | static void aircable_read(void *params) | 255 | static void aircable_read(struct work_struct *work) |
255 | { | 256 | { |
256 | struct usb_serial_port *port = params; | 257 | struct aircable_private *priv = |
257 | struct aircable_private *priv = usb_get_serial_port_data(port); | 258 | container_of(work, struct aircable_private, rx_work); |
259 | struct usb_serial_port *port = priv->port; | ||
258 | struct tty_struct *tty; | 260 | struct tty_struct *tty; |
259 | unsigned char *data; | 261 | unsigned char *data; |
260 | int count; | 262 | int count; |
@@ -349,7 +351,8 @@ static int aircable_attach (struct usb_serial *serial) | |||
349 | } | 351 | } |
350 | 352 | ||
351 | priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); | 353 | priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); |
352 | INIT_WORK(&priv->rx_work, aircable_read, port); | 354 | priv->port = port; |
355 | INIT_WORK(&priv->rx_work, aircable_read); | ||
353 | 356 | ||
354 | usb_set_serial_port_data(serial->port[0], priv); | 357 | usb_set_serial_port_data(serial->port[0], priv); |
355 | 358 | ||
@@ -516,7 +519,7 @@ static void aircable_read_bulk_callback(struct urb *urb) | |||
516 | package_length - shift); | 519 | package_length - shift); |
517 | } | 520 | } |
518 | } | 521 | } |
519 | aircable_read(port); | 522 | aircable_read(&priv->rx_work); |
520 | } | 523 | } |
521 | 524 | ||
522 | /* Schedule the next read _if_ we are still open */ | 525 | /* Schedule the next read _if_ we are still open */ |
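The aircable hunks above show the recurring shape of this conversion: the work handler no longer receives an arbitrary data pointer, so the private structure grows a back-pointer to the port it services, INIT_WORK() loses its third argument, and direct invocations hand over the address of the work item instead of the port. A minimal sketch of that shape, using hypothetical example_* names that are not part of the patch:

#include <linux/workqueue.h>

struct usb_serial_port;			/* opaque here; a real driver includes the serial headers */

struct example_private {
	struct work_struct rx_work;	/* was initialised with a third 'data' argument */
	struct usb_serial_port *port;	/* back-pointer stashed at attach time */
};

static void example_read(struct work_struct *work)
{
	/* recover the container instead of casting a void * argument */
	struct example_private *priv =
		container_of(work, struct example_private, rx_work);
	struct usb_serial_port *port = priv->port;

	(void)port;			/* drain priv's buffer towards port's tty here */
}

static int example_attach(struct example_private *priv,
			  struct usb_serial_port *port)
{
	INIT_WORK(&priv->rx_work, example_read);	/* no data argument any more */
	priv->port = port;
	return 0;
}

static void example_bulk_callback(struct example_private *priv)
{
	/* a direct call now passes the work item, mirroring the URB callback change */
	example_read(&priv->rx_work);
}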
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 5e3ac281a2f..83d0e21145b 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
@@ -430,13 +430,14 @@ struct digi_port { | |||
430 | int dp_in_close; /* close in progress */ | 430 | int dp_in_close; /* close in progress */ |
431 | wait_queue_head_t dp_close_wait; /* wait queue for close */ | 431 | wait_queue_head_t dp_close_wait; /* wait queue for close */ |
432 | struct work_struct dp_wakeup_work; | 432 | struct work_struct dp_wakeup_work; |
433 | struct usb_serial_port *dp_port; | ||
433 | }; | 434 | }; |
434 | 435 | ||
435 | 436 | ||
436 | /* Local Function Declarations */ | 437 | /* Local Function Declarations */ |
437 | 438 | ||
438 | static void digi_wakeup_write( struct usb_serial_port *port ); | 439 | static void digi_wakeup_write( struct usb_serial_port *port ); |
439 | static void digi_wakeup_write_lock(void *); | 440 | static void digi_wakeup_write_lock(struct work_struct *work); |
440 | static int digi_write_oob_command( struct usb_serial_port *port, | 441 | static int digi_write_oob_command( struct usb_serial_port *port, |
441 | unsigned char *buf, int count, int interruptible ); | 442 | unsigned char *buf, int count, int interruptible ); |
442 | static int digi_write_inb_command( struct usb_serial_port *port, | 443 | static int digi_write_inb_command( struct usb_serial_port *port, |
@@ -598,11 +599,12 @@ static inline long cond_wait_interruptible_timeout_irqrestore( | |||
598 | * on writes. | 599 | * on writes. |
599 | */ | 600 | */ |
600 | 601 | ||
601 | static void digi_wakeup_write_lock(void *arg) | 602 | static void digi_wakeup_write_lock(struct work_struct *work) |
602 | { | 603 | { |
603 | struct usb_serial_port *port = arg; | 604 | struct digi_port *priv = |
605 | container_of(work, struct digi_port, dp_wakeup_work); | ||
606 | struct usb_serial_port *port = priv->dp_port; | ||
604 | unsigned long flags; | 607 | unsigned long flags; |
605 | struct digi_port *priv = usb_get_serial_port_data(port); | ||
606 | 608 | ||
607 | 609 | ||
608 | spin_lock_irqsave( &priv->dp_port_lock, flags ); | 610 | spin_lock_irqsave( &priv->dp_port_lock, flags ); |
@@ -1702,8 +1704,8 @@ dbg( "digi_startup: TOP" ); | |||
1702 | init_waitqueue_head( &priv->dp_flush_wait ); | 1704 | init_waitqueue_head( &priv->dp_flush_wait ); |
1703 | priv->dp_in_close = 0; | 1705 | priv->dp_in_close = 0; |
1704 | init_waitqueue_head( &priv->dp_close_wait ); | 1706 | init_waitqueue_head( &priv->dp_close_wait ); |
1705 | INIT_WORK(&priv->dp_wakeup_work, | 1707 | INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); |
1706 | digi_wakeup_write_lock, serial->port[i]); | 1708 | priv->dp_port = serial->port[i]; |
1707 | 1709 | ||
1708 | /* initialize write wait queue for this port */ | 1710 | /* initialize write wait queue for this port */ |
1709 | init_waitqueue_head( &serial->port[i]->write_wait ); | 1711 | init_waitqueue_head( &serial->port[i]->write_wait ); |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 89ce2775be1..72e4d48f51e 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -559,7 +559,8 @@ struct ftdi_private { | |||
559 | char prev_status, diff_status; /* Used for TIOCMIWAIT */ | 559 | char prev_status, diff_status; /* Used for TIOCMIWAIT */ |
560 | __u8 rx_flags; /* receive state flags (throttling) */ | 560 | __u8 rx_flags; /* receive state flags (throttling) */ |
561 | spinlock_t rx_lock; /* spinlock for receive state */ | 561 | spinlock_t rx_lock; /* spinlock for receive state */ |
562 | struct work_struct rx_work; | 562 | struct delayed_work rx_work; |
563 | struct usb_serial_port *port; | ||
563 | int rx_processed; | 564 | int rx_processed; |
564 | unsigned long rx_bytes; | 565 | unsigned long rx_bytes; |
565 | 566 | ||
@@ -593,7 +594,7 @@ static int ftdi_write_room (struct usb_serial_port *port); | |||
593 | static int ftdi_chars_in_buffer (struct usb_serial_port *port); | 594 | static int ftdi_chars_in_buffer (struct usb_serial_port *port); |
594 | static void ftdi_write_bulk_callback (struct urb *urb); | 595 | static void ftdi_write_bulk_callback (struct urb *urb); |
595 | static void ftdi_read_bulk_callback (struct urb *urb); | 596 | static void ftdi_read_bulk_callback (struct urb *urb); |
596 | static void ftdi_process_read (void *param); | 597 | static void ftdi_process_read (struct work_struct *work); |
597 | static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); | 598 | static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); |
598 | static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); | 599 | static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); |
599 | static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); | 600 | static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); |
@@ -1201,7 +1202,8 @@ static int ftdi_sio_attach (struct usb_serial *serial) | |||
1201 | port->read_urb->transfer_buffer_length = BUFSZ; | 1202 | port->read_urb->transfer_buffer_length = BUFSZ; |
1202 | } | 1203 | } |
1203 | 1204 | ||
1204 | INIT_WORK(&priv->rx_work, ftdi_process_read, port); | 1205 | INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read); |
1206 | priv->port = port; | ||
1205 | 1207 | ||
1206 | /* Free port's existing write urb and transfer buffer. */ | 1208 | /* Free port's existing write urb and transfer buffer. */ |
1207 | if (port->write_urb) { | 1209 | if (port->write_urb) { |
@@ -1640,17 +1642,18 @@ static void ftdi_read_bulk_callback (struct urb *urb) | |||
1640 | priv->rx_bytes += countread; | 1642 | priv->rx_bytes += countread; |
1641 | spin_unlock_irqrestore(&priv->rx_lock, flags); | 1643 | spin_unlock_irqrestore(&priv->rx_lock, flags); |
1642 | 1644 | ||
1643 | ftdi_process_read(port); | 1645 | ftdi_process_read(&priv->rx_work.work); |
1644 | 1646 | ||
1645 | } /* ftdi_read_bulk_callback */ | 1647 | } /* ftdi_read_bulk_callback */ |
1646 | 1648 | ||
1647 | 1649 | ||
1648 | static void ftdi_process_read (void *param) | 1650 | static void ftdi_process_read (struct work_struct *work) |
1649 | { /* ftdi_process_read */ | 1651 | { /* ftdi_process_read */ |
1650 | struct usb_serial_port *port = (struct usb_serial_port*)param; | 1652 | struct ftdi_private *priv = |
1653 | container_of(work, struct ftdi_private, rx_work.work); | ||
1654 | struct usb_serial_port *port = priv->port; | ||
1651 | struct urb *urb; | 1655 | struct urb *urb; |
1652 | struct tty_struct *tty; | 1656 | struct tty_struct *tty; |
1653 | struct ftdi_private *priv; | ||
1654 | char error_flag; | 1657 | char error_flag; |
1655 | unsigned char *data; | 1658 | unsigned char *data; |
1656 | 1659 | ||
@@ -2179,7 +2182,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port) | |||
2179 | spin_unlock_irqrestore(&priv->rx_lock, flags); | 2182 | spin_unlock_irqrestore(&priv->rx_lock, flags); |
2180 | 2183 | ||
2181 | if (actually_throttled) | 2184 | if (actually_throttled) |
2182 | schedule_work(&priv->rx_work); | 2185 | schedule_delayed_work(&priv->rx_work, 0); |
2183 | } | 2186 | } |
2184 | 2187 | ||
2185 | static int __init ftdi_init (void) | 2188 | static int __init ftdi_init (void) |
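For ftdi_sio the item can also be scheduled with a real delay, so the field becomes a struct delayed_work rather than a plain work_struct. The handler must then name the embedded .work member in container_of(), and call sites that used schedule_work() pass an explicit delay of zero. A hedged sketch of that variant, again with illustrative names:

#include <linux/workqueue.h>

struct usb_serial_port;

struct example_ftdi_private {
	struct delayed_work rx_work;	/* was: struct work_struct rx_work; */
	struct usb_serial_port *port;
};

static void example_process_read(struct work_struct *work)
{
	/* the work_struct sits inside the delayed_work, hence rx_work.work */
	struct example_ftdi_private *priv =
		container_of(work, struct example_ftdi_private, rx_work.work);

	(void)priv;			/* process priv->port's read URB here */
}

static void example_setup(struct example_ftdi_private *priv,
			  struct usb_serial_port *port)
{
	INIT_DELAYED_WORK(&priv->rx_work, example_process_read);
	priv->port = port;
}

static void example_unthrottle(struct example_ftdi_private *priv)
{
	/* immediate kick: schedule_work() becomes a zero-delay delayed schedule */
	schedule_delayed_work(&priv->rx_work, 0);
}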
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 909005107ea..e09a0bfe623 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c | |||
@@ -120,6 +120,8 @@ struct keyspan_pda_private { | |||
120 | int tx_throttled; | 120 | int tx_throttled; |
121 | struct work_struct wakeup_work; | 121 | struct work_struct wakeup_work; |
122 | struct work_struct unthrottle_work; | 122 | struct work_struct unthrottle_work; |
123 | struct usb_serial *serial; | ||
124 | struct usb_serial_port *port; | ||
123 | }; | 125 | }; |
124 | 126 | ||
125 | 127 | ||
@@ -175,9 +177,11 @@ static struct usb_device_id id_table_fake_xircom [] = { | |||
175 | }; | 177 | }; |
176 | #endif | 178 | #endif |
177 | 179 | ||
178 | static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) | 180 | static void keyspan_pda_wakeup_write(struct work_struct *work) |
179 | { | 181 | { |
180 | 182 | struct keyspan_pda_private *priv = | |
183 | container_of(work, struct keyspan_pda_private, wakeup_work); | ||
184 | struct usb_serial_port *port = priv->port; | ||
181 | struct tty_struct *tty = port->tty; | 185 | struct tty_struct *tty = port->tty; |
182 | 186 | ||
183 | /* wake up port processes */ | 187 | /* wake up port processes */ |
@@ -187,8 +191,11 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) | |||
187 | tty_wakeup(tty); | 191 | tty_wakeup(tty); |
188 | } | 192 | } |
189 | 193 | ||
190 | static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) | 194 | static void keyspan_pda_request_unthrottle(struct work_struct *work) |
191 | { | 195 | { |
196 | struct keyspan_pda_private *priv = | ||
197 | container_of(work, struct keyspan_pda_private, unthrottle_work); | ||
198 | struct usb_serial *serial = priv->serial; | ||
192 | int result; | 199 | int result; |
193 | 200 | ||
194 | dbg(" request_unthrottle"); | 201 | dbg(" request_unthrottle"); |
@@ -765,11 +772,10 @@ static int keyspan_pda_startup (struct usb_serial *serial) | |||
765 | return (1); /* error */ | 772 | return (1); /* error */ |
766 | usb_set_serial_port_data(serial->port[0], priv); | 773 | usb_set_serial_port_data(serial->port[0], priv); |
767 | init_waitqueue_head(&serial->port[0]->write_wait); | 774 | init_waitqueue_head(&serial->port[0]->write_wait); |
768 | INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, | 775 | INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); |
769 | (void *)(serial->port[0])); | 776 | INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); |
770 | INIT_WORK(&priv->unthrottle_work, | 777 | priv->serial = serial; |
771 | (void *)keyspan_pda_request_unthrottle, | 778 | priv->port = serial->port[0]; |
772 | (void *)(serial)); | ||
773 | return (0); | 779 | return (0); |
774 | } | 780 | } |
775 | 781 | ||
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index c1257d5292f..3d5072f14b8 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -533,9 +533,10 @@ void usb_serial_port_softint(struct usb_serial_port *port) | |||
533 | schedule_work(&port->work); | 533 | schedule_work(&port->work); |
534 | } | 534 | } |
535 | 535 | ||
536 | static void usb_serial_port_work(void *private) | 536 | static void usb_serial_port_work(struct work_struct *work) |
537 | { | 537 | { |
538 | struct usb_serial_port *port = private; | 538 | struct usb_serial_port *port = |
539 | container_of(work, struct usb_serial_port, work); | ||
539 | struct tty_struct *tty; | 540 | struct tty_struct *tty; |
540 | 541 | ||
541 | dbg("%s - port %d", __FUNCTION__, port->number); | 542 | dbg("%s - port %d", __FUNCTION__, port->number); |
@@ -799,7 +800,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
799 | port->serial = serial; | 800 | port->serial = serial; |
800 | spin_lock_init(&port->lock); | 801 | spin_lock_init(&port->lock); |
801 | mutex_init(&port->mutex); | 802 | mutex_init(&port->mutex); |
802 | INIT_WORK(&port->work, usb_serial_port_work, port); | 803 | INIT_WORK(&port->work, usb_serial_port_work); |
803 | serial->port[i] = port; | 804 | serial->port[i] = port; |
804 | } | 805 | } |
805 | 806 | ||
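usb-serial.c shows the simplest case: the work_struct is embedded in the very object the handler operates on, so container_of() alone recovers it and no extra back-pointer field is needed. Roughly, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_port {
	int number;
	struct work_struct work;	/* embedded in the object the handler needs */
};

static void example_port_work(struct work_struct *work)
{
	struct example_port *port = container_of(work, struct example_port, work);

	pr_debug("port %d softint\n", port->number);
}

static void example_port_init(struct example_port *port)
{
	INIT_WORK(&port->work, example_port_work);
}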
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 4d1cd7aeccd..154c7d29059 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c | |||
@@ -227,6 +227,7 @@ struct whiteheat_private { | |||
227 | struct list_head rx_urbs_submitted; | 227 | struct list_head rx_urbs_submitted; |
228 | struct list_head rx_urb_q; | 228 | struct list_head rx_urb_q; |
229 | struct work_struct rx_work; | 229 | struct work_struct rx_work; |
230 | struct usb_serial_port *port; | ||
230 | struct list_head tx_urbs_free; | 231 | struct list_head tx_urbs_free; |
231 | struct list_head tx_urbs_submitted; | 232 | struct list_head tx_urbs_submitted; |
232 | }; | 233 | }; |
@@ -241,7 +242,7 @@ static void command_port_read_callback(struct urb *urb); | |||
241 | static int start_port_read(struct usb_serial_port *port); | 242 | static int start_port_read(struct usb_serial_port *port); |
242 | static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); | 243 | static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); |
243 | static struct list_head *list_first(struct list_head *head); | 244 | static struct list_head *list_first(struct list_head *head); |
244 | static void rx_data_softint(void *private); | 245 | static void rx_data_softint(struct work_struct *work); |
245 | 246 | ||
246 | static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); | 247 | static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); |
247 | static int firm_open(struct usb_serial_port *port); | 248 | static int firm_open(struct usb_serial_port *port); |
@@ -424,7 +425,8 @@ static int whiteheat_attach (struct usb_serial *serial) | |||
424 | spin_lock_init(&info->lock); | 425 | spin_lock_init(&info->lock); |
425 | info->flags = 0; | 426 | info->flags = 0; |
426 | info->mcr = 0; | 427 | info->mcr = 0; |
427 | INIT_WORK(&info->rx_work, rx_data_softint, port); | 428 | INIT_WORK(&info->rx_work, rx_data_softint); |
429 | info->port = port; | ||
428 | 430 | ||
429 | INIT_LIST_HEAD(&info->rx_urbs_free); | 431 | INIT_LIST_HEAD(&info->rx_urbs_free); |
430 | INIT_LIST_HEAD(&info->rx_urbs_submitted); | 432 | INIT_LIST_HEAD(&info->rx_urbs_submitted); |
@@ -949,7 +951,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port) | |||
949 | spin_unlock_irqrestore(&info->lock, flags); | 951 | spin_unlock_irqrestore(&info->lock, flags); |
950 | 952 | ||
951 | if (actually_throttled) | 953 | if (actually_throttled) |
952 | rx_data_softint(port); | 954 | rx_data_softint(&info->rx_work); |
953 | 955 | ||
954 | return; | 956 | return; |
955 | } | 957 | } |
@@ -1400,10 +1402,11 @@ static struct list_head *list_first(struct list_head *head) | |||
1400 | } | 1402 | } |
1401 | 1403 | ||
1402 | 1404 | ||
1403 | static void rx_data_softint(void *private) | 1405 | static void rx_data_softint(struct work_struct *work) |
1404 | { | 1406 | { |
1405 | struct usb_serial_port *port = (struct usb_serial_port *)private; | 1407 | struct whiteheat_private *info = |
1406 | struct whiteheat_private *info = usb_get_serial_port_data(port); | 1408 | container_of(work, struct whiteheat_private, rx_work); |
1409 | struct usb_serial_port *port = info->port; | ||
1407 | struct tty_struct *tty = port->tty; | 1410 | struct tty_struct *tty = port->tty; |
1408 | struct whiteheat_urb_wrap *wrap; | 1411 | struct whiteheat_urb_wrap *wrap; |
1409 | struct urb *urb; | 1412 | struct urb *urb; |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 302174b8e47..31f476a6479 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc) | |||
383 | softback_top = 0; | 383 | softback_top = 0; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void fb_flashcursor(void *private) | 386 | static void fb_flashcursor(struct work_struct *work) |
387 | { | 387 | { |
388 | struct fb_info *info = private; | 388 | struct fb_info *info = container_of(work, struct fb_info, queue); |
389 | struct fbcon_ops *ops = info->fbcon_par; | 389 | struct fbcon_ops *ops = info->fbcon_par; |
390 | struct display *p; | 390 | struct display *p; |
391 | struct vc_data *vc = NULL; | 391 | struct vc_data *vc = NULL; |
@@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info) | |||
442 | if ((!info->queue.func || info->queue.func == fb_flashcursor) && | 442 | if ((!info->queue.func || info->queue.func == fb_flashcursor) && |
443 | !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { | 443 | !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { |
444 | if (!info->queue.func) | 444 | if (!info->queue.func) |
445 | INIT_WORK(&info->queue, fb_flashcursor, info); | 445 | INIT_WORK(&info->queue, fb_flashcursor); |
446 | 446 | ||
447 | init_timer(&ops->cursor_timer); | 447 | init_timer(&ops->cursor_timer); |
448 | ops->cursor_timer.function = cursor_timer_handler; | 448 | ops->cursor_timer.function = cursor_timer_handler; |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 8a8ae55a740..38eb0b69c2d 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -964,9 +964,10 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) | |||
964 | * Our LCD controller task (which is called when we blank or unblank) | 964 | * Our LCD controller task (which is called when we blank or unblank) |
965 | * via keventd. | 965 | * via keventd. |
966 | */ | 966 | */ |
967 | static void pxafb_task(void *dummy) | 967 | static void pxafb_task(struct work_struct *work) |
968 | { | 968 | { |
969 | struct pxafb_info *fbi = dummy; | 969 | struct pxafb_info *fbi = |
970 | container_of(work, struct pxafb_info, task); | ||
970 | u_int state = xchg(&fbi->task_state, -1); | 971 | u_int state = xchg(&fbi->task_state, -1); |
971 | 972 | ||
972 | set_ctrlr_state(fbi, state); | 973 | set_ctrlr_state(fbi, state); |
@@ -1159,7 +1160,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) | |||
1159 | } | 1160 | } |
1160 | 1161 | ||
1161 | init_waitqueue_head(&fbi->ctrlr_wait); | 1162 | init_waitqueue_head(&fbi->ctrlr_wait); |
1162 | INIT_WORK(&fbi->task, pxafb_task, fbi); | 1163 | INIT_WORK(&fbi->task, pxafb_task); |
1163 | init_MUTEX(&fbi->ctrlr_sem); | 1164 | init_MUTEX(&fbi->ctrlr_sem); |
1164 | 1165 | ||
1165 | return fbi; | 1166 | return fbi; |
diff --git a/fs/9p/mux.c b/fs/9p/mux.c index 90a79c78454..944273c3dbf 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c | |||
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | static int v9fs_poll_proc(void *); | 112 | static int v9fs_poll_proc(void *); |
113 | static void v9fs_read_work(void *); | 113 | static void v9fs_read_work(struct work_struct *work); |
114 | static void v9fs_write_work(void *); | 114 | static void v9fs_write_work(struct work_struct *work); |
115 | static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, | 115 | static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, |
116 | poll_table * p); | 116 | poll_table * p); |
117 | static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); | 117 | static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); |
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize, | |||
297 | m->rbuf = NULL; | 297 | m->rbuf = NULL; |
298 | m->wpos = m->wsize = 0; | 298 | m->wpos = m->wsize = 0; |
299 | m->wbuf = NULL; | 299 | m->wbuf = NULL; |
300 | INIT_WORK(&m->rq, v9fs_read_work, m); | 300 | INIT_WORK(&m->rq, v9fs_read_work); |
301 | INIT_WORK(&m->wq, v9fs_write_work, m); | 301 | INIT_WORK(&m->wq, v9fs_write_work); |
302 | m->wsched = 0; | 302 | m->wsched = 0; |
303 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | 303 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); |
304 | m->poll_task = NULL; | 304 | m->poll_task = NULL; |
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a) | |||
458 | /** | 458 | /** |
459 | * v9fs_write_work - called when a transport can send some data | 459 | * v9fs_write_work - called when a transport can send some data |
460 | */ | 460 | */ |
461 | static void v9fs_write_work(void *a) | 461 | static void v9fs_write_work(struct work_struct *work) |
462 | { | 462 | { |
463 | int n, err; | 463 | int n, err; |
464 | struct v9fs_mux_data *m; | 464 | struct v9fs_mux_data *m; |
465 | struct v9fs_req *req; | 465 | struct v9fs_req *req; |
466 | 466 | ||
467 | m = a; | 467 | m = container_of(work, struct v9fs_mux_data, wq); |
468 | 468 | ||
469 | if (m->err < 0) { | 469 | if (m->err < 0) { |
470 | clear_bit(Wworksched, &m->wsched); | 470 | clear_bit(Wworksched, &m->wsched); |
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) | |||
564 | /** | 564 | /** |
565 | * v9fs_read_work - called when there is some data to be read from a transport | 565 | * v9fs_read_work - called when there is some data to be read from a transport |
566 | */ | 566 | */ |
567 | static void v9fs_read_work(void *a) | 567 | static void v9fs_read_work(struct work_struct *work) |
568 | { | 568 | { |
569 | int n, err; | 569 | int n, err; |
570 | struct v9fs_mux_data *m; | 570 | struct v9fs_mux_data *m; |
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a) | |||
572 | struct v9fs_fcall *rcall; | 572 | struct v9fs_fcall *rcall; |
573 | char *rbuf; | 573 | char *rbuf; |
574 | 574 | ||
575 | m = a; | 575 | m = container_of(work, struct v9fs_mux_data, rq); |
576 | 576 | ||
577 | if (m->err < 0) | 577 | if (m->err < 0) |
578 | return; | 578 | return; |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep; | |||
53 | static struct workqueue_struct *aio_wq; | 53 | static struct workqueue_struct *aio_wq; |
54 | 54 | ||
55 | /* Used for rare fput completion. */ | 55 | /* Used for rare fput completion. */ |
56 | static void aio_fput_routine(void *); | 56 | static void aio_fput_routine(struct work_struct *); |
57 | static DECLARE_WORK(fput_work, aio_fput_routine, NULL); | 57 | static DECLARE_WORK(fput_work, aio_fput_routine); |
58 | 58 | ||
59 | static DEFINE_SPINLOCK(fput_lock); | 59 | static DEFINE_SPINLOCK(fput_lock); |
60 | static LIST_HEAD(fput_head); | 60 | static LIST_HEAD(fput_head); |
61 | 61 | ||
62 | static void aio_kick_handler(void *); | 62 | static void aio_kick_handler(struct work_struct *); |
63 | static void aio_queue_work(struct kioctx *); | 63 | static void aio_queue_work(struct kioctx *); |
64 | 64 | ||
65 | /* aio_setup | 65 | /* aio_setup |
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
227 | 227 | ||
228 | INIT_LIST_HEAD(&ctx->active_reqs); | 228 | INIT_LIST_HEAD(&ctx->active_reqs); |
229 | INIT_LIST_HEAD(&ctx->run_list); | 229 | INIT_LIST_HEAD(&ctx->run_list); |
230 | INIT_WORK(&ctx->wq, aio_kick_handler, ctx); | 230 | INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler); |
231 | 231 | ||
232 | if (aio_setup_ring(ctx) < 0) | 232 | if (aio_setup_ring(ctx) < 0) |
233 | goto out_freectx; | 233 | goto out_freectx; |
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
469 | wake_up(&ctx->wait); | 469 | wake_up(&ctx->wait); |
470 | } | 470 | } |
471 | 471 | ||
472 | static void aio_fput_routine(void *data) | 472 | static void aio_fput_routine(struct work_struct *data) |
473 | { | 473 | { |
474 | spin_lock_irq(&fput_lock); | 474 | spin_lock_irq(&fput_lock); |
475 | while (likely(!list_empty(&fput_head))) { | 475 | while (likely(!list_empty(&fput_head))) { |
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx) | |||
857 | * space. | 857 | * space. |
858 | * Run on aiod's context. | 858 | * Run on aiod's context. |
859 | */ | 859 | */ |
860 | static void aio_kick_handler(void *data) | 860 | static void aio_kick_handler(struct work_struct *work) |
861 | { | 861 | { |
862 | struct kioctx *ctx = data; | 862 | struct kioctx *ctx = container_of(work, struct kioctx, wq.work); |
863 | mm_segment_t oldfs = get_fs(); | 863 | mm_segment_t oldfs = get_fs(); |
864 | int requeue; | 864 | int requeue; |
865 | 865 | ||
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data) | |||
874 | * we're in a worker thread already, don't use queue_delayed_work, | 874 | * we're in a worker thread already, don't use queue_delayed_work, |
875 | */ | 875 | */ |
876 | if (requeue) | 876 | if (requeue) |
877 | queue_work(aio_wq, &ctx->wq); | 877 | queue_delayed_work(aio_wq, &ctx->wq, 0); |
878 | } | 878 | } |
879 | 879 | ||
880 | 880 | ||
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio) | |||
940 | * run one bio_put() against the BIO. | 940 | * run one bio_put() against the BIO. |
941 | */ | 941 | */ |
942 | 942 | ||
943 | static void bio_dirty_fn(void *data); | 943 | static void bio_dirty_fn(struct work_struct *work); |
944 | 944 | ||
945 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); | 945 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
946 | static DEFINE_SPINLOCK(bio_dirty_lock); | 946 | static DEFINE_SPINLOCK(bio_dirty_lock); |
947 | static struct bio *bio_dirty_list; | 947 | static struct bio *bio_dirty_list; |
948 | 948 | ||
949 | /* | 949 | /* |
950 | * This runs in process context | 950 | * This runs in process context |
951 | */ | 951 | */ |
952 | static void bio_dirty_fn(void *data) | 952 | static void bio_dirty_fn(struct work_struct *work) |
953 | { | 953 | { |
954 | unsigned long flags; | 954 | unsigned long flags; |
955 | struct bio *bio; | 955 | struct bio *bio; |
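The bio_dirty_fn change is the statically declared flavour: DECLARE_WORK() drops its data argument, and since there is no containing object the handler simply uses the file-scope state it always shared with its schedulers. A small, hypothetical sketch of the same arrangement:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static void example_dirty_fn(struct work_struct *work);

/* was: DECLARE_WORK(example_dirty_work, example_dirty_fn, NULL); */
static DECLARE_WORK(example_dirty_work, example_dirty_fn);
static DEFINE_SPINLOCK(example_dirty_lock);
static LIST_HEAD(example_dirty_list);

static void example_dirty_fn(struct work_struct *work)
{
	unsigned long flags;

	/* the handler never needed the old 'data' argument; it drains the list */
	spin_lock_irqsave(&example_dirty_lock, flags);
	while (!list_empty(&example_dirty_list))
		list_del_init(example_dirty_list.next);
	spin_unlock_irqrestore(&example_dirty_lock, flags);
}

static void example_mark_dirty(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&example_dirty_lock, flags);
	list_add(entry, &example_dirty_list);
	spin_unlock_irqrestore(&example_dirty_lock, flags);
	schedule_work(&example_dirty_work);
}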
diff --git a/fs/file.c b/fs/file.c --- a/fs/file.c +++ b/fs/file.c | |||
@@ -91,8 +91,10 @@ out: | |||
91 | spin_unlock(&fddef->lock); | 91 | spin_unlock(&fddef->lock); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void free_fdtable_work(struct fdtable_defer *f) | 94 | static void free_fdtable_work(struct work_struct *work) |
95 | { | 95 | { |
96 | struct fdtable_defer *f = | ||
97 | container_of(work, struct fdtable_defer, wq); | ||
96 | struct fdtable *fdt; | 98 | struct fdtable *fdt; |
97 | 99 | ||
98 | spin_lock_bh(&f->lock); | 100 | spin_lock_bh(&f->lock); |
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu) | |||
351 | { | 353 | { |
352 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); | 354 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); |
353 | spin_lock_init(&fddef->lock); | 355 | spin_lock_init(&fddef->lock); |
354 | INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); | 356 | INIT_WORK(&fddef->wq, free_fdtable_work); |
355 | init_timer(&fddef->timer); | 357 | init_timer(&fddef->timer); |
356 | fddef->timer.data = (unsigned long)fddef; | 358 | fddef->timer.data = (unsigned long)fddef; |
357 | fddef->timer.function = fdtable_timer; | 359 | fddef->timer.function = fdtable_timer; |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 78fe0fae23f..55f5333dae9 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | struct greedy { | 36 | struct greedy { |
37 | struct gfs2_holder gr_gh; | 37 | struct gfs2_holder gr_gh; |
38 | struct work_struct gr_work; | 38 | struct delayed_work gr_work; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | struct gfs2_gl_hash_bucket { | 41 | struct gfs2_gl_hash_bucket { |
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, | |||
1368 | glops->go_xmote_th(gl, state, flags); | 1368 | glops->go_xmote_th(gl, state, flags); |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | static void greedy_work(void *data) | 1371 | static void greedy_work(struct work_struct *work) |
1372 | { | 1372 | { |
1373 | struct greedy *gr = data; | 1373 | struct greedy *gr = container_of(work, struct greedy, gr_work.work); |
1374 | struct gfs2_holder *gh = &gr->gr_gh; | 1374 | struct gfs2_holder *gh = &gr->gr_gh; |
1375 | struct gfs2_glock *gl = gh->gh_gl; | 1375 | struct gfs2_glock *gl = gh->gh_gl; |
1376 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 1376 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time) | |||
1422 | 1422 | ||
1423 | gfs2_holder_init(gl, 0, 0, gh); | 1423 | gfs2_holder_init(gl, 0, 0, gh); |
1424 | set_bit(HIF_GREEDY, &gh->gh_iflags); | 1424 | set_bit(HIF_GREEDY, &gh->gh_iflags); |
1425 | INIT_WORK(&gr->gr_work, greedy_work, gr); | 1425 | INIT_DELAYED_WORK(&gr->gr_work, greedy_work); |
1426 | 1426 | ||
1427 | set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); | 1427 | set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); |
1428 | schedule_delayed_work(&gr->gr_work, time); | 1428 | schedule_delayed_work(&gr->gr_work, time); |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 42e3bef270c..72dad552aa0 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
577 | server->rcv.ptr = (unsigned char*)&server->rcv.buf; | 577 | server->rcv.ptr = (unsigned char*)&server->rcv.buf; |
578 | server->rcv.len = 10; | 578 | server->rcv.len = 10; |
579 | server->rcv.state = 0; | 579 | server->rcv.state = 0; |
580 | INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); | 580 | INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); |
581 | INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); | 581 | INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); |
582 | sock->sk->sk_write_space = ncp_tcp_write_space; | 582 | sock->sk->sk_write_space = ncp_tcp_write_space; |
583 | } else { | 583 | } else { |
584 | INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); | 584 | INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); |
585 | INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); | 585 | INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); |
586 | server->timeout_tm.data = (unsigned long)server; | 586 | server->timeout_tm.data = (unsigned long)server; |
587 | server->timeout_tm.function = ncpdgram_timeout_call; | 587 | server->timeout_tm.function = ncpdgram_timeout_call; |
588 | } | 588 | } |
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 11c2b252ebe..e496d8b65e9 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c | |||
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void * | |||
350 | } | 350 | } |
351 | } | 351 | } |
352 | 352 | ||
353 | void ncpdgram_rcv_proc(void *s) | 353 | void ncpdgram_rcv_proc(struct work_struct *work) |
354 | { | 354 | { |
355 | struct ncp_server *server = s; | 355 | struct ncp_server *server = |
356 | container_of(work, struct ncp_server, rcv.tq); | ||
356 | struct socket* sock; | 357 | struct socket* sock; |
357 | 358 | ||
358 | sock = server->ncp_sock; | 359 | sock = server->ncp_sock; |
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) | |||
468 | } | 469 | } |
469 | } | 470 | } |
470 | 471 | ||
471 | void ncpdgram_timeout_proc(void *s) | 472 | void ncpdgram_timeout_proc(struct work_struct *work) |
472 | { | 473 | { |
473 | struct ncp_server *server = s; | 474 | struct ncp_server *server = |
475 | container_of(work, struct ncp_server, timeout_tq); | ||
474 | mutex_lock(&server->rcv.creq_mutex); | 476 | mutex_lock(&server->rcv.creq_mutex); |
475 | __ncpdgram_timeout_proc(server); | 477 | __ncpdgram_timeout_proc(server); |
476 | mutex_unlock(&server->rcv.creq_mutex); | 478 | mutex_unlock(&server->rcv.creq_mutex); |
@@ -652,18 +654,20 @@ skipdata:; | |||
652 | } | 654 | } |
653 | } | 655 | } |
654 | 656 | ||
655 | void ncp_tcp_rcv_proc(void *s) | 657 | void ncp_tcp_rcv_proc(struct work_struct *work) |
656 | { | 658 | { |
657 | struct ncp_server *server = s; | 659 | struct ncp_server *server = |
660 | container_of(work, struct ncp_server, rcv.tq); | ||
658 | 661 | ||
659 | mutex_lock(&server->rcv.creq_mutex); | 662 | mutex_lock(&server->rcv.creq_mutex); |
660 | __ncptcp_rcv_proc(server); | 663 | __ncptcp_rcv_proc(server); |
661 | mutex_unlock(&server->rcv.creq_mutex); | 664 | mutex_unlock(&server->rcv.creq_mutex); |
662 | } | 665 | } |
663 | 666 | ||
664 | void ncp_tcp_tx_proc(void *s) | 667 | void ncp_tcp_tx_proc(struct work_struct *work) |
665 | { | 668 | { |
666 | struct ncp_server *server = s; | 669 | struct ncp_server *server = |
670 | container_of(work, struct ncp_server, tx.tq); | ||
667 | 671 | ||
668 | mutex_lock(&server->rcv.creq_mutex); | 672 | mutex_lock(&server->rcv.creq_mutex); |
669 | __ncptcp_try_send(server); | 673 | __ncptcp_try_send(server); |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 5fea638743e..23ab145daa2 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname, | |||
143 | INIT_LIST_HEAD(&clp->cl_state_owners); | 143 | INIT_LIST_HEAD(&clp->cl_state_owners); |
144 | INIT_LIST_HEAD(&clp->cl_unused); | 144 | INIT_LIST_HEAD(&clp->cl_unused); |
145 | spin_lock_init(&clp->cl_lock); | 145 | spin_lock_init(&clp->cl_lock); |
146 | INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); | 146 | INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); |
147 | rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); | 147 | rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); |
148 | clp->cl_boot_time = CURRENT_TIME; | 148 | clp->cl_boot_time = CURRENT_TIME; |
149 | clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; | 149 | clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index ec1114b33d8..371b804e7cc 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -18,10 +18,10 @@ | |||
18 | 18 | ||
19 | #define NFSDBG_FACILITY NFSDBG_VFS | 19 | #define NFSDBG_FACILITY NFSDBG_VFS |
20 | 20 | ||
21 | static void nfs_expire_automounts(void *list); | 21 | static void nfs_expire_automounts(struct work_struct *work); |
22 | 22 | ||
23 | LIST_HEAD(nfs_automount_list); | 23 | LIST_HEAD(nfs_automount_list); |
24 | static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); | 24 | static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); |
25 | int nfs_mountpoint_expiry_timeout = 500 * HZ; | 25 | int nfs_mountpoint_expiry_timeout = 500 * HZ; |
26 | 26 | ||
27 | static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, | 27 | static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, |
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = { | |||
164 | .follow_link = nfs_follow_mountpoint, | 164 | .follow_link = nfs_follow_mountpoint, |
165 | }; | 165 | }; |
166 | 166 | ||
167 | static void nfs_expire_automounts(void *data) | 167 | static void nfs_expire_automounts(struct work_struct *work) |
168 | { | 168 | { |
169 | struct list_head *list = (struct list_head *)data; | 169 | struct list_head *list = &nfs_automount_list; |
170 | 170 | ||
171 | mark_mounts_for_expiry(list); | 171 | mark_mounts_for_expiry(list); |
172 | if (!list_empty(list)) | 172 | if (!list_empty(list)) |
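nfs_automount_task is the delayed counterpart of the static case: DECLARE_DELAYED_WORK() drops the data pointer, the handler refers to the file-scope list it was previously handed as an argument, and it re-arms itself while entries remain. A hypothetical sketch of such a self-rescheduling item:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/jiffies.h>

static void example_expire(struct work_struct *work);

static LIST_HEAD(example_expire_list);
/* was: DECLARE_WORK(example_expire_task, example_expire, &example_expire_list); */
static DECLARE_DELAYED_WORK(example_expire_task, example_expire);
static const unsigned long example_timeout = 500 * HZ;

static void example_expire(struct work_struct *work)
{
	/* the old void *data was just &example_expire_list; use it directly */
	struct list_head *list = &example_expire_list;

	/* expire stale entries on 'list' here, then re-arm if any are left */
	if (!list_empty(list))
		schedule_delayed_work(&example_expire_task, example_timeout);
}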
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 6f346677332..c26cd978c7c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2]; | |||
185 | extern void nfs4_schedule_state_renewal(struct nfs_client *); | 185 | extern void nfs4_schedule_state_renewal(struct nfs_client *); |
186 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); | 186 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); |
187 | extern void nfs4_kill_renewd(struct nfs_client *); | 187 | extern void nfs4_kill_renewd(struct nfs_client *); |
188 | extern void nfs4_renew_state(void *); | 188 | extern void nfs4_renew_state(struct work_struct *); |
189 | 189 | ||
190 | /* nfs4state.c */ | 190 | /* nfs4state.c */ |
191 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); | 191 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); |
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 7b6df1852e7..823298561c0 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
@@ -59,9 +59,10 @@ | |||
59 | #define NFSDBG_FACILITY NFSDBG_PROC | 59 | #define NFSDBG_FACILITY NFSDBG_PROC |
60 | 60 | ||
61 | void | 61 | void |
62 | nfs4_renew_state(void *data) | 62 | nfs4_renew_state(struct work_struct *work) |
63 | { | 63 | { |
64 | struct nfs_client *clp = (struct nfs_client *)data; | 64 | struct nfs_client *clp = |
65 | container_of(work, struct nfs_client, cl_renewd.work); | ||
65 | struct rpc_cred *cred; | 66 | struct rpc_cred *cred; |
66 | long lease, timeout; | 67 | long lease, timeout; |
67 | unsigned long last, now; | 68 | unsigned long last, now; |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 293b6495829..e431e93ab50 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -1829,9 +1829,8 @@ out: | |||
1829 | } | 1829 | } |
1830 | 1830 | ||
1831 | static struct workqueue_struct *laundry_wq; | 1831 | static struct workqueue_struct *laundry_wq; |
1832 | static struct work_struct laundromat_work; | 1832 | static void laundromat_main(struct work_struct *); |
1833 | static void laundromat_main(void *); | 1833 | static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); |
1834 | static DECLARE_WORK(laundromat_work, laundromat_main, NULL); | ||
1835 | 1834 | ||
1836 | __be32 | 1835 | __be32 |
1837 | nfsd4_renew(clientid_t *clid) | 1836 | nfsd4_renew(clientid_t *clid) |
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void) | |||
1940 | } | 1939 | } |
1941 | 1940 | ||
1942 | void | 1941 | void |
1943 | laundromat_main(void *not_used) | 1942 | laundromat_main(struct work_struct *not_used) |
1944 | { | 1943 | { |
1945 | time_t t; | 1944 | time_t t; |
1946 | 1945 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 85a048b7a67..edc91ca3792 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -1197,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |||
1197 | return status; | 1197 | return status; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | static void ocfs2_truncate_log_worker(void *data) | 1200 | static void ocfs2_truncate_log_worker(struct work_struct *work) |
1201 | { | 1201 | { |
1202 | int status; | 1202 | int status; |
1203 | struct ocfs2_super *osb = data; | 1203 | struct ocfs2_super *osb = |
1204 | container_of(work, struct ocfs2_super, | ||
1205 | osb_truncate_log_wq.work); | ||
1204 | 1206 | ||
1205 | mlog_entry_void(); | 1207 | mlog_entry_void(); |
1206 | 1208 | ||
@@ -1432,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |||
1432 | /* ocfs2_truncate_log_shutdown keys on the existence of | 1434 | /* ocfs2_truncate_log_shutdown keys on the existence of |
1433 | * osb->osb_tl_inode so we don't set any of the osb variables | 1435 | * osb->osb_tl_inode so we don't set any of the osb variables |
1434 | * until we're sure all is well. */ | 1436 | * until we're sure all is well. */ |
1435 | INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); | 1437 | INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, |
1438 | ocfs2_truncate_log_worker); | ||
1436 | osb->osb_tl_bh = tl_bh; | 1439 | osb->osb_tl_bh = tl_bh; |
1437 | osb->osb_tl_inode = tl_inode; | 1440 | osb->osb_tl_inode = tl_inode; |
1438 | 1441 | ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 305cba3681f..4cd9a958045 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -141,7 +141,7 @@ struct o2hb_region { | |||
141 | * recognizes a node going up and down in one iteration */ | 141 | * recognizes a node going up and down in one iteration */ |
142 | u64 hr_generation; | 142 | u64 hr_generation; |
143 | 143 | ||
144 | struct work_struct hr_write_timeout_work; | 144 | struct delayed_work hr_write_timeout_work; |
145 | unsigned long hr_last_timeout_start; | 145 | unsigned long hr_last_timeout_start; |
146 | 146 | ||
147 | /* Used during o2hb_check_slot to hold a copy of the block | 147 | /* Used during o2hb_check_slot to hold a copy of the block |
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt { | |||
156 | int wc_error; | 156 | int wc_error; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static void o2hb_write_timeout(void *arg) | 159 | static void o2hb_write_timeout(struct work_struct *work) |
160 | { | 160 | { |
161 | struct o2hb_region *reg = arg; | 161 | struct o2hb_region *reg = |
162 | container_of(work, struct o2hb_region, | ||
163 | hr_write_timeout_work.work); | ||
162 | 164 | ||
163 | mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " | 165 | mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " |
164 | "milliseconds\n", reg->hr_dev_name, | 166 | "milliseconds\n", reg->hr_dev_name, |
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1404 | goto out; | 1406 | goto out; |
1405 | } | 1407 | } |
1406 | 1408 | ||
1407 | INIT_WORK(®->hr_write_timeout_work, o2hb_write_timeout, reg); | 1409 | INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); |
1408 | 1410 | ||
1409 | /* | 1411 | /* |
1410 | * A node is considered live after it has beat LIVE_THRESHOLD | 1412 | * A node is considered live after it has beat LIVE_THRESHOLD |
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 7bba98fbfc1..4705d659fe5 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c | |||
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void) | |||
88 | o2quo_fence_self(); | 88 | o2quo_fence_self(); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void o2quo_make_decision(void *arg) | 91 | static void o2quo_make_decision(struct work_struct *work) |
92 | { | 92 | { |
93 | int quorum; | 93 | int quorum; |
94 | int lowest_hb, lowest_reachable = 0, fence = 0; | 94 | int lowest_hb, lowest_reachable = 0, fence = 0; |
@@ -306,7 +306,7 @@ void o2quo_init(void) | |||
306 | struct o2quo_state *qs = &o2quo_state; | 306 | struct o2quo_state *qs = &o2quo_state; |
307 | 307 | ||
308 | spin_lock_init(&qs->qs_lock); | 308 | spin_lock_init(&qs->qs_lock); |
309 | INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); | 309 | INIT_WORK(&qs->qs_work, o2quo_make_decision); |
310 | } | 310 | } |
311 | 311 | ||
312 | void o2quo_exit(void) | 312 | void o2quo_exit(void) |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index b650efa8c8b..9b3209dc0b1 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] = | |||
140 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; | 140 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; |
141 | 141 | ||
142 | /* can't quite avoid *all* internal declarations :/ */ | 142 | /* can't quite avoid *all* internal declarations :/ */ |
143 | static void o2net_sc_connect_completed(void *arg); | 143 | static void o2net_sc_connect_completed(struct work_struct *work); |
144 | static void o2net_rx_until_empty(void *arg); | 144 | static void o2net_rx_until_empty(struct work_struct *work); |
145 | static void o2net_shutdown_sc(void *arg); | 145 | static void o2net_shutdown_sc(struct work_struct *work); |
146 | static void o2net_listen_data_ready(struct sock *sk, int bytes); | 146 | static void o2net_listen_data_ready(struct sock *sk, int bytes); |
147 | static void o2net_sc_send_keep_req(void *arg); | 147 | static void o2net_sc_send_keep_req(struct work_struct *work); |
148 | static void o2net_idle_timer(unsigned long data); | 148 | static void o2net_idle_timer(unsigned long data); |
149 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); | 149 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); |
150 | 150 | ||
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) | |||
308 | o2nm_node_get(node); | 308 | o2nm_node_get(node); |
309 | sc->sc_node = node; | 309 | sc->sc_node = node; |
310 | 310 | ||
311 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); | 311 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); |
312 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); | 312 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); |
313 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); | 313 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); |
314 | INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); | 314 | INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); |
315 | 315 | ||
316 | init_timer(&sc->sc_idle_timeout); | 316 | init_timer(&sc->sc_idle_timeout); |
317 | sc->sc_idle_timeout.function = o2net_idle_timer; | 317 | sc->sc_idle_timeout.function = o2net_idle_timer; |
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc, | |||
342 | sc_put(sc); | 342 | sc_put(sc); |
343 | } | 343 | } |
344 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, | 344 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, |
345 | struct work_struct *work, | 345 | struct delayed_work *work, |
346 | int delay) | 346 | int delay) |
347 | { | 347 | { |
348 | sc_get(sc); | 348 | sc_get(sc); |
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, | |||
350 | sc_put(sc); | 350 | sc_put(sc); |
351 | } | 351 | } |
352 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, | 352 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, |
353 | struct work_struct *work) | 353 | struct delayed_work *work) |
354 | { | 354 | { |
355 | if (cancel_delayed_work(work)) | 355 | if (cancel_delayed_work(work)) |
356 | sc_put(sc); | 356 | sc_put(sc); |
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn, | |||
564 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state | 564 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state |
565 | * itself. | 565 | * itself. |
566 | */ | 566 | */ |
567 | static void o2net_shutdown_sc(void *arg) | 567 | static void o2net_shutdown_sc(struct work_struct *work) |
568 | { | 568 | { |
569 | struct o2net_sock_container *sc = arg; | 569 | struct o2net_sock_container *sc = |
570 | container_of(work, struct o2net_sock_container, | ||
571 | sc_shutdown_work); | ||
570 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 572 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
571 | 573 | ||
572 | sclog(sc, "shutting down\n"); | 574 | sclog(sc, "shutting down\n"); |
@@ -1201,9 +1203,10 @@ out: | |||
1201 | /* this work func is triggerd by data ready. it reads until it can read no | 1203 | /* this work func is triggerd by data ready. it reads until it can read no |
1202 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing | 1204 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing |
1203 | * our work the work struct will be marked and we'll be called again. */ | 1205 | * our work the work struct will be marked and we'll be called again. */ |
1204 | static void o2net_rx_until_empty(void *arg) | 1206 | static void o2net_rx_until_empty(struct work_struct *work) |
1205 | { | 1207 | { |
1206 | struct o2net_sock_container *sc = arg; | 1208 | struct o2net_sock_container *sc = |
1209 | container_of(work, struct o2net_sock_container, sc_rx_work); | ||
1207 | int ret; | 1210 | int ret; |
1208 | 1211 | ||
1209 | do { | 1212 | do { |
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock) | |||
1249 | 1252 | ||
1250 | /* called when a connect completes and after a sock is accepted. the | 1253 | /* called when a connect completes and after a sock is accepted. the |
1251 | * rx path will see the response and mark the sc valid */ | 1254 | * rx path will see the response and mark the sc valid */ |
1252 | static void o2net_sc_connect_completed(void *arg) | 1255 | static void o2net_sc_connect_completed(struct work_struct *work) |
1253 | { | 1256 | { |
1254 | struct o2net_sock_container *sc = arg; | 1257 | struct o2net_sock_container *sc = |
1258 | container_of(work, struct o2net_sock_container, | ||
1259 | sc_connect_work); | ||
1255 | 1260 | ||
1256 | mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", | 1261 | mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", |
1257 | (unsigned long long)O2NET_PROTOCOL_VERSION, | 1262 | (unsigned long long)O2NET_PROTOCOL_VERSION, |
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg) | |||
1262 | } | 1267 | } |
1263 | 1268 | ||
1264 | /* this is called as a work_struct func. */ | 1269 | /* this is called as a work_struct func. */ |
1265 | static void o2net_sc_send_keep_req(void *arg) | 1270 | static void o2net_sc_send_keep_req(struct work_struct *work) |
1266 | { | 1271 | { |
1267 | struct o2net_sock_container *sc = arg; | 1272 | struct o2net_sock_container *sc = |
1273 | container_of(work, struct o2net_sock_container, | ||
1274 | sc_keepalive_work.work); | ||
1268 | 1275 | ||
1269 | o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); | 1276 | o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); |
1270 | sc_put(sc); | 1277 | sc_put(sc); |
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) | |||
1314 | * having a connect attempt fail, etc. This centralizes the logic which decides | 1321 | * having a connect attempt fail, etc. This centralizes the logic which decides |
1315 | * if a connect attempt should be made or if we should give up and all future | 1322 | * if a connect attempt should be made or if we should give up and all future |
1316 | * transmit attempts should fail */ | 1323 | * transmit attempts should fail */ |
1317 | static void o2net_start_connect(void *arg) | 1324 | static void o2net_start_connect(struct work_struct *work) |
1318 | { | 1325 | { |
1319 | struct o2net_node *nn = arg; | 1326 | struct o2net_node *nn = |
1327 | container_of(work, struct o2net_node, nn_connect_work.work); | ||
1320 | struct o2net_sock_container *sc = NULL; | 1328 | struct o2net_sock_container *sc = NULL; |
1321 | struct o2nm_node *node = NULL, *mynode = NULL; | 1329 | struct o2nm_node *node = NULL, *mynode = NULL; |
1322 | struct socket *sock = NULL; | 1330 | struct socket *sock = NULL; |
1323 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; | 1331 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; |
1324 | int ret = 0; | 1332 | int ret = 0, stop; |
1325 | 1333 | ||
1326 | /* if we're greater we initiate tx, otherwise we accept */ | 1334 | /* if we're greater we initiate tx, otherwise we accept */ |
1327 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) | 1335 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) |
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg) | |||
1342 | 1350 | ||
1343 | spin_lock(&nn->nn_lock); | 1351 | spin_lock(&nn->nn_lock); |
1344 | /* see if we already have one pending or have given up */ | 1352 | /* see if we already have one pending or have given up */ |
1345 | if (nn->nn_sc || nn->nn_persistent_error) | 1353 | stop = (nn->nn_sc || nn->nn_persistent_error); |
1346 | arg = NULL; | ||
1347 | spin_unlock(&nn->nn_lock); | 1354 | spin_unlock(&nn->nn_lock); |
1348 | if (arg == NULL) /* *shrug*, needed some indicator */ | 1355 | if (stop) |
1349 | goto out; | 1356 | goto out; |
1350 | 1357 | ||
1351 | nn->nn_last_connect_attempt = jiffies; | 1358 | nn->nn_last_connect_attempt = jiffies; |
@@ -1421,9 +1428,10 @@ out: | |||
1421 | return; | 1428 | return; |
1422 | } | 1429 | } |
1423 | 1430 | ||
1424 | static void o2net_connect_expired(void *arg) | 1431 | static void o2net_connect_expired(struct work_struct *work) |
1425 | { | 1432 | { |
1426 | struct o2net_node *nn = arg; | 1433 | struct o2net_node *nn = |
1434 | container_of(work, struct o2net_node, nn_connect_expired.work); | ||
1427 | 1435 | ||
1428 | spin_lock(&nn->nn_lock); | 1436 | spin_lock(&nn->nn_lock); |
1429 | if (!nn->nn_sc_valid) { | 1437 | if (!nn->nn_sc_valid) { |
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg) | |||
1436 | spin_unlock(&nn->nn_lock); | 1444 | spin_unlock(&nn->nn_lock); |
1437 | } | 1445 | } |
1438 | 1446 | ||
1439 | static void o2net_still_up(void *arg) | 1447 | static void o2net_still_up(struct work_struct *work) |
1440 | { | 1448 | { |
1441 | struct o2net_node *nn = arg; | 1449 | struct o2net_node *nn = |
1450 | container_of(work, struct o2net_node, nn_still_up.work); | ||
1442 | 1451 | ||
1443 | o2quo_hb_still_up(o2net_num_from_nn(nn)); | 1452 | o2quo_hb_still_up(o2net_num_from_nn(nn)); |
1444 | } | 1453 | } |
@@ -1644,9 +1653,9 @@ out: | |||
1644 | return ret; | 1653 | return ret; |
1645 | } | 1654 | } |
1646 | 1655 | ||
1647 | static void o2net_accept_many(void *arg) | 1656 | static void o2net_accept_many(struct work_struct *work) |
1648 | { | 1657 | { |
1649 | struct socket *sock = arg; | 1658 | struct socket *sock = o2net_listen_sock; |
1650 | while (o2net_accept_one(sock) == 0) | 1659 | while (o2net_accept_one(sock) == 0) |
1651 | cond_resched(); | 1660 | cond_resched(); |
1652 | } | 1661 | } |
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port) | |||
1700 | write_unlock_bh(&sock->sk->sk_callback_lock); | 1709 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1701 | 1710 | ||
1702 | o2net_listen_sock = sock; | 1711 | o2net_listen_sock = sock; |
1703 | INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); | 1712 | INIT_WORK(&o2net_listen_work, o2net_accept_many); |
1704 | 1713 | ||
1705 | sock->sk->sk_reuse = 1; | 1714 | sock->sk->sk_reuse = 1; |
1706 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | 1715 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
@@ -1819,9 +1828,10 @@ int o2net_init(void) | |||
1819 | struct o2net_node *nn = o2net_nn_from_num(i); | 1828 | struct o2net_node *nn = o2net_nn_from_num(i); |
1820 | 1829 | ||
1821 | spin_lock_init(&nn->nn_lock); | 1830 | spin_lock_init(&nn->nn_lock); |
1822 | INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); | 1831 | INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); |
1823 | INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); | 1832 | INIT_DELAYED_WORK(&nn->nn_connect_expired, |
1824 | INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); | 1833 | o2net_connect_expired); |
1834 | INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); | ||
1825 | /* until we see hb from a node we'll return einval */ | 1835 | /* until we see hb from a node we'll return einval */ |
1826 | nn->nn_persistent_error = -ENOTCONN; | 1836 | nn->nn_persistent_error = -ENOTCONN; |
1827 | init_waitqueue_head(&nn->nn_sc_wq); | 1837 | init_waitqueue_head(&nn->nn_sc_wq); |
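Editor's note, not part of the patch: the ocfs2 hunks above all follow one conversion pattern — the handler no longer receives its object through a void * argument but recovers it with container_of() from the embedded work item, and timer-driven items move from work_struct to delayed_work. A minimal sketch of that pattern with hypothetical names:

	#include <linux/workqueue.h>

	/* hypothetical container; stands in for o2net_sock_container etc. */
	struct my_conn {
		struct work_struct  connect_work;    /* immediate work */
		struct delayed_work keepalive_work;  /* timer-driven work */
	};

	/* old: static void my_connect_handler(void *arg)
	 * new: the only argument is the work_struct itself */
	static void my_connect_handler(struct work_struct *work)
	{
		struct my_conn *conn =
			container_of(work, struct my_conn, connect_work);
		/* use conn ... */
	}

	/* for delayed_work the work_struct sits one level deeper,
	 * so the member path gains a ".work" */
	static void my_keepalive_handler(struct work_struct *work)
	{
		struct my_conn *conn =
			container_of(work, struct my_conn, keepalive_work.work);
		/* use conn ... */
	}

	static void my_conn_init(struct my_conn *conn)
	{
		INIT_WORK(&conn->connect_work, my_connect_handler);
		INIT_DELAYED_WORK(&conn->keepalive_work, my_keepalive_handler);
	}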
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 4b46aac7d24..daebbd3a2c8 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h | |||
@@ -86,18 +86,18 @@ struct o2net_node { | |||
86 | * connect attempt fails and so can be self-arming. shutdown is | 86 | * connect attempt fails and so can be self-arming. shutdown is |
87 | * careful to first mark the nn such that no connects will be attempted | 87 | * careful to first mark the nn such that no connects will be attempted |
88 | * before canceling delayed connect work and flushing the queue. */ | 88 | * before canceling delayed connect work and flushing the queue. */ |
89 | struct work_struct nn_connect_work; | 89 | struct delayed_work nn_connect_work; |
90 | unsigned long nn_last_connect_attempt; | 90 | unsigned long nn_last_connect_attempt; |
91 | 91 | ||
92 | /* this is queued as nodes come up and is canceled when a connection is | 92 | /* this is queued as nodes come up and is canceled when a connection is |
93 | * established. this expiring gives up on the node and errors out | 93 | * established. this expiring gives up on the node and errors out |
94 | * transmits */ | 94 | * transmits */ |
95 | struct work_struct nn_connect_expired; | 95 | struct delayed_work nn_connect_expired; |
96 | 96 | ||
97 | /* after we give up on a socket we wait a while before deciding | 97 | /* after we give up on a socket we wait a while before deciding |
98 | * that it is still heartbeating and that we should do some | 98 | * that it is still heartbeating and that we should do some |
99 | * quorum work */ | 99 | * quorum work */ |
100 | struct work_struct nn_still_up; | 100 | struct delayed_work nn_still_up; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct o2net_sock_container { | 103 | struct o2net_sock_container { |
@@ -129,7 +129,7 @@ struct o2net_sock_container { | |||
129 | struct work_struct sc_shutdown_work; | 129 | struct work_struct sc_shutdown_work; |
130 | 130 | ||
131 | struct timer_list sc_idle_timeout; | 131 | struct timer_list sc_idle_timeout; |
132 | struct work_struct sc_keepalive_work; | 132 | struct delayed_work sc_keepalive_work; |
133 | 133 | ||
134 | unsigned sc_handshake_ok:1; | 134 | unsigned sc_handshake_ok:1; |
135 | 135 | ||
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index fa968180b07..6b6ff76538c 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned | |||
153 | * called functions that cannot be directly called from the | 153 | * called functions that cannot be directly called from the |
154 | * net message handlers for some reason, usually because | 154 | * net message handlers for some reason, usually because |
155 | * they need to send net messages of their own. */ | 155 | * they need to send net messages of their own. */ |
156 | void dlm_dispatch_work(void *data); | 156 | void dlm_dispatch_work(struct work_struct *work); |
157 | 157 | ||
158 | struct dlm_lock_resource; | 158 | struct dlm_lock_resource; |
159 | struct dlm_work_item; | 159 | struct dlm_work_item; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index f6cdab3a2c6..420a375a394 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -1297,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | |||
1297 | 1297 | ||
1298 | spin_lock_init(&dlm->work_lock); | 1298 | spin_lock_init(&dlm->work_lock); |
1299 | INIT_LIST_HEAD(&dlm->work_list); | 1299 | INIT_LIST_HEAD(&dlm->work_list); |
1300 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); | 1300 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); |
1301 | 1301 | ||
1302 | kref_init(&dlm->dlm_refs); | 1302 | kref_init(&dlm->dlm_refs); |
1303 | dlm->dlm_state = DLM_CTXT_NEW; | 1303 | dlm->dlm_state = DLM_CTXT_NEW; |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 9d950d7cea3..fb3e2b0817f 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) | |||
153 | } | 153 | } |
154 | 154 | ||
155 | /* Worker function used during recovery. */ | 155 | /* Worker function used during recovery. */ |
156 | void dlm_dispatch_work(void *data) | 156 | void dlm_dispatch_work(struct work_struct *work) |
157 | { | 157 | { |
158 | struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; | 158 | struct dlm_ctxt *dlm = |
159 | container_of(work, struct dlm_ctxt, dispatched_work); | ||
159 | LIST_HEAD(tmp_list); | 160 | LIST_HEAD(tmp_list); |
160 | struct list_head *iter, *iter2; | 161 | struct list_head *iter, *iter2; |
161 | struct dlm_work_item *item; | 162 | struct dlm_work_item *item; |
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c index eead48bbfac..7d2f578b267 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlm/userdlm.c | |||
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) | |||
171 | BUG(); | 171 | BUG(); |
172 | } | 172 | } |
173 | 173 | ||
174 | static void user_dlm_unblock_lock(void *opaque); | 174 | static void user_dlm_unblock_lock(struct work_struct *work); |
175 | 175 | ||
176 | static void __user_dlm_queue_lockres(struct user_lock_res *lockres) | 176 | static void __user_dlm_queue_lockres(struct user_lock_res *lockres) |
177 | { | 177 | { |
178 | if (!(lockres->l_flags & USER_LOCK_QUEUED)) { | 178 | if (!(lockres->l_flags & USER_LOCK_QUEUED)) { |
179 | user_dlm_grab_inode_ref(lockres); | 179 | user_dlm_grab_inode_ref(lockres); |
180 | 180 | ||
181 | INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, | 181 | INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); |
182 | lockres); | ||
183 | 182 | ||
184 | queue_work(user_dlm_worker, &lockres->l_work); | 183 | queue_work(user_dlm_worker, &lockres->l_work); |
185 | lockres->l_flags |= USER_LOCK_QUEUED; | 184 | lockres->l_flags |= USER_LOCK_QUEUED; |
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) | |||
279 | iput(inode); | 278 | iput(inode); |
280 | } | 279 | } |
281 | 280 | ||
282 | static void user_dlm_unblock_lock(void *opaque) | 281 | static void user_dlm_unblock_lock(struct work_struct *work) |
283 | { | 282 | { |
284 | int new_level, status; | 283 | int new_level, status; |
285 | struct user_lock_res *lockres = (struct user_lock_res *) opaque; | 284 | struct user_lock_res *lockres = |
285 | container_of(work, struct user_lock_res, l_work); | ||
286 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); | 286 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); |
287 | 287 | ||
288 | mlog(0, "processing lockres %.*s\n", lockres->l_namelen, | 288 | mlog(0, "processing lockres %.*s\n", lockres->l_namelen, |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index c0ad7cb5952..1d7f4ab1e5e 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -703,11 +703,12 @@ struct ocfs2_la_recovery_item { | |||
703 | * NOTE: This function can and will sleep on recovery of other nodes | 703 | * NOTE: This function can and will sleep on recovery of other nodes |
704 | * during cluster locking, just like any other ocfs2 process. | 704 | * during cluster locking, just like any other ocfs2 process. |
705 | */ | 705 | */ |
706 | void ocfs2_complete_recovery(void *data) | 706 | void ocfs2_complete_recovery(struct work_struct *work) |
707 | { | 707 | { |
708 | int ret; | 708 | int ret; |
709 | struct ocfs2_super *osb = data; | 709 | struct ocfs2_journal *journal = |
710 | struct ocfs2_journal *journal = osb->journal; | 710 | container_of(work, struct ocfs2_journal, j_recovery_work); |
711 | struct ocfs2_super *osb = journal->j_osb; | ||
711 | struct ocfs2_dinode *la_dinode, *tl_dinode; | 712 | struct ocfs2_dinode *la_dinode, *tl_dinode; |
712 | struct ocfs2_la_recovery_item *item; | 713 | struct ocfs2_la_recovery_item *item; |
713 | struct list_head *p, *n; | 714 | struct list_head *p, *n; |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index d86cb960b7e..899112ad813 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -133,7 +133,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* Exported only for the journal struct init code in super.c. Do not call. */ | 135 | /* Exported only for the journal struct init code in super.c. Do not call. */ |
136 | void ocfs2_complete_recovery(void *data); | 136 | void ocfs2_complete_recovery(struct work_struct *work); |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Journal Control: | 139 | * Journal Control: |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 078883772bd..b767fd7da6e 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -285,7 +285,7 @@ struct ocfs2_super | |||
285 | /* Truncate log info */ | 285 | /* Truncate log info */ |
286 | struct inode *osb_tl_inode; | 286 | struct inode *osb_tl_inode; |
287 | struct buffer_head *osb_tl_bh; | 287 | struct buffer_head *osb_tl_bh; |
288 | struct work_struct osb_truncate_log_wq; | 288 | struct delayed_work osb_truncate_log_wq; |
289 | 289 | ||
290 | struct ocfs2_node_map osb_recovering_orphan_dirs; | 290 | struct ocfs2_node_map osb_recovering_orphan_dirs; |
291 | unsigned int *osb_orphan_wipes; | 291 | unsigned int *osb_orphan_wipes; |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index b0992573dee..d9b4214a12d 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1365 | spin_lock_init(&journal->j_lock); | 1365 | spin_lock_init(&journal->j_lock); |
1366 | journal->j_trans_id = (unsigned long) 1; | 1366 | journal->j_trans_id = (unsigned long) 1; |
1367 | INIT_LIST_HEAD(&journal->j_la_cleanups); | 1367 | INIT_LIST_HEAD(&journal->j_la_cleanups); |
1368 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); | 1368 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); |
1369 | journal->j_state = OCFS2_JOURNAL_FREE; | 1369 | journal->j_state = OCFS2_JOURNAL_FREE; |
1370 | 1370 | ||
1371 | /* get some pseudo constants for clustersize bits */ | 1371 | /* get some pseudo constants for clustersize bits */ |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index ac93174c963..7280a23ef34 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super, | |||
104 | struct reiserfs_journal *journal); | 104 | struct reiserfs_journal *journal); |
105 | static int dirty_one_transaction(struct super_block *s, | 105 | static int dirty_one_transaction(struct super_block *s, |
106 | struct reiserfs_journal_list *jl); | 106 | struct reiserfs_journal_list *jl); |
107 | static void flush_async_commits(void *p); | 107 | static void flush_async_commits(struct work_struct *work); |
108 | static void queue_log_writer(struct super_block *s); | 108 | static void queue_log_writer(struct super_block *s); |
109 | 109 | ||
110 | /* values for join in do_journal_begin_r */ | 110 | /* values for join in do_journal_begin_r */ |
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, | |||
2836 | if (reiserfs_mounted_fs_count <= 1) | 2836 | if (reiserfs_mounted_fs_count <= 1) |
2837 | commit_wq = create_workqueue("reiserfs"); | 2837 | commit_wq = create_workqueue("reiserfs"); |
2838 | 2838 | ||
2839 | INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); | 2839 | INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); |
2840 | journal->j_work_sb = p_s_sb; | ||
2840 | return 0; | 2841 | return 0; |
2841 | free_and_return: | 2842 | free_and_return: |
2842 | free_journal_ram(p_s_sb); | 2843 | free_journal_ram(p_s_sb); |
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th, | |||
3447 | /* | 3448 | /* |
3448 | ** writeback the pending async commits to disk | 3449 | ** writeback the pending async commits to disk |
3449 | */ | 3450 | */ |
3450 | static void flush_async_commits(void *p) | 3451 | static void flush_async_commits(struct work_struct *work) |
3451 | { | 3452 | { |
3452 | struct super_block *p_s_sb = p; | 3453 | struct reiserfs_journal *journal = |
3453 | struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); | 3454 | container_of(work, struct reiserfs_journal, j_work.work); |
3455 | struct super_block *p_s_sb = journal->j_work_sb; | ||
3454 | struct reiserfs_journal_list *jl; | 3456 | struct reiserfs_journal_list *jl; |
3455 | struct list_head *entry; | 3457 | struct list_head *entry; |
3456 | 3458 | ||
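Editor's note, not part of the patch: container_of() only reaches the structure that embeds the work item; anything else the handler needs, such as the reiserfs super_block above, now has to be stashed in an extra field at init time. A sketch of that back-pointer idiom, with a hypothetical struct name around the real field names:

	#include <linux/fs.h>
	#include <linux/workqueue.h>

	struct my_journal {
		struct delayed_work  j_work;
		struct super_block  *j_work_sb;  /* back-pointer set at init */
	};

	static void my_flush_async_commits(struct work_struct *work)
	{
		struct my_journal *journal =
			container_of(work, struct my_journal, j_work.work);
		struct super_block *p_s_sb = journal->j_work_sb;
		/* flush pending commits for p_s_sb ... */
	}

	static void my_journal_init(struct my_journal *journal,
				    struct super_block *sb)
	{
		INIT_DELAYED_WORK(&journal->j_work, my_flush_async_commits);
		journal->j_work_sb = sb;
	}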
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 09360cf1e1f..8e6b56fc1ca 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -149,9 +149,10 @@ xfs_destroy_ioend( | |||
149 | */ | 149 | */ |
150 | STATIC void | 150 | STATIC void |
151 | xfs_end_bio_delalloc( | 151 | xfs_end_bio_delalloc( |
152 | void *data) | 152 | struct work_struct *work) |
153 | { | 153 | { |
154 | xfs_ioend_t *ioend = data; | 154 | xfs_ioend_t *ioend = |
155 | container_of(work, xfs_ioend_t, io_work); | ||
155 | 156 | ||
156 | xfs_destroy_ioend(ioend); | 157 | xfs_destroy_ioend(ioend); |
157 | } | 158 | } |
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc( | |||
161 | */ | 162 | */ |
162 | STATIC void | 163 | STATIC void |
163 | xfs_end_bio_written( | 164 | xfs_end_bio_written( |
164 | void *data) | 165 | struct work_struct *work) |
165 | { | 166 | { |
166 | xfs_ioend_t *ioend = data; | 167 | xfs_ioend_t *ioend = |
168 | container_of(work, xfs_ioend_t, io_work); | ||
167 | 169 | ||
168 | xfs_destroy_ioend(ioend); | 170 | xfs_destroy_ioend(ioend); |
169 | } | 171 | } |
@@ -176,9 +178,10 @@ xfs_end_bio_written( | |||
176 | */ | 178 | */ |
177 | STATIC void | 179 | STATIC void |
178 | xfs_end_bio_unwritten( | 180 | xfs_end_bio_unwritten( |
179 | void *data) | 181 | struct work_struct *work) |
180 | { | 182 | { |
181 | xfs_ioend_t *ioend = data; | 183 | xfs_ioend_t *ioend = |
184 | container_of(work, xfs_ioend_t, io_work); | ||
182 | bhv_vnode_t *vp = ioend->io_vnode; | 185 | bhv_vnode_t *vp = ioend->io_vnode; |
183 | xfs_off_t offset = ioend->io_offset; | 186 | xfs_off_t offset = ioend->io_offset; |
184 | size_t size = ioend->io_size; | 187 | size_t size = ioend->io_size; |
@@ -220,11 +223,11 @@ xfs_alloc_ioend( | |||
220 | ioend->io_size = 0; | 223 | ioend->io_size = 0; |
221 | 224 | ||
222 | if (type == IOMAP_UNWRITTEN) | 225 | if (type == IOMAP_UNWRITTEN) |
223 | INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); | 226 | INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); |
224 | else if (type == IOMAP_DELAY) | 227 | else if (type == IOMAP_DELAY) |
225 | INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); | 228 | INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); |
226 | else | 229 | else |
227 | INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); | 230 | INIT_WORK(&ioend->io_work, xfs_end_bio_written); |
228 | 231 | ||
229 | return ioend; | 232 | return ioend; |
230 | } | 233 | } |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index d3382843698..eef4a0ba11e 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin( | |||
994 | 994 | ||
995 | STATIC void | 995 | STATIC void |
996 | xfs_buf_iodone_work( | 996 | xfs_buf_iodone_work( |
997 | void *v) | 997 | struct work_struct *work) |
998 | { | 998 | { |
999 | xfs_buf_t *bp = (xfs_buf_t *)v; | 999 | xfs_buf_t *bp = |
1000 | container_of(work, xfs_buf_t, b_iodone_work); | ||
1000 | 1001 | ||
1001 | if (bp->b_iodone) | 1002 | if (bp->b_iodone) |
1002 | (*(bp->b_iodone))(bp); | 1003 | (*(bp->b_iodone))(bp); |
@@ -1017,10 +1018,10 @@ xfs_buf_ioend( | |||
1017 | 1018 | ||
1018 | if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { | 1019 | if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { |
1019 | if (schedule) { | 1020 | if (schedule) { |
1020 | INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); | 1021 | INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); |
1021 | queue_work(xfslogd_workqueue, &bp->b_iodone_work); | 1022 | queue_work(xfslogd_workqueue, &bp->b_iodone_work); |
1022 | } else { | 1023 | } else { |
1023 | xfs_buf_iodone_work(bp); | 1024 | xfs_buf_iodone_work(&bp->b_iodone_work); |
1024 | } | 1025 | } |
1025 | } else { | 1026 | } else { |
1026 | up(&bp->b_iodonesema); | 1027 | up(&bp->b_iodonesema); |
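Editor's note, not part of the patch: when the same routine is sometimes queued and sometimes run inline, the synchronous caller must now hand it the embedded work_struct rather than the object itself, as the xfs_buf_ioend() hunk does. A hedged sketch with hypothetical names:

	#include <linux/workqueue.h>

	struct my_buf {
		struct work_struct b_iodone_work;
	};

	static struct workqueue_struct *my_wq;	/* assumed created elsewhere */

	static void my_iodone_work(struct work_struct *work)
	{
		struct my_buf *bp =
			container_of(work, struct my_buf, b_iodone_work);
		/* finish I/O completion processing for bp ... */
	}

	static void my_ioend(struct my_buf *bp, int schedule)
	{
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, my_iodone_work);
			queue_work(my_wq, &bp->b_iodone_work);
		} else {
			/* run inline: pass the work item, not bp itself */
			my_iodone_work(&bp->b_iodone_work);
		}
	}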
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h index 805ae3575e4..345a649ec83 100644 --- a/include/asm-arm/arch-omap/irda.h +++ b/include/asm-arm/arch-omap/irda.h | |||
@@ -24,7 +24,7 @@ struct omap_irda_config { | |||
24 | /* Very specific to the needs of some platforms (h3,h4) | 24 | /* Very specific to the needs of some platforms (h3,h4) |
25 | * having calls which can sleep in irda_set_speed. | 25 | * having calls which can sleep in irda_set_speed. |
26 | */ | 26 | */ |
27 | struct work_struct gpio_expa; | 27 | struct delayed_work gpio_expa; |
28 | int rx_channel; | 28 | int rx_channel; |
29 | int tx_channel; | 29 | int tx_channel; |
30 | unsigned long dest_start; | 30 | unsigned long dest_start; |
diff --git a/include/linux/aio.h b/include/linux/aio.h index 0d71c0041f1..9e350fd44d7 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -194,7 +194,7 @@ struct kioctx { | |||
194 | 194 | ||
195 | struct aio_ring_info ring_info; | 195 | struct aio_ring_info ring_info; |
196 | 196 | ||
197 | struct work_struct wq; | 197 | struct delayed_work wq; |
198 | }; | 198 | }; |
199 | 199 | ||
200 | /* prototypes */ | 200 | /* prototypes */ |
diff --git a/include/linux/connector.h b/include/linux/connector.h index 4c02119c6ab..3ea1cd58de9 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -133,7 +133,7 @@ struct cn_callback_data { | |||
133 | struct cn_callback_entry { | 133 | struct cn_callback_entry { |
134 | struct list_head callback_entry; | 134 | struct list_head callback_entry; |
135 | struct cn_callback *cb; | 135 | struct cn_callback *cb; |
136 | struct work_struct work; | 136 | struct delayed_work work; |
137 | struct cn_queue_dev *pdev; | 137 | struct cn_queue_dev *pdev; |
138 | 138 | ||
139 | struct cn_callback_id id; | 139 | struct cn_callback_id id; |
@@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev); | |||
170 | 170 | ||
171 | int cn_cb_equal(struct cb_id *, struct cb_id *); | 171 | int cn_cb_equal(struct cb_id *, struct cb_id *); |
172 | 172 | ||
173 | void cn_queue_wrapper(void *data); | 173 | void cn_queue_wrapper(struct work_struct *work); |
174 | 174 | ||
175 | extern int cn_already_initialized; | 175 | extern int cn_already_initialized; |
176 | 176 | ||
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index c115e9e840b..1fb02e17f6f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
@@ -461,7 +461,7 @@ struct i2o_driver { | |||
461 | int (*reply) (struct i2o_controller *, u32, struct i2o_message *); | 461 | int (*reply) (struct i2o_controller *, u32, struct i2o_message *); |
462 | 462 | ||
463 | /* Event handler */ | 463 | /* Event handler */ |
464 | void (*event) (struct i2o_event *); | 464 | work_func_t event; |
465 | 465 | ||
466 | struct workqueue_struct *event_queue; /* Event queue */ | 466 | struct workqueue_struct *event_queue; /* Event queue */ |
467 | 467 | ||
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index efe0ee4cc80..06c58c423fe 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h | |||
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t) | |||
158 | if (t->buf.tail != NULL) | 158 | if (t->buf.tail != NULL) |
159 | t->buf.tail->commit = t->buf.tail->used; | 159 | t->buf.tail->commit = t->buf.tail->used; |
160 | spin_unlock_irqrestore(&t->buf.lock, flags); | 160 | spin_unlock_irqrestore(&t->buf.lock, flags); |
161 | schedule_work(&t->buf.work); | 161 | schedule_delayed_work(&t->buf.work, 0); |
162 | } | 162 | } |
163 | 163 | ||
164 | #endif | 164 | #endif |
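Editor's note, not part of the patch: where a member changes from work_struct to delayed_work, every immediate queueing of it switches to the delayed variant with a zero delay, as in the con_schedule_flip() hunk above. The idiom, sketched with a hypothetical struct:

	#include <linux/workqueue.h>

	struct my_bufhead {
		struct delayed_work work;	/* was: struct work_struct work */
	};

	static void my_schedule_flip(struct my_bufhead *buf)
	{
		/* schedule_work(&buf->work) no longer type-checks; a zero
		 * delay preserves the old "run as soon as possible" behaviour */
		schedule_delayed_work(&buf->work, 0);
	}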
diff --git a/include/linux/libata.h b/include/linux/libata.h index 202283b5df9..ab275483032 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -575,8 +575,9 @@ struct ata_port { | |||
575 | struct ata_host *host; | 575 | struct ata_host *host; |
576 | struct device *dev; | 576 | struct device *dev; |
577 | 577 | ||
578 | struct work_struct port_task; | 578 | void *port_task_data; |
579 | struct work_struct hotplug_task; | 579 | struct delayed_work port_task; |
580 | struct delayed_work hotplug_task; | ||
580 | struct work_struct scsi_rescan_task; | 581 | struct work_struct scsi_rescan_task; |
581 | 582 | ||
582 | unsigned int hsm_task_state; | 583 | unsigned int hsm_task_state; |
@@ -755,7 +756,7 @@ extern void ata_host_resume(struct ata_host *host); | |||
755 | extern int ata_ratelimit(void); | 756 | extern int ata_ratelimit(void); |
756 | extern int ata_busy_sleep(struct ata_port *ap, | 757 | extern int ata_busy_sleep(struct ata_port *ap, |
757 | unsigned long timeout_pat, unsigned long timeout); | 758 | unsigned long timeout_pat, unsigned long timeout); |
758 | extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), | 759 | extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, |
759 | void *data, unsigned long delay); | 760 | void *data, unsigned long delay); |
760 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 761 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
761 | unsigned long interval_msec, | 762 | unsigned long interval_msec, |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 528e7d3fecb..c15ae1986b9 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -110,7 +110,7 @@ struct mmc_host { | |||
110 | struct mmc_card *card_busy; /* the MMC card claiming host */ | 110 | struct mmc_card *card_busy; /* the MMC card claiming host */ |
111 | struct mmc_card *card_selected; /* the selected MMC card */ | 111 | struct mmc_card *card_selected; /* the selected MMC card */ |
112 | 112 | ||
113 | struct work_struct detect; | 113 | struct delayed_work detect; |
114 | 114 | ||
115 | unsigned long private[0] ____cacheline_aligned; | 115 | unsigned long private[0] ____cacheline_aligned; |
116 | }; | 116 | }; |
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index b089d950628..a503052138b 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -127,10 +127,10 @@ struct ncp_server { | |||
127 | } unexpected_packet; | 127 | } unexpected_packet; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | extern void ncp_tcp_rcv_proc(void *server); | 130 | extern void ncp_tcp_rcv_proc(struct work_struct *work); |
131 | extern void ncp_tcp_tx_proc(void *server); | 131 | extern void ncp_tcp_tx_proc(struct work_struct *work); |
132 | extern void ncpdgram_rcv_proc(void *server); | 132 | extern void ncpdgram_rcv_proc(struct work_struct *work); |
133 | extern void ncpdgram_timeout_proc(void *server); | 133 | extern void ncpdgram_timeout_proc(struct work_struct *work); |
134 | extern void ncpdgram_timeout_call(unsigned long server); | 134 | extern void ncpdgram_timeout_call(unsigned long server); |
135 | extern void ncp_tcp_data_ready(struct sock* sk, int len); | 135 | extern void ncp_tcp_data_ready(struct sock* sk, int len); |
136 | extern void ncp_tcp_write_space(struct sock* sk); | 136 | extern void ncp_tcp_write_space(struct sock* sk); |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 2cc9867b162..29930b71a9a 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -32,7 +32,7 @@ struct netpoll_info { | |||
32 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ | 32 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ |
33 | struct sk_buff_head arp_tx; /* list of arp requests to reply to */ | 33 | struct sk_buff_head arp_tx; /* list of arp requests to reply to */ |
34 | struct sk_buff_head txq; | 34 | struct sk_buff_head txq; |
35 | struct work_struct tx_work; | 35 | struct delayed_work tx_work; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | void netpoll_poll(struct netpoll *np); | 38 | void netpoll_poll(struct netpoll *np); |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 7ccfc7ef0a8..95796e6924f 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -51,7 +51,7 @@ struct nfs_client { | |||
51 | 51 | ||
52 | unsigned long cl_lease_time; | 52 | unsigned long cl_lease_time; |
53 | unsigned long cl_last_renewal; | 53 | unsigned long cl_last_renewal; |
54 | struct work_struct cl_renewd; | 54 | struct delayed_work cl_renewd; |
55 | 55 | ||
56 | struct rpc_wait_queue cl_rpcwaitq; | 56 | struct rpc_wait_queue cl_rpcwaitq; |
57 | 57 | ||
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 62a7169aed8..3a28742d86f 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h | |||
@@ -249,7 +249,8 @@ struct reiserfs_journal { | |||
249 | int j_errno; | 249 | int j_errno; |
250 | 250 | ||
251 | /* when flushing ordered buffers, throttle new ordered writers */ | 251 | /* when flushing ordered buffers, throttle new ordered writers */ |
252 | struct work_struct j_work; | 252 | struct delayed_work j_work; |
253 | struct super_block *j_work_sb; | ||
253 | atomic_t j_async_throttle; | 254 | atomic_t j_async_throttle; |
254 | }; | 255 | }; |
255 | 256 | ||
diff --git a/include/linux/relay.h b/include/linux/relay.h index 24accb48384..0e3d91b7699 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
@@ -38,7 +38,7 @@ struct rchan_buf | |||
38 | size_t subbufs_consumed; /* count of sub-buffers consumed */ | 38 | size_t subbufs_consumed; /* count of sub-buffers consumed */ |
39 | struct rchan *chan; /* associated channel */ | 39 | struct rchan *chan; /* associated channel */ |
40 | wait_queue_head_t read_wait; /* reader wait queue */ | 40 | wait_queue_head_t read_wait; /* reader wait queue */ |
41 | struct work_struct wake_readers; /* reader wake-up work struct */ | 41 | struct delayed_work wake_readers; /* reader wake-up work struct */ |
42 | struct dentry *dentry; /* channel file dentry */ | 42 | struct dentry *dentry; /* channel file dentry */ |
43 | struct kref kref; /* channel buffer refcount */ | 43 | struct kref kref; /* channel buffer refcount */ |
44 | struct page **page_array; /* array of current buffer pages */ | 44 | struct page **page_array; /* array of current buffer pages */ |
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index a2eb9b4a9de..4a68125b6de 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h | |||
@@ -30,7 +30,7 @@ struct rpc_inode { | |||
30 | #define RPC_PIPE_WAIT_FOR_OPEN 1 | 30 | #define RPC_PIPE_WAIT_FOR_OPEN 1 |
31 | int flags; | 31 | int flags; |
32 | struct rpc_pipe_ops *ops; | 32 | struct rpc_pipe_ops *ops; |
33 | struct work_struct queue_timeout; | 33 | struct delayed_work queue_timeout; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static inline struct rpc_inode * | 36 | static inline struct rpc_inode * |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 60394fbc4c7..3e04c1512fc 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -177,7 +177,7 @@ struct rpc_xprt { | |||
177 | unsigned long connect_timeout, | 177 | unsigned long connect_timeout, |
178 | bind_timeout, | 178 | bind_timeout, |
179 | reestablish_timeout; | 179 | reestablish_timeout; |
180 | struct work_struct connect_worker; | 180 | struct delayed_work connect_worker; |
181 | unsigned short port; | 181 | unsigned short port; |
182 | 182 | ||
183 | /* | 183 | /* |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 65321f911c1..f717f089823 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -53,7 +53,7 @@ struct tty_buffer { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct tty_bufhead { | 55 | struct tty_bufhead { |
56 | struct work_struct work; | 56 | struct delayed_work work; |
57 | struct semaphore pty_sem; | 57 | struct semaphore pty_sem; |
58 | spinlock_t lock; | 58 | spinlock_t lock; |
59 | struct tty_buffer *head; /* Queue head */ | 59 | struct tty_buffer *head; /* Queue head */ |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 0cd73edeef1..aab5b1b7202 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -388,7 +388,7 @@ struct usb_device { | |||
388 | 388 | ||
389 | int pm_usage_cnt; /* usage counter for autosuspend */ | 389 | int pm_usage_cnt; /* usage counter for autosuspend */ |
390 | #ifdef CONFIG_PM | 390 | #ifdef CONFIG_PM |
391 | struct work_struct autosuspend; /* for delayed autosuspends */ | 391 | struct delayed_work autosuspend; /* for delayed autosuspends */ |
392 | struct mutex pm_mutex; /* protects PM operations */ | 392 | struct mutex pm_mutex; /* protects PM operations */ |
393 | 393 | ||
394 | unsigned auto_pm:1; /* autosuspend/resume in progress */ | 394 | unsigned auto_pm:1; /* autosuspend/resume in progress */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9bca3539a1e..4a3ea83c6d1 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -11,12 +11,23 @@ | |||
11 | 11 | ||
12 | struct workqueue_struct; | 12 | struct workqueue_struct; |
13 | 13 | ||
14 | struct work_struct; | ||
15 | typedef void (*work_func_t)(struct work_struct *work); | ||
16 | |||
14 | struct work_struct { | 17 | struct work_struct { |
15 | unsigned long pending; | 18 | /* the first word is the work queue pointer and the flags rolled into |
19 | * one */ | ||
20 | unsigned long management; | ||
21 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ | ||
22 | #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ | ||
23 | #define WORK_STRUCT_FLAG_MASK (3UL) | ||
24 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) | ||
16 | struct list_head entry; | 25 | struct list_head entry; |
17 | void (*func)(void *); | 26 | work_func_t func; |
18 | void *data; | 27 | }; |
19 | void *wq_data; | 28 | |
29 | struct delayed_work { | ||
30 | struct work_struct work; | ||
20 | struct timer_list timer; | 31 | struct timer_list timer; |
21 | }; | 32 | }; |
22 | 33 | ||
@@ -24,36 +35,117 @@ struct execute_work { | |||
24 | struct work_struct work; | 35 | struct work_struct work; |
25 | }; | 36 | }; |
26 | 37 | ||
27 | #define __WORK_INITIALIZER(n, f, d) { \ | 38 | #define __WORK_INITIALIZER(n, f) { \ |
39 | .management = 0, \ | ||
40 | .entry = { &(n).entry, &(n).entry }, \ | ||
41 | .func = (f), \ | ||
42 | } | ||
43 | |||
44 | #define __WORK_INITIALIZER_NAR(n, f) { \ | ||
45 | .management = (1 << WORK_STRUCT_NOAUTOREL), \ | ||
28 | .entry = { &(n).entry, &(n).entry }, \ | 46 | .entry = { &(n).entry, &(n).entry }, \ |
29 | .func = (f), \ | 47 | .func = (f), \ |
30 | .data = (d), \ | 48 | } |
49 | |||
50 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | ||
51 | .work = __WORK_INITIALIZER((n).work, (f)), \ | ||
52 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | ||
53 | } | ||
54 | |||
55 | #define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ | ||
56 | .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ | ||
31 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | 57 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ |
32 | } | 58 | } |
33 | 59 | ||
34 | #define DECLARE_WORK(n, f, d) \ | 60 | #define DECLARE_WORK(n, f) \ |
35 | struct work_struct n = __WORK_INITIALIZER(n, f, d) | 61 | struct work_struct n = __WORK_INITIALIZER(n, f) |
62 | |||
63 | #define DECLARE_WORK_NAR(n, f) \ | ||
64 | struct work_struct n = __WORK_INITIALIZER_NAR(n, f) | ||
65 | |||
66 | #define DECLARE_DELAYED_WORK(n, f) \ | ||
67 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) | ||
68 | |||
69 | #define DECLARE_DELAYED_WORK_NAR(n, f) \ | ||
70 | struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f) | ||
36 | 71 | ||
37 | /* | 72 | /* |
38 | * initialize a work-struct's func and data pointers: | 73 | * initialize a work item's function pointer |
39 | */ | 74 | */ |
40 | #define PREPARE_WORK(_work, _func, _data) \ | 75 | #define PREPARE_WORK(_work, _func) \ |
41 | do { \ | 76 | do { \ |
42 | (_work)->func = _func; \ | 77 | (_work)->func = (_func); \ |
43 | (_work)->data = _data; \ | ||
44 | } while (0) | 78 | } while (0) |
45 | 79 | ||
80 | #define PREPARE_DELAYED_WORK(_work, _func) \ | ||
81 | PREPARE_WORK(&(_work)->work, (_func)) | ||
82 | |||
46 | /* | 83 | /* |
47 | * initialize all of a work-struct: | 84 | * initialize all of a work item in one go |
48 | */ | 85 | */ |
49 | #define INIT_WORK(_work, _func, _data) \ | 86 | #define INIT_WORK(_work, _func) \ |
50 | do { \ | 87 | do { \ |
88 | (_work)->management = 0; \ | ||
51 | INIT_LIST_HEAD(&(_work)->entry); \ | 89 | INIT_LIST_HEAD(&(_work)->entry); \ |
52 | (_work)->pending = 0; \ | 90 | PREPARE_WORK((_work), (_func)); \ |
53 | PREPARE_WORK((_work), (_func), (_data)); \ | 91 | } while (0) |
92 | |||
93 | #define INIT_WORK_NAR(_work, _func) \ | ||
94 | do { \ | ||
95 | (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ | ||
96 | INIT_LIST_HEAD(&(_work)->entry); \ | ||
97 | PREPARE_WORK((_work), (_func)); \ | ||
98 | } while (0) | ||
99 | |||
100 | #define INIT_DELAYED_WORK(_work, _func) \ | ||
101 | do { \ | ||
102 | INIT_WORK(&(_work)->work, (_func)); \ | ||
103 | init_timer(&(_work)->timer); \ | ||
104 | } while (0) | ||
105 | |||
106 | #define INIT_DELAYED_WORK_NAR(_work, _func) \ | ||
107 | do { \ | ||
108 | INIT_WORK_NAR(&(_work)->work, (_func)); \ | ||
54 | init_timer(&(_work)->timer); \ | 109 | init_timer(&(_work)->timer); \ |
55 | } while (0) | 110 | } while (0) |
56 | 111 | ||
112 | /** | ||
113 | * work_pending - Find out whether a work item is currently pending | ||
114 | * @work: The work item in question | ||
115 | */ | ||
116 | #define work_pending(work) \ | ||
117 | test_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
118 | |||
119 | /** | ||
120 | * delayed_work_pending - Find out whether a delayable work item is currently | ||
121 | * pending | ||
122 | * @work: The work item in question | ||
123 | */ | ||
124 | #define delayed_work_pending(work) \ | ||
125 | test_bit(WORK_STRUCT_PENDING, &(work)->work.management) | ||
126 | |||
127 | /** | ||
128 | * work_release - Release a work item under execution | ||
129 | * @work: The work item to release | ||
130 | * | ||
131 | * This is used to release a work item that has been initialised with automatic | ||
132 | * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work | ||
133 | * function the opportunity to grab auxiliary data from the container of the | ||
134 | * work_struct before clearing the pending bit as the work_struct may be | ||
135 | * subject to deallocation the moment the pending bit is cleared. | ||
136 | * | ||
137 | * In such a case, this should be called in the work function after it has | ||
138 | * fetched any data it may require from the container of the work_struct. | ||
139 | * After this function has been called, the work_struct may be scheduled for | ||
140 | * further execution or it may be deallocated unless other precautions are | ||
141 | * taken. | ||
142 | * | ||
143 | * This should also be used to release a delayed work item. | ||
144 | */ | ||
145 | #define work_release(work) \ | ||
146 | clear_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
147 | |||
148 | |||
57 | extern struct workqueue_struct *__create_workqueue(const char *name, | 149 | extern struct workqueue_struct *__create_workqueue(const char *name, |
58 | int singlethread); | 150 | int singlethread); |
59 | #define create_workqueue(name) __create_workqueue((name), 0) | 151 | #define create_workqueue(name) __create_workqueue((name), 0) |
@@ -62,39 +154,38 @@ extern struct workqueue_struct *__create_workqueue(const char *name, | |||
62 | extern void destroy_workqueue(struct workqueue_struct *wq); | 154 | extern void destroy_workqueue(struct workqueue_struct *wq); |
63 | 155 | ||
64 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); | 156 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); |
65 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); | 157 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); |
66 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 158 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
67 | struct work_struct *work, unsigned long delay); | 159 | struct delayed_work *work, unsigned long delay); |
68 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); | 160 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); |
69 | 161 | ||
70 | extern int FASTCALL(schedule_work(struct work_struct *work)); | 162 | extern int FASTCALL(schedule_work(struct work_struct *work)); |
71 | extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); | 163 | extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); |
72 | 164 | ||
73 | extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); | 165 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); |
74 | extern int schedule_on_each_cpu(void (*func)(void *info), void *info); | 166 | extern int schedule_on_each_cpu(work_func_t func); |
75 | extern void flush_scheduled_work(void); | 167 | extern void flush_scheduled_work(void); |
76 | extern int current_is_keventd(void); | 168 | extern int current_is_keventd(void); |
77 | extern int keventd_up(void); | 169 | extern int keventd_up(void); |
78 | 170 | ||
79 | extern void init_workqueues(void); | 171 | extern void init_workqueues(void); |
80 | void cancel_rearming_delayed_work(struct work_struct *work); | 172 | void cancel_rearming_delayed_work(struct delayed_work *work); |
81 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, | 173 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, |
82 | struct work_struct *); | 174 | struct delayed_work *); |
83 | int execute_in_process_context(void (*fn)(void *), void *, | 175 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
84 | struct execute_work *); | ||
85 | 176 | ||
86 | /* | 177 | /* |
87 | * Kill off a pending schedule_delayed_work(). Note that the work callback | 178 | * Kill off a pending schedule_delayed_work(). Note that the work callback |
88 | * function may still be running on return from cancel_delayed_work(). Run | 179 | * function may still be running on return from cancel_delayed_work(). Run |
89 | * flush_scheduled_work() to wait on it. | 180 | * flush_scheduled_work() to wait on it. |
90 | */ | 181 | */ |
91 | static inline int cancel_delayed_work(struct work_struct *work) | 182 | static inline int cancel_delayed_work(struct delayed_work *work) |
92 | { | 183 | { |
93 | int ret; | 184 | int ret; |
94 | 185 | ||
95 | ret = del_timer_sync(&work->timer); | 186 | ret = del_timer_sync(&work->timer); |
96 | if (ret) | 187 | if (ret) |
97 | clear_bit(0, &work->pending); | 188 | clear_bit(WORK_STRUCT_PENDING, &work->work.management); |
98 | return ret; | 189 | return ret; |
99 | } | 190 | } |
100 | 191 | ||
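Editor's note, not part of the patch: pulling the reworked header together, here is a minimal sketch of how a caller declares, initialises, queues and cancels work under the new interface. All names are hypothetical; the NOAUTOREL/work_release() variants are only needed when the handler may free its own container and are left out here.

	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work);
	static void my_timed_handler(struct work_struct *work);

	/* static declarations no longer take a data argument */
	static DECLARE_WORK(my_work, my_handler);
	static DECLARE_DELAYED_WORK(my_dwork, my_timed_handler);

	static void my_handler(struct work_struct *work)
	{
		/* context, if any, comes from container_of() or from globals */
	}

	static void my_timed_handler(struct work_struct *work)
	{
		/* a delayed_work handler still receives the inner
		 * work_struct, i.e. &my_dwork.work */
	}

	static void example(void)
	{
		schedule_work(&my_work);
		schedule_delayed_work(&my_dwork, HZ);	/* run in ~1 second */

		if (delayed_work_pending(&my_dwork))
			cancel_delayed_work(&my_dwork);	/* timer not yet fired */
	}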
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 617b672b113..89119277553 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h | |||
@@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info { | |||
108 | /* Scan retries remaining */ | 108 | /* Scan retries remaining */ |
109 | int scan_retry; | 109 | int scan_retry; |
110 | 110 | ||
111 | struct work_struct work; | 111 | struct delayed_work work; |
112 | struct work_struct timeout; | 112 | struct delayed_work timeout; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | struct ieee80211softmac_bss_info { | 115 | struct ieee80211softmac_bss_info { |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 5f48748fe01..f7be1ac7360 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -84,7 +84,7 @@ struct inet_timewait_death_row { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | extern void inet_twdr_hangman(unsigned long data); | 86 | extern void inet_twdr_hangman(unsigned long data); |
87 | extern void inet_twdr_twkill_work(void *data); | 87 | extern void inet_twdr_twkill_work(struct work_struct *work); |
88 | extern void inet_twdr_twcal_tick(unsigned long data); | 88 | extern void inet_twdr_twcal_tick(unsigned long data); |
89 | 89 | ||
90 | #if (BITS_PER_LONG == 64) | 90 | #if (BITS_PER_LONG == 64) |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f8cbe40f52c..c089f93ba59 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *); | |||
1030 | void sctp_inq_free(struct sctp_inq *); | 1030 | void sctp_inq_free(struct sctp_inq *); |
1031 | void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); | 1031 | void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); |
1032 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); | 1032 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); |
1033 | void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); | 1033 | void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t); |
1034 | 1034 | ||
1035 | /* This is the structure we use to hold outbound chunks. You push | 1035 | /* This is the structure we use to hold outbound chunks. You push |
1036 | * chunks in and they automatically pop out the other end as bundled | 1036 | * chunks in and they automatically pop out the other end as bundled |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 44b2f82a6ee..9233ed5de66 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -201,9 +201,14 @@ struct domain_device { | |||
201 | void *lldd_dev; | 201 | void *lldd_dev; |
202 | }; | 202 | }; |
203 | 203 | ||
204 | struct sas_discovery_event { | ||
205 | struct work_struct work; | ||
206 | struct asd_sas_port *port; | ||
207 | }; | ||
208 | |||
204 | struct sas_discovery { | 209 | struct sas_discovery { |
205 | spinlock_t disc_event_lock; | 210 | spinlock_t disc_event_lock; |
206 | struct work_struct disc_work[DISC_NUM_EVENTS]; | 211 | struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; |
207 | unsigned long pending; | 212 | unsigned long pending; |
208 | u8 fanout_sas_addr[8]; | 213 | u8 fanout_sas_addr[8]; |
209 | u8 eeds_a[8]; | 214 | u8 eeds_a[8]; |
@@ -249,14 +254,19 @@ struct asd_sas_port { | |||
249 | void *lldd_port; /* not touched by the sas class code */ | 254 | void *lldd_port; /* not touched by the sas class code */ |
250 | }; | 255 | }; |
251 | 256 | ||
257 | struct asd_sas_event { | ||
258 | struct work_struct work; | ||
259 | struct asd_sas_phy *phy; | ||
260 | }; | ||
261 | |||
252 | /* The phy pretty much is controlled by the LLDD. | 262 | /* The phy pretty much is controlled by the LLDD. |
253 | * The class only reads those fields. | 263 | * The class only reads those fields. |
254 | */ | 264 | */ |
255 | struct asd_sas_phy { | 265 | struct asd_sas_phy { |
256 | /* private: */ | 266 | /* private: */ |
257 | /* protected by ha->event_lock */ | 267 | /* protected by ha->event_lock */ |
258 | struct work_struct port_events[PORT_NUM_EVENTS]; | 268 | struct asd_sas_event port_events[PORT_NUM_EVENTS]; |
259 | struct work_struct phy_events[PHY_NUM_EVENTS]; | 269 | struct asd_sas_event phy_events[PHY_NUM_EVENTS]; |
260 | 270 | ||
261 | unsigned long port_events_pending; | 271 | unsigned long port_events_pending; |
262 | unsigned long phy_events_pending; | 272 | unsigned long phy_events_pending; |
@@ -308,10 +318,15 @@ struct scsi_core { | |||
308 | int queue_thread_kill; | 318 | int queue_thread_kill; |
309 | }; | 319 | }; |
310 | 320 | ||
321 | struct sas_ha_event { | ||
322 | struct work_struct work; | ||
323 | struct sas_ha_struct *ha; | ||
324 | }; | ||
325 | |||
311 | struct sas_ha_struct { | 326 | struct sas_ha_struct { |
312 | /* private: */ | 327 | /* private: */ |
313 | spinlock_t event_lock; | 328 | spinlock_t event_lock; |
314 | struct work_struct ha_events[HA_NUM_EVENTS]; | 329 | struct sas_ha_event ha_events[HA_NUM_EVENTS]; |
315 | unsigned long pending; | 330 | unsigned long pending; |
316 | 331 | ||
317 | struct scsi_core core; | 332 | struct scsi_core core; |
@@ -631,6 +646,6 @@ void sas_unregister_dev(struct domain_device *); | |||
631 | 646 | ||
632 | void sas_init_dev(struct domain_device *); | 647 | void sas_init_dev(struct domain_device *); |
633 | 648 | ||
634 | void sas_task_abort(struct sas_task *task); | 649 | void sas_task_abort(struct work_struct *); |
635 | 650 | ||
636 | #endif /* _SASLIB_H_ */ | 651 | #endif /* _SASLIB_H_ */ |
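Editor's note, not part of the patch: where an array of work items shared one handler and used the old data pointer to say which object each belongs to, the libsas hunks replace the bare work_struct with a small wrapper that embeds the work item next to that context. The same shape, sketched with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_port;

	struct my_event {
		struct work_struct  work;
		struct my_port     *port;  /* context the old data pointer carried */
	};

	struct my_port {
		struct my_event events[4];
	};

	static void my_event_handler(struct work_struct *work)
	{
		struct my_event *ev = container_of(work, struct my_event, work);
		struct my_port *port = ev->port;
		/* handle the event for port ... */
	}

	static void my_port_init(struct my_port *port)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(port->events); i++) {
			INIT_WORK(&port->events[i].work, my_event_handler);
			port->events[i].port = port;
		}
	}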
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index fd352323378..798f7c7ee42 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h | |||
@@ -206,9 +206,9 @@ struct fc_rport { /* aka fc_starget_attrs */ | |||
206 | u8 flags; | 206 | u8 flags; |
207 | struct list_head peers; | 207 | struct list_head peers; |
208 | struct device dev; | 208 | struct device dev; |
209 | struct work_struct dev_loss_work; | 209 | struct delayed_work dev_loss_work; |
210 | struct work_struct scan_work; | 210 | struct work_struct scan_work; |
211 | struct work_struct fail_io_work; | 211 | struct delayed_work fail_io_work; |
212 | struct work_struct stgt_delete_work; | 212 | struct work_struct stgt_delete_work; |
213 | struct work_struct rport_delete_work; | 213 | struct work_struct rport_delete_work; |
214 | } __attribute__((aligned(sizeof(unsigned long)))); | 214 | } __attribute__((aligned(sizeof(unsigned long)))); |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 4b95c89c95c..d5c218ddc52 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
@@ -176,7 +176,7 @@ struct iscsi_cls_session { | |||
176 | 176 | ||
177 | /* recovery fields */ | 177 | /* recovery fields */ |
178 | int recovery_tmo; | 178 | int recovery_tmo; |
179 | struct work_struct recovery_work; | 179 | struct delayed_work recovery_work; |
180 | 180 | ||
181 | int target_id; | 181 | int target_id; |
182 | 182 | ||
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index 4c43521cc49..33720397a90 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h | |||
@@ -511,7 +511,7 @@ struct snd_ac97 { | |||
511 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 511 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
512 | unsigned int power_up; /* power states */ | 512 | unsigned int power_up; /* power states */ |
513 | struct workqueue_struct *power_workq; | 513 | struct workqueue_struct *power_workq; |
514 | struct work_struct power_work; | 514 | struct delayed_work power_work; |
515 | #endif | 515 | #endif |
516 | struct device dev; | 516 | struct device dev; |
517 | }; | 517 | }; |
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h index 11702aa0bea..2ee061625fd 100644 --- a/include/sound/ak4114.h +++ b/include/sound/ak4114.h | |||
@@ -182,7 +182,7 @@ struct ak4114 { | |||
182 | unsigned char rcs0; | 182 | unsigned char rcs0; |
183 | unsigned char rcs1; | 183 | unsigned char rcs1; |
184 | struct workqueue_struct *workqueue; | 184 | struct workqueue_struct *workqueue; |
185 | struct work_struct work; | 185 | struct delayed_work work; |
186 | void *change_callback_private; | 186 | void *change_callback_private; |
187 | void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); | 187 | void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); |
188 | }; | 188 | }; |
diff --git a/ipc/util.c b/ipc/util.c index cd8bb14a431..a9b7a227b8d 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr) | |||
514 | container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; | 514 | container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; |
515 | } | 515 | } |
516 | 516 | ||
517 | static void ipc_do_vfree(struct work_struct *work) | ||
518 | { | ||
519 | vfree(container_of(work, struct ipc_rcu_sched, work)); | ||
520 | } | ||
521 | |||
517 | /** | 522 | /** |
518 | * ipc_schedule_free - free ipc + rcu space | 523 | * ipc_schedule_free - free ipc + rcu space |
519 | * @head: RCU callback structure for queued work | 524 | * @head: RCU callback structure for queued work |
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head) | |||
528 | struct ipc_rcu_sched *sched = | 533 | struct ipc_rcu_sched *sched = |
529 | container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); | 534 | container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); |
530 | 535 | ||
531 | INIT_WORK(&sched->work, vfree, sched); | 536 | INIT_WORK(&sched->work, ipc_do_vfree); |
532 | schedule_work(&sched->work); | 537 | schedule_work(&sched->work); |
533 | } | 538 | } |
534 | 539 | ||
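Editor's note, not part of the patch: functions such as vfree() that were previously installed directly as work callbacks no longer match the work_func_t prototype, so a one-line wrapper that recovers the allocation via container_of() takes their place, as the ipc hunk above does. Sketched with hypothetical names:

	#include <linux/vmalloc.h>
	#include <linux/workqueue.h>

	/* header assumed to sit at the start of the vmalloc'ed region */
	struct my_deferred_free {
		struct work_struct work;
	};

	static void my_do_vfree(struct work_struct *work)
	{
		/* free the whole allocation that embeds the work item */
		vfree(container_of(work, struct my_deferred_free, work));
	}

	static void my_schedule_free(struct my_deferred_free *p)
	{
		INIT_WORK(&p->work, my_do_vfree);
		schedule_work(&p->work);
	}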
diff --git a/kernel/kmod.c b/kernel/kmod.c index 2b76dee2849..8d2bea09a4e 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module); | |||
114 | #endif /* CONFIG_KMOD */ | 114 | #endif /* CONFIG_KMOD */ |
115 | 115 | ||
116 | struct subprocess_info { | 116 | struct subprocess_info { |
117 | struct work_struct work; | ||
117 | struct completion *complete; | 118 | struct completion *complete; |
118 | char *path; | 119 | char *path; |
119 | char **argv; | 120 | char **argv; |
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data) | |||
221 | } | 222 | } |
222 | 223 | ||
223 | /* This is run by khelper thread */ | 224 | /* This is run by khelper thread */ |
224 | static void __call_usermodehelper(void *data) | 225 | static void __call_usermodehelper(struct work_struct *work) |
225 | { | 226 | { |
226 | struct subprocess_info *sub_info = data; | 227 | struct subprocess_info *sub_info = |
228 | container_of(work, struct subprocess_info, work); | ||
227 | pid_t pid; | 229 | pid_t pid; |
228 | int wait = sub_info->wait; | 230 | int wait = sub_info->wait; |
229 | 231 | ||
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
264 | { | 266 | { |
265 | DECLARE_COMPLETION_ONSTACK(done); | 267 | DECLARE_COMPLETION_ONSTACK(done); |
266 | struct subprocess_info sub_info = { | 268 | struct subprocess_info sub_info = { |
269 | .work = __WORK_INITIALIZER(sub_info.work, | ||
270 | __call_usermodehelper), | ||
267 | .complete = &done, | 271 | .complete = &done, |
268 | .path = path, | 272 | .path = path, |
269 | .argv = argv, | 273 | .argv = argv, |
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
272 | .wait = wait, | 276 | .wait = wait, |
273 | .retval = 0, | 277 | .retval = 0, |
274 | }; | 278 | }; |
275 | DECLARE_WORK(work, __call_usermodehelper, &sub_info); | ||
276 | 279 | ||
277 | if (!khelper_wq) | 280 | if (!khelper_wq) |
278 | return -EBUSY; | 281 | return -EBUSY; |
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
280 | if (path[0] == '\0') | 283 | if (path[0] == '\0') |
281 | return 0; | 284 | return 0; |
282 | 285 | ||
283 | queue_work(khelper_wq, &work); | 286 | queue_work(khelper_wq, &sub_info.work); |
284 | wait_for_completion(&done); | 287 | wait_for_completion(&done); |
285 | return sub_info.retval; | 288 | return sub_info.retval; |
286 | } | 289 | } |
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
291 | { | 294 | { |
292 | DECLARE_COMPLETION(done); | 295 | DECLARE_COMPLETION(done); |
293 | struct subprocess_info sub_info = { | 296 | struct subprocess_info sub_info = { |
297 | .work = __WORK_INITIALIZER(sub_info.work, | ||
298 | __call_usermodehelper), | ||
294 | .complete = &done, | 299 | .complete = &done, |
295 | .path = path, | 300 | .path = path, |
296 | .argv = argv, | 301 | .argv = argv, |
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
298 | .retval = 0, | 303 | .retval = 0, |
299 | }; | 304 | }; |
300 | struct file *f; | 305 | struct file *f; |
301 | DECLARE_WORK(work, __call_usermodehelper, &sub_info); | ||
302 | 306 | ||
303 | if (!khelper_wq) | 307 | if (!khelper_wq) |
304 | return -EBUSY; | 308 | return -EBUSY; |
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
318 | } | 322 | } |
319 | sub_info.stdin = f; | 323 | sub_info.stdin = f; |
320 | 324 | ||
321 | queue_work(khelper_wq, &work); | 325 | queue_work(khelper_wq, &sub_info.work); |
322 | wait_for_completion(&done); | 326 | wait_for_completion(&done); |
323 | return sub_info.retval; | 327 | return sub_info.retval; |
324 | } | 328 | } |
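Editor's note, not part of the patch: call sites that used to pair an on-stack DECLARE_WORK(..., fn, &info) with a separate info structure now embed the work_struct in that structure and initialise it with __WORK_INITIALIZER(), as the kmod hunks show. A hedged sketch of the shape, with hypothetical names:

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct my_request {
		struct work_struct  work;
		struct completion  *complete;
		int                 retval;
	};

	static void my_worker(struct work_struct *work)
	{
		struct my_request *req =
			container_of(work, struct my_request, work);

		req->retval = 0;	/* do the real work here */
		complete(req->complete);
	}

	static int my_call(struct workqueue_struct *wq)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct my_request req = {
			.work     = __WORK_INITIALIZER(req.work, my_worker),
			.complete = &done,
		};

		queue_work(wq, &req.work);
		wait_for_completion(&done);
		return req.retval;
	}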
diff --git a/kernel/kthread.c b/kernel/kthread.c index 4f9c60ef95e..1db8c72d0d3 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -31,6 +31,8 @@ struct kthread_create_info | |||
31 | /* Result passed back to kthread_create() from keventd. */ | 31 | /* Result passed back to kthread_create() from keventd. */ |
32 | struct task_struct *result; | 32 | struct task_struct *result; |
33 | struct completion done; | 33 | struct completion done; |
34 | |||
35 | struct work_struct work; | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct kthread_stop_info | 38 | struct kthread_stop_info |
@@ -111,9 +113,10 @@ static int kthread(void *_create) | |||
111 | } | 113 | } |
112 | 114 | ||
113 | /* We are keventd: create a thread. */ | 115 | /* We are keventd: create a thread. */ |
114 | static void keventd_create_kthread(void *_create) | 116 | static void keventd_create_kthread(struct work_struct *work) |
115 | { | 117 | { |
116 | struct kthread_create_info *create = _create; | 118 | struct kthread_create_info *create = |
119 | container_of(work, struct kthread_create_info, work); | ||
117 | int pid; | 120 | int pid; |
118 | 121 | ||
119 | /* We want our own signal handler (we take no signals by default). */ | 122 | /* We want our own signal handler (we take no signals by default). */ |
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
154 | ...) | 157 | ...) |
155 | { | 158 | { |
156 | struct kthread_create_info create; | 159 | struct kthread_create_info create; |
157 | DECLARE_WORK(work, keventd_create_kthread, &create); | ||
158 | 160 | ||
159 | create.threadfn = threadfn; | 161 | create.threadfn = threadfn; |
160 | create.data = data; | 162 | create.data = data; |
161 | init_completion(&create.started); | 163 | init_completion(&create.started); |
162 | init_completion(&create.done); | 164 | init_completion(&create.done); |
165 | INIT_WORK(&create.work, keventd_create_kthread); | ||
163 | 166 | ||
164 | /* | 167 | /* |
165 | * The workqueue needs to start up first: | 168 | * The workqueue needs to start up first: |
166 | */ | 169 | */ |
167 | if (!helper_wq) | 170 | if (!helper_wq) |
168 | work.func(work.data); | 171 | create.work.func(&create.work); |
169 | else { | 172 | else { |
170 | queue_work(helper_wq, &work); | 173 | queue_work(helper_wq, &create.work); |
171 | wait_for_completion(&create.done); | 174 | wait_for_completion(&create.done); |
172 | } | 175 | } |
173 | if (!IS_ERR(create.result)) { | 176 | if (!IS_ERR(create.result)) { |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index f1f900ac316..678ec736076 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c | |||
@@ -16,12 +16,12 @@ | |||
16 | * callback we use. | 16 | * callback we use. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | static void do_poweroff(void *dummy) | 19 | static void do_poweroff(struct work_struct *dummy) |
20 | { | 20 | { |
21 | kernel_power_off(); | 21 | kernel_power_off(); |
22 | } | 22 | } |
23 | 23 | ||
24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); | 24 | static DECLARE_WORK(poweroff_work, do_poweroff); |
25 | 25 | ||
26 | static void handle_poweroff(int key, struct tty_struct *tty) | 26 | static void handle_poweroff(int key, struct tty_struct *tty) |
27 | { | 27 | { |
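For handlers that need no per-item context at all, such as do_poweroff() above, the conversion is purely mechanical: the handler gains a struct work_struct * argument it ignores, and DECLARE_WORK() drops its data argument. A sketch (my_action and my_action_work are invented names):

#include <linux/workqueue.h>

static void my_action(struct work_struct *unused)
{
	/* runs later in process context; nothing to look up */
}

static DECLARE_WORK(my_action_work, my_action);

/* callers, even from atomic context, simply do:
 *	schedule_work(&my_action_work);
 */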
diff --git a/kernel/relay.c b/kernel/relay.c index f04bbdb56ac..2b92e8ece85 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = { | |||
308 | * reason waking is deferred is that calling directly from write | 308 | * reason waking is deferred is that calling directly from write |
309 | * causes problems if you're writing from say the scheduler. | 309 | * causes problems if you're writing from say the scheduler. |
310 | */ | 310 | */ |
311 | static void wakeup_readers(void *private) | 311 | static void wakeup_readers(struct work_struct *work) |
312 | { | 312 | { |
313 | struct rchan_buf *buf = private; | 313 | struct rchan_buf *buf = |
314 | container_of(work, struct rchan_buf, wake_readers.work); | ||
314 | wake_up_interruptible(&buf->read_wait); | 315 | wake_up_interruptible(&buf->read_wait); |
315 | } | 316 | } |
316 | 317 | ||
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init) | |||
328 | if (init) { | 329 | if (init) { |
329 | init_waitqueue_head(&buf->read_wait); | 330 | init_waitqueue_head(&buf->read_wait); |
330 | kref_init(&buf->kref); | 331 | kref_init(&buf->kref); |
331 | INIT_WORK(&buf->wake_readers, NULL, NULL); | 332 | INIT_DELAYED_WORK(&buf->wake_readers, NULL); |
332 | } else { | 333 | } else { |
333 | cancel_delayed_work(&buf->wake_readers); | 334 | cancel_delayed_work(&buf->wake_readers); |
334 | flush_scheduled_work(); | 335 | flush_scheduled_work(); |
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) | |||
549 | buf->padding[old_subbuf]; | 550 | buf->padding[old_subbuf]; |
550 | smp_mb(); | 551 | smp_mb(); |
551 | if (waitqueue_active(&buf->read_wait)) { | 552 | if (waitqueue_active(&buf->read_wait)) { |
552 | PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); | 553 | PREPARE_DELAYED_WORK(&buf->wake_readers, |
554 | wakeup_readers); | ||
553 | schedule_delayed_work(&buf->wake_readers, 1); | 555 | schedule_delayed_work(&buf->wake_readers, 1); |
554 | } | 556 | } |
555 | } | 557 | } |
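relay's wake_readers conversion shows the one subtlety with an embedded struct delayed_work: the handler is handed the inner work_struct, so container_of() has to name the .work member of the delayed_work field, not the field itself. A sketch with an invented struct rx_ring:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct rx_ring {
	struct delayed_work refill;	/* delayed_work = work_struct + timer */
	/* ... */
};

static void rx_refill(struct work_struct *work)
{
	/* note the extra ".work": we point inside the delayed_work */
	struct rx_ring *ring = container_of(work, struct rx_ring, refill.work);

	/* top up the ring ... then rearm */
	schedule_delayed_work(&ring->refill, HZ);
}

static void rx_ring_init(struct rx_ring *ring)
{
	INIT_DELAYED_WORK(&ring->refill, rx_refill);
	schedule_delayed_work(&ring->refill, HZ);
}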
diff --git a/kernel/sys.c b/kernel/sys.c index 98489d82801..c87b461de38 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
880 | return 0; | 880 | return 0; |
881 | } | 881 | } |
882 | 882 | ||
883 | static void deferred_cad(void *dummy) | 883 | static void deferred_cad(struct work_struct *dummy) |
884 | { | 884 | { |
885 | kernel_restart(NULL); | 885 | kernel_restart(NULL); |
886 | } | 886 | } |
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy) | |||
892 | */ | 892 | */ |
893 | void ctrl_alt_del(void) | 893 | void ctrl_alt_del(void) |
894 | { | 894 | { |
895 | static DECLARE_WORK(cad_work, deferred_cad, NULL); | 895 | static DECLARE_WORK(cad_work, deferred_cad); |
896 | 896 | ||
897 | if (C_A_D) | 897 | if (C_A_D) |
898 | schedule_work(&cad_work); | 898 | schedule_work(&cad_work); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 17c2f03d2c2..8d1e7cb8a51 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq) | |||
80 | return list_empty(&wq->list); | 80 | return list_empty(&wq->list); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void set_wq_data(struct work_struct *work, void *wq) | ||
84 | { | ||
85 | unsigned long new, old, res; | ||
86 | |||
87 | /* assume the pending flag is already set and that the task has already | ||
88 | * been queued on this workqueue */ | ||
89 | new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); | ||
90 | res = work->management; | ||
91 | if (res != new) { | ||
92 | do { | ||
93 | old = res; | ||
94 | new = (unsigned long) wq; | ||
95 | new |= (old & WORK_STRUCT_FLAG_MASK); | ||
96 | res = cmpxchg(&work->management, old, new); | ||
97 | } while (res != old); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | static inline void *get_wq_data(struct work_struct *work) | ||
102 | { | ||
103 | return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK); | ||
104 | } | ||
105 | |||
83 | /* Preempt must be disabled. */ | 106 | /* Preempt must be disabled. */ |
84 | static void __queue_work(struct cpu_workqueue_struct *cwq, | 107 | static void __queue_work(struct cpu_workqueue_struct *cwq, |
85 | struct work_struct *work) | 108 | struct work_struct *work) |
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
87 | unsigned long flags; | 110 | unsigned long flags; |
88 | 111 | ||
89 | spin_lock_irqsave(&cwq->lock, flags); | 112 | spin_lock_irqsave(&cwq->lock, flags); |
90 | work->wq_data = cwq; | 113 | set_wq_data(work, cwq); |
91 | list_add_tail(&work->entry, &cwq->worklist); | 114 | list_add_tail(&work->entry, &cwq->worklist); |
92 | cwq->insert_sequence++; | 115 | cwq->insert_sequence++; |
93 | wake_up(&cwq->more_work); | 116 | wake_up(&cwq->more_work); |
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
108 | { | 131 | { |
109 | int ret = 0, cpu = get_cpu(); | 132 | int ret = 0, cpu = get_cpu(); |
110 | 133 | ||
111 | if (!test_and_set_bit(0, &work->pending)) { | 134 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
112 | if (unlikely(is_single_threaded(wq))) | 135 | if (unlikely(is_single_threaded(wq))) |
113 | cpu = singlethread_cpu; | 136 | cpu = singlethread_cpu; |
114 | BUG_ON(!list_empty(&work->entry)); | 137 | BUG_ON(!list_empty(&work->entry)); |
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work); | |||
122 | 145 | ||
123 | static void delayed_work_timer_fn(unsigned long __data) | 146 | static void delayed_work_timer_fn(unsigned long __data) |
124 | { | 147 | { |
125 | struct work_struct *work = (struct work_struct *)__data; | 148 | struct delayed_work *dwork = (struct delayed_work *)__data; |
126 | struct workqueue_struct *wq = work->wq_data; | 149 | struct workqueue_struct *wq = get_wq_data(&dwork->work); |
127 | int cpu = smp_processor_id(); | 150 | int cpu = smp_processor_id(); |
128 | 151 | ||
129 | if (unlikely(is_single_threaded(wq))) | 152 | if (unlikely(is_single_threaded(wq))) |
130 | cpu = singlethread_cpu; | 153 | cpu = singlethread_cpu; |
131 | 154 | ||
132 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 155 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work); |
133 | } | 156 | } |
134 | 157 | ||
135 | /** | 158 | /** |
136 | * queue_delayed_work - queue work on a workqueue after delay | 159 | * queue_delayed_work - queue work on a workqueue after delay |
137 | * @wq: workqueue to use | 160 | * @wq: workqueue to use |
138 | * @work: work to queue | 161 | * @work: delayable work to queue |
139 | * @delay: number of jiffies to wait before queueing | 162 | * @delay: number of jiffies to wait before queueing |
140 | * | 163 | * |
141 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 164 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
142 | */ | 165 | */ |
143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, | 166 | int fastcall queue_delayed_work(struct workqueue_struct *wq, |
144 | struct work_struct *work, unsigned long delay) | 167 | struct delayed_work *dwork, unsigned long delay) |
145 | { | 168 | { |
146 | int ret = 0; | 169 | int ret = 0; |
147 | struct timer_list *timer = &work->timer; | 170 | struct timer_list *timer = &dwork->timer; |
171 | struct work_struct *work = &dwork->work; | ||
172 | |||
173 | if (delay == 0) | ||
174 | return queue_work(wq, work); | ||
148 | 175 | ||
149 | if (!test_and_set_bit(0, &work->pending)) { | 176 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
150 | BUG_ON(timer_pending(timer)); | 177 | BUG_ON(timer_pending(timer)); |
151 | BUG_ON(!list_empty(&work->entry)); | 178 | BUG_ON(!list_empty(&work->entry)); |
152 | 179 | ||
153 | /* This stores wq for the moment, for the timer_fn */ | 180 | /* This stores wq for the moment, for the timer_fn */ |
154 | work->wq_data = wq; | 181 | set_wq_data(work, wq); |
155 | timer->expires = jiffies + delay; | 182 | timer->expires = jiffies + delay; |
156 | timer->data = (unsigned long)work; | 183 | timer->data = (unsigned long)dwork; |
157 | timer->function = delayed_work_timer_fn; | 184 | timer->function = delayed_work_timer_fn; |
158 | add_timer(timer); | 185 | add_timer(timer); |
159 | ret = 1; | 186 | ret = 1; |
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work); | |||
172 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 199 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
173 | */ | 200 | */ |
174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 201 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
175 | struct work_struct *work, unsigned long delay) | 202 | struct delayed_work *dwork, unsigned long delay) |
176 | { | 203 | { |
177 | int ret = 0; | 204 | int ret = 0; |
178 | struct timer_list *timer = &work->timer; | 205 | struct timer_list *timer = &dwork->timer; |
206 | struct work_struct *work = &dwork->work; | ||
179 | 207 | ||
180 | if (!test_and_set_bit(0, &work->pending)) { | 208 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
181 | BUG_ON(timer_pending(timer)); | 209 | BUG_ON(timer_pending(timer)); |
182 | BUG_ON(!list_empty(&work->entry)); | 210 | BUG_ON(!list_empty(&work->entry)); |
183 | 211 | ||
184 | /* This stores wq for the moment, for the timer_fn */ | 212 | /* This stores wq for the moment, for the timer_fn */ |
185 | work->wq_data = wq; | 213 | set_wq_data(work, wq); |
186 | timer->expires = jiffies + delay; | 214 | timer->expires = jiffies + delay; |
187 | timer->data = (unsigned long)work; | 215 | timer->data = (unsigned long)dwork; |
188 | timer->function = delayed_work_timer_fn; | 216 | timer->function = delayed_work_timer_fn; |
189 | add_timer_on(timer, cpu); | 217 | add_timer_on(timer, cpu); |
190 | ret = 1; | 218 | ret = 1; |
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
212 | while (!list_empty(&cwq->worklist)) { | 240 | while (!list_empty(&cwq->worklist)) { |
213 | struct work_struct *work = list_entry(cwq->worklist.next, | 241 | struct work_struct *work = list_entry(cwq->worklist.next, |
214 | struct work_struct, entry); | 242 | struct work_struct, entry); |
215 | void (*f) (void *) = work->func; | 243 | work_func_t f = work->func; |
216 | void *data = work->data; | ||
217 | 244 | ||
218 | list_del_init(cwq->worklist.next); | 245 | list_del_init(cwq->worklist.next); |
219 | spin_unlock_irqrestore(&cwq->lock, flags); | 246 | spin_unlock_irqrestore(&cwq->lock, flags); |
220 | 247 | ||
221 | BUG_ON(work->wq_data != cwq); | 248 | BUG_ON(get_wq_data(work) != cwq); |
222 | clear_bit(0, &work->pending); | 249 | if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) |
223 | f(data); | 250 | work_release(work); |
251 | f(work); | ||
224 | 252 | ||
225 | spin_lock_irqsave(&cwq->lock, flags); | 253 | spin_lock_irqsave(&cwq->lock, flags); |
226 | cwq->remove_sequence++; | 254 | cwq->remove_sequence++; |
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work); | |||
468 | 496 | ||
469 | /** | 497 | /** |
470 | * schedule_delayed_work - put work task in global workqueue after delay | 498 | * schedule_delayed_work - put work task in global workqueue after delay |
471 | * @work: job to be done | 499 | * @dwork: job to be done |
472 | * @delay: number of jiffies to wait | 500 | * @delay: number of jiffies to wait or 0 for immediate execution |
473 | * | 501 | * |
474 | * After waiting for a given time this puts a job in the kernel-global | 502 | * After waiting for a given time this puts a job in the kernel-global |
475 | * workqueue. | 503 | * workqueue. |
476 | */ | 504 | */ |
477 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) | 505 | int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) |
478 | { | 506 | { |
479 | return queue_delayed_work(keventd_wq, work, delay); | 507 | return queue_delayed_work(keventd_wq, dwork, delay); |
480 | } | 508 | } |
481 | EXPORT_SYMBOL(schedule_delayed_work); | 509 | EXPORT_SYMBOL(schedule_delayed_work); |
482 | 510 | ||
483 | /** | 511 | /** |
484 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 512 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
485 | * @cpu: cpu to use | 513 | * @cpu: cpu to use |
486 | * @work: job to be done | 514 | * @dwork: job to be done |
487 | * @delay: number of jiffies to wait | 515 | * @delay: number of jiffies to wait |
488 | * | 516 | * |
489 | * After waiting for a given time this puts a job in the kernel-global | 517 | * After waiting for a given time this puts a job in the kernel-global |
490 | * workqueue on the specified CPU. | 518 | * workqueue on the specified CPU. |
491 | */ | 519 | */ |
492 | int schedule_delayed_work_on(int cpu, | 520 | int schedule_delayed_work_on(int cpu, |
493 | struct work_struct *work, unsigned long delay) | 521 | struct delayed_work *dwork, unsigned long delay) |
494 | { | 522 | { |
495 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); | 523 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); |
496 | } | 524 | } |
497 | EXPORT_SYMBOL(schedule_delayed_work_on); | 525 | EXPORT_SYMBOL(schedule_delayed_work_on); |
498 | 526 | ||
499 | /** | 527 | /** |
500 | * schedule_on_each_cpu - call a function on each online CPU from keventd | 528 | * schedule_on_each_cpu - call a function on each online CPU from keventd |
501 | * @func: the function to call | 529 | * @func: the function to call |
502 | * @info: a pointer to pass to func() | ||
503 | * | 530 | * |
504 | * Returns zero on success. | 531 | * Returns zero on success. |
505 | * Returns -ve errno on failure. | 532 | * Returns -ve errno on failure. |
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); | |||
508 | * | 535 | * |
509 | * schedule_on_each_cpu() is very slow. | 536 | * schedule_on_each_cpu() is very slow. |
510 | */ | 537 | */ |
511 | int schedule_on_each_cpu(void (*func)(void *info), void *info) | 538 | int schedule_on_each_cpu(work_func_t func) |
512 | { | 539 | { |
513 | int cpu; | 540 | int cpu; |
514 | struct work_struct *works; | 541 | struct work_struct *works; |
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info) | |||
519 | 546 | ||
520 | mutex_lock(&workqueue_mutex); | 547 | mutex_lock(&workqueue_mutex); |
521 | for_each_online_cpu(cpu) { | 548 | for_each_online_cpu(cpu) { |
522 | INIT_WORK(per_cpu_ptr(works, cpu), func, info); | 549 | INIT_WORK(per_cpu_ptr(works, cpu), func); |
523 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), | 550 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), |
524 | per_cpu_ptr(works, cpu)); | 551 | per_cpu_ptr(works, cpu)); |
525 | } | 552 | } |
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work); | |||
539 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed | 566 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed |
540 | * work whose handler rearms the delayed work. | 567 | * work whose handler rearms the delayed work. |
541 | * @wq: the controlling workqueue structure | 568 | * @wq: the controlling workqueue structure |
542 | * @work: the delayed work struct | 569 | * @dwork: the delayed work struct |
543 | */ | 570 | */ |
544 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | 571 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, |
545 | struct work_struct *work) | 572 | struct delayed_work *dwork) |
546 | { | 573 | { |
547 | while (!cancel_delayed_work(work)) | 574 | while (!cancel_delayed_work(dwork)) |
548 | flush_workqueue(wq); | 575 | flush_workqueue(wq); |
549 | } | 576 | } |
550 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | 577 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); |
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | |||
552 | /** | 579 | /** |
553 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd | 580 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd |
554 | * work whose handler rearms the delayed work. | 581 | * work whose handler rearms the delayed work. |
555 | * @work: the delayed work struct | 582 | * @dwork: the delayed work struct |
556 | */ | 583 | */ |
557 | void cancel_rearming_delayed_work(struct work_struct *work) | 584 | void cancel_rearming_delayed_work(struct delayed_work *dwork) |
558 | { | 585 | { |
559 | cancel_rearming_delayed_workqueue(keventd_wq, work); | 586 | cancel_rearming_delayed_workqueue(keventd_wq, dwork); |
560 | } | 587 | } |
561 | EXPORT_SYMBOL(cancel_rearming_delayed_work); | 588 | EXPORT_SYMBOL(cancel_rearming_delayed_work); |
562 | 589 | ||
563 | /** | 590 | /** |
564 | * execute_in_process_context - reliably execute the routine with user context | 591 | * execute_in_process_context - reliably execute the routine with user context |
565 | * @fn: the function to execute | 592 | * @fn: the function to execute |
566 | * @data: data to pass to the function | ||
567 | * @ew: guaranteed storage for the execute work structure (must | 593 | * @ew: guaranteed storage for the execute work structure (must |
568 | * be available when the work executes) | 594 | * be available when the work executes) |
569 | * | 595 | * |
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work); | |||
573 | * Returns: 0 - function was executed | 599 | * Returns: 0 - function was executed |
574 | * 1 - function was scheduled for execution | 600 | * 1 - function was scheduled for execution |
575 | */ | 601 | */ |
576 | int execute_in_process_context(void (*fn)(void *data), void *data, | 602 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) |
577 | struct execute_work *ew) | ||
578 | { | 603 | { |
579 | if (!in_interrupt()) { | 604 | if (!in_interrupt()) { |
580 | fn(data); | 605 | fn(&ew->work); |
581 | return 0; | 606 | return 0; |
582 | } | 607 | } |
583 | 608 | ||
584 | INIT_WORK(&ew->work, fn, data); | 609 | INIT_WORK(&ew->work, fn); |
585 | schedule_work(&ew->work); | 610 | schedule_work(&ew->work); |
586 | 611 | ||
587 | return 1; | 612 | return 1; |
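The new set_wq_data()/get_wq_data() helpers rely on workqueue structures being at least word-aligned, so the low bits of the pointer stored in work->management are free to carry per-item flags. The layout sketched below matches how the hunks above use it; the real constants live in include/linux/workqueue.h, which is not quoted here, so treat the names and values as illustrative.

/* work->management, one word, packed roughly like this:
 *
 *   bit 0   WORK_STRUCT_PENDING    set while the item is queued or running
 *   bit 1   WORK_STRUCT_NOAUTOREL  core must not clear PENDING before func()
 *   rest    pointer to the cpu_workqueue_struct the item was queued on
 *
 * assuming a two-bit flag field: */
#define EX_FLAG_MASK		3UL		/* stands in for WORK_STRUCT_FLAG_MASK */
#define EX_WQ_DATA_MASK		(~EX_FLAG_MASK)	/* stands in for WORK_STRUCT_WQ_DATA_MASK */

static inline void *ex_get_wq_data(unsigned long management)
{
	return (void *)(management & EX_WQ_DATA_MASK);
}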
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | 313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, |
314 | int node); | 314 | int node); |
315 | static int enable_cpucache(struct kmem_cache *cachep); | 315 | static int enable_cpucache(struct kmem_cache *cachep); |
316 | static void cache_reap(void *unused); | 316 | static void cache_reap(struct work_struct *unused); |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * This function must be completely optimized away if a constant is passed to | 319 | * This function must be completely optimized away if a constant is passed to |
@@ -753,7 +753,7 @@ int slab_is_available(void) | |||
753 | return g_cpucache_up == FULL; | 753 | return g_cpucache_up == FULL; |
754 | } | 754 | } |
755 | 755 | ||
756 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 756 | static DEFINE_PER_CPU(struct delayed_work, reap_work); |
757 | 757 | ||
758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | 758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) |
759 | { | 759 | { |
@@ -916,16 +916,16 @@ static void next_reap_node(void) | |||
916 | */ | 916 | */ |
917 | static void __devinit start_cpu_timer(int cpu) | 917 | static void __devinit start_cpu_timer(int cpu) |
918 | { | 918 | { |
919 | struct work_struct *reap_work = &per_cpu(reap_work, cpu); | 919 | struct delayed_work *reap_work = &per_cpu(reap_work, cpu); |
920 | 920 | ||
921 | /* | 921 | /* |
922 | * When this gets called from do_initcalls via cpucache_init(), | 922 | * When this gets called from do_initcalls via cpucache_init(), |
923 | * init_workqueues() has already run, so keventd will be setup | 923 | * init_workqueues() has already run, so keventd will be setup |
924 | * at that time. | 924 | * at that time. |
925 | */ | 925 | */ |
926 | if (keventd_up() && reap_work->func == NULL) { | 926 | if (keventd_up() && reap_work->work.func == NULL) { |
927 | init_reap_node(cpu); | 927 | init_reap_node(cpu); |
928 | INIT_WORK(reap_work, cache_reap, NULL); | 928 | INIT_DELAYED_WORK(reap_work, cache_reap); |
929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); | 929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); |
930 | } | 930 | } |
931 | } | 931 | } |
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try | 3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try |
3816 | * again on the next iteration. | 3816 | * again on the next iteration. |
3817 | */ | 3817 | */ |
3818 | static void cache_reap(void *unused) | 3818 | static void cache_reap(struct work_struct *unused) |
3819 | { | 3819 | { |
3820 | struct kmem_cache *searchp; | 3820 | struct kmem_cache *searchp; |
3821 | struct kmem_list3 *l3; | 3821 | struct kmem_list3 *l3; |
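slab's reaper shows the per-CPU flavour of the conversion: the per-CPU variable becomes a struct delayed_work, the "not initialised yet" check now reads reap_work->work.func, and arming goes through schedule_delayed_work_on(). A condensed sketch of the same shape, with invented names:

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>

static DEFINE_PER_CPU(struct delayed_work, ex_reap_work);

static void ex_reap(struct work_struct *unused)
{
	/* per-CPU housekeeping ... then rearm this CPU's instance */
	schedule_delayed_work(&__get_cpu_var(ex_reap_work), HZ);
}

static void ex_start_cpu_timer(int cpu)
{
	struct delayed_work *dw = &per_cpu(ex_reap_work, cpu);

	if (dw->work.func == NULL) {		/* never initialised on this CPU */
		INIT_DELAYED_WORK(dw, ex_reap);
		schedule_delayed_work_on(cpu, dw, HZ + 3 * cpu);
	}
}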
@@ -216,7 +216,7 @@ void lru_add_drain(void) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | #ifdef CONFIG_NUMA | 218 | #ifdef CONFIG_NUMA |
219 | static void lru_add_drain_per_cpu(void *dummy) | 219 | static void lru_add_drain_per_cpu(struct work_struct *dummy) |
220 | { | 220 | { |
221 | lru_add_drain(); | 221 | lru_add_drain(); |
222 | } | 222 | } |
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy) | |||
226 | */ | 226 | */ |
227 | int lru_add_drain_all(void) | 227 | int lru_add_drain_all(void) |
228 | { | 228 | { |
229 | return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); | 229 | return schedule_on_each_cpu(lru_add_drain_per_cpu); |
230 | } | 230 | } |
231 | 231 | ||
232 | #else | 232 | #else |
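With the info argument gone from schedule_on_each_cpu(), a handler run on every CPU must derive what it needs from the CPU it runs on (or from globals), as lru_add_drain_per_cpu() now does. The new calling convention, sketched with invented names:

#include <linux/workqueue.h>

static void ex_drain_local(struct work_struct *dummy)
{
	/* executes once on each online CPU, in keventd context;
	 * operate on this CPU's private state here */
}

static int ex_drain_all_cpus(void)
{
	return schedule_on_each_cpu(ex_drain_local);	/* 0 on success */
}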
diff --git a/net/atm/lec.c b/net/atm/lec.c index 5946ec63724..3fc0abeeaf3 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, | |||
1454 | 1454 | ||
1455 | #define LEC_ARP_REFRESH_INTERVAL (3*HZ) | 1455 | #define LEC_ARP_REFRESH_INTERVAL (3*HZ) |
1456 | 1456 | ||
1457 | static void lec_arp_check_expire(void *data); | 1457 | static void lec_arp_check_expire(struct work_struct *work); |
1458 | static void lec_arp_expire_arp(unsigned long data); | 1458 | static void lec_arp_expire_arp(unsigned long data); |
1459 | 1459 | ||
1460 | /* | 1460 | /* |
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv) | |||
1477 | INIT_HLIST_HEAD(&priv->lec_no_forward); | 1477 | INIT_HLIST_HEAD(&priv->lec_no_forward); |
1478 | INIT_HLIST_HEAD(&priv->mcast_fwds); | 1478 | INIT_HLIST_HEAD(&priv->mcast_fwds); |
1479 | spin_lock_init(&priv->lec_arp_lock); | 1479 | spin_lock_init(&priv->lec_arp_lock); |
1480 | INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); | 1480 | INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); |
1481 | schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); | 1481 | schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); |
1482 | } | 1482 | } |
1483 | 1483 | ||
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data) | |||
1875 | * to ESI_FORWARD_DIRECT. This causes the flush period to end | 1875 | * to ESI_FORWARD_DIRECT. This causes the flush period to end |
1876 | * regardless of the progress of the flush protocol. | 1876 | * regardless of the progress of the flush protocol. |
1877 | */ | 1877 | */ |
1878 | static void lec_arp_check_expire(void *data) | 1878 | static void lec_arp_check_expire(struct work_struct *work) |
1879 | { | 1879 | { |
1880 | unsigned long flags; | 1880 | unsigned long flags; |
1881 | struct lec_priv *priv = data; | 1881 | struct lec_priv *priv = |
1882 | container_of(work, struct lec_priv, lec_arp_work.work); | ||
1882 | struct hlist_node *node, *next; | 1883 | struct hlist_node *node, *next; |
1883 | struct lec_arp_table *entry; | 1884 | struct lec_arp_table *entry; |
1884 | unsigned long now; | 1885 | unsigned long now; |
diff --git a/net/atm/lec.h b/net/atm/lec.h index 24cc95f8674..99136babd53 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h | |||
@@ -92,7 +92,7 @@ struct lec_priv { | |||
92 | spinlock_t lec_arp_lock; | 92 | spinlock_t lec_arp_lock; |
93 | struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ | 93 | struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ |
94 | struct atm_vcc *lecd; | 94 | struct atm_vcc *lecd; |
95 | struct work_struct lec_arp_work; /* C10 */ | 95 | struct delayed_work lec_arp_work; /* C10 */ |
96 | unsigned int maximum_unknown_frame_count; | 96 | unsigned int maximum_unknown_frame_count; |
97 | /* | 97 | /* |
98 | * Within the period of time defined by this variable, the client will send | 98 | * Within the period of time defined by this variable, the client will send |
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 3eeeb7a86e7..d4c935692cc 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev) | |||
237 | kfree(data); | 237 | kfree(data); |
238 | } | 238 | } |
239 | 239 | ||
240 | static void add_conn(void *data) | 240 | static void add_conn(struct work_struct *work) |
241 | { | 241 | { |
242 | struct hci_conn *conn = data; | 242 | struct hci_conn *conn = container_of(work, struct hci_conn, work); |
243 | int i; | 243 | int i; |
244 | 244 | ||
245 | if (device_register(&conn->dev) < 0) { | 245 | if (device_register(&conn->dev) < 0) { |
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn) | |||
272 | 272 | ||
273 | dev_set_drvdata(&conn->dev, conn); | 273 | dev_set_drvdata(&conn->dev, conn); |
274 | 274 | ||
275 | INIT_WORK(&conn->work, add_conn, (void *) conn); | 275 | INIT_WORK(&conn->work, add_conn); |
276 | 276 | ||
277 | schedule_work(&conn->work); | 277 | schedule_work(&conn->work); |
278 | } | 278 | } |
279 | 279 | ||
280 | static void del_conn(void *data) | 280 | static void del_conn(struct work_struct *work) |
281 | { | 281 | { |
282 | struct hci_conn *conn = data; | 282 | struct hci_conn *conn = container_of(work, struct hci_conn, work); |
283 | device_del(&conn->dev); | 283 | device_del(&conn->dev); |
284 | } | 284 | } |
285 | 285 | ||
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn) | |||
287 | { | 287 | { |
288 | BT_DBG("conn %p", conn); | 288 | BT_DBG("conn %p", conn); |
289 | 289 | ||
290 | INIT_WORK(&conn->work, del_conn, (void *) conn); | 290 | INIT_WORK(&conn->work, del_conn); |
291 | 291 | ||
292 | schedule_work(&conn->work); | 292 | schedule_work(&conn->work); |
293 | } | 293 | } |
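The Bluetooth sysfs code also shows that a single embedded work_struct can be pointed at different handlers over its lifetime: INIT_WORK() is simply called again with the next function before each schedule_work(), which is safe as long as the previous use has finished. Roughly, with invented names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct ex_conn {
	struct work_struct work;	/* reused for different deferred ops */
	/* ... */
};

static void ex_conn_add(struct work_struct *work)
{
	struct ex_conn *conn = container_of(work, struct ex_conn, work);

	pr_debug("registering conn %p\n", conn);
	/* register conn with sysfs ... */
}

static void ex_conn_del(struct work_struct *work)
{
	struct ex_conn *conn = container_of(work, struct ex_conn, work);

	pr_debug("removing conn %p\n", conn);
	/* tear conn down ... */
}

static void ex_conn_add_deferred(struct ex_conn *conn)
{
	INIT_WORK(&conn->work, ex_conn_add);	/* rebind, then queue */
	schedule_work(&conn->work);
}

static void ex_conn_del_deferred(struct ex_conn *conn)
{
	INIT_WORK(&conn->work, ex_conn_del);
	schedule_work(&conn->work);
}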
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f753c40c11d..55bb2634c08 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev) | |||
77 | * Called from work queue to allow for calling functions that | 77 | * Called from work queue to allow for calling functions that |
78 | * might sleep (such as speed check), and to debounce. | 78 | * might sleep (such as speed check), and to debounce. |
79 | */ | 79 | */ |
80 | static void port_carrier_check(void *arg) | 80 | static void port_carrier_check(struct work_struct *work) |
81 | { | 81 | { |
82 | struct net_device *dev = arg; | ||
83 | struct net_bridge_port *p; | 82 | struct net_bridge_port *p; |
83 | struct net_device *dev; | ||
84 | struct net_bridge *br; | 84 | struct net_bridge *br; |
85 | 85 | ||
86 | dev = container_of(work, struct net_bridge_port, | ||
87 | carrier_check.work)->dev; | ||
88 | work_release(work); | ||
89 | |||
86 | rtnl_lock(); | 90 | rtnl_lock(); |
87 | p = dev->br_port; | 91 | p = dev->br_port; |
88 | if (!p) | 92 | if (!p) |
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
276 | p->port_no = index; | 280 | p->port_no = index; |
277 | br_init_port(p); | 281 | br_init_port(p); |
278 | p->state = BR_STATE_DISABLED; | 282 | p->state = BR_STATE_DISABLED; |
279 | INIT_WORK(&p->carrier_check, port_carrier_check, dev); | 283 | INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check); |
280 | br_stp_port_timer_init(p); | 284 | br_stp_port_timer_init(p); |
281 | 285 | ||
282 | kobject_init(&p->kobj); | 286 | kobject_init(&p->kobj); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 74258d86f25..3a534e94c7f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -82,7 +82,7 @@ struct net_bridge_port | |||
82 | struct timer_list hold_timer; | 82 | struct timer_list hold_timer; |
83 | struct timer_list message_age_timer; | 83 | struct timer_list message_age_timer; |
84 | struct kobject kobj; | 84 | struct kobject kobj; |
85 | struct work_struct carrier_check; | 85 | struct delayed_work carrier_check; |
86 | struct rcu_head rcu; | 86 | struct rcu_head rcu; |
87 | }; | 87 | }; |
88 | 88 | ||
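The bridge port uses the non-auto-release variant introduced by this series: INIT_DELAYED_WORK_NAR() keeps the pending bit set across the handler call (see the WORK_STRUCT_NOAUTOREL test in run_workqueue() above), and the handler must call work_release() itself once it has pulled what it needs out of the container. Roughly, with invented names:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct ex_port {
	struct delayed_work carrier_check;	/* NAR: handler releases it */
	struct net_device *dev;
};

static void ex_carrier_check(struct work_struct *work)
{
	struct net_device *dev =
		container_of(work, struct ex_port, carrier_check.work)->dev;

	/* pending stays set until here, so the item cannot be requeued
	 * while we are still reading the container */
	work_release(work);

	if (!netif_running(dev))
		return;
	/* ... carrier/speed checks on dev, may sleep ... */
}

static void ex_port_init(struct ex_port *p, struct net_device *dev)
{
	p->dev = dev;
	INIT_DELAYED_WORK_NAR(&p->carrier_check, ex_carrier_check);
}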
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 4b36114744c..549a2ce951b 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -34,8 +34,8 @@ enum lw_bits { | |||
34 | static unsigned long linkwatch_flags; | 34 | static unsigned long linkwatch_flags; |
35 | static unsigned long linkwatch_nextevent; | 35 | static unsigned long linkwatch_nextevent; |
36 | 36 | ||
37 | static void linkwatch_event(void *dummy); | 37 | static void linkwatch_event(struct work_struct *dummy); |
38 | static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); | 38 | static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); |
39 | 39 | ||
40 | static LIST_HEAD(lweventlist); | 40 | static LIST_HEAD(lweventlist); |
41 | static DEFINE_SPINLOCK(lweventlist_lock); | 41 | static DEFINE_SPINLOCK(lweventlist_lock); |
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | 129 | ||
130 | static void linkwatch_event(void *dummy) | 130 | static void linkwatch_event(struct work_struct *dummy) |
131 | { | 131 | { |
132 | /* Limit the number of linkwatch events to one | 132 | /* Limit the number of linkwatch events to one |
133 | * per second so that a runaway driver does not | 133 | * per second so that a runaway driver does not |
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev) | |||
171 | unsigned long delay = linkwatch_nextevent - jiffies; | 171 | unsigned long delay = linkwatch_nextevent - jiffies; |
172 | 172 | ||
173 | /* If we wrap around we'll delay it by at most HZ. */ | 173 | /* If we wrap around we'll delay it by at most HZ. */ |
174 | if (!delay || delay > HZ) | 174 | if (delay > HZ) |
175 | schedule_work(&linkwatch_work); | 175 | delay = 0; |
176 | else | 176 | schedule_delayed_work(&linkwatch_work, delay); |
177 | schedule_delayed_work(&linkwatch_work, delay); | ||
178 | } | 177 | } |
179 | } | 178 | } |
180 | } | 179 | } |
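Because queue_delayed_work() now treats a zero delay as "queue immediately", call sites like linkwatch no longer branch between schedule_work() and schedule_delayed_work(); they clamp the delay and always take the delayed path. A sketch with invented names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void ex_linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(ex_linkwatch_work, ex_linkwatch_event);

static void ex_linkwatch_event(struct work_struct *dummy)
{
	/* process the queued link-state events ... */
}

static void ex_linkwatch_fire(unsigned long nextevent)
{
	unsigned long delay = nextevent - jiffies;

	if (delay > HZ)				/* wrapped or too far out */
		delay = 0;			/* zero now means "run at once" */
	schedule_delayed_work(&ex_linkwatch_work, delay);
}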
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3c58846fcaa..b3c559b9ac3 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -50,9 +50,10 @@ static atomic_t trapped; | |||
50 | static void zap_completion_queue(void); | 50 | static void zap_completion_queue(void); |
51 | static void arp_reply(struct sk_buff *skb); | 51 | static void arp_reply(struct sk_buff *skb); |
52 | 52 | ||
53 | static void queue_process(void *p) | 53 | static void queue_process(struct work_struct *work) |
54 | { | 54 | { |
55 | struct netpoll_info *npinfo = p; | 55 | struct netpoll_info *npinfo = |
56 | container_of(work, struct netpoll_info, tx_work.work); | ||
56 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
57 | 58 | ||
58 | while ((skb = skb_dequeue(&npinfo->txq))) { | 59 | while ((skb = skb_dequeue(&npinfo->txq))) { |
@@ -72,8 +73,6 @@ static void queue_process(void *p) | |||
72 | schedule_delayed_work(&npinfo->tx_work, HZ/10); | 73 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
73 | return; | 74 | return; |
74 | } | 75 | } |
75 | |||
76 | netif_tx_unlock_bh(dev); | ||
77 | } | 76 | } |
78 | } | 77 | } |
79 | 78 | ||
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | |||
263 | 262 | ||
264 | if (status != NETDEV_TX_OK) { | 263 | if (status != NETDEV_TX_OK) { |
265 | skb_queue_tail(&npinfo->txq, skb); | 264 | skb_queue_tail(&npinfo->txq, skb); |
266 | schedule_work(&npinfo->tx_work); | 265 | schedule_delayed_work(&npinfo->tx_work,0); |
267 | } | 266 | } |
268 | } | 267 | } |
269 | 268 | ||
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np) | |||
628 | spin_lock_init(&npinfo->rx_lock); | 627 | spin_lock_init(&npinfo->rx_lock); |
629 | skb_queue_head_init(&npinfo->arp_tx); | 628 | skb_queue_head_init(&npinfo->arp_tx); |
630 | skb_queue_head_init(&npinfo->txq); | 629 | skb_queue_head_init(&npinfo->txq); |
631 | INIT_WORK(&npinfo->tx_work, queue_process, npinfo); | 630 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); |
632 | 631 | ||
633 | atomic_set(&npinfo->refcnt, 1); | 632 | atomic_set(&npinfo->refcnt, 1); |
634 | } else { | 633 | } else { |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 7b52f2a03ee..4c9e26775f7 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = { | |||
32 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 32 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
33 | (unsigned long)&dccp_death_row), | 33 | (unsigned long)&dccp_death_row), |
34 | .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, | 34 | .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, |
35 | inet_twdr_twkill_work, | 35 | inet_twdr_twkill_work), |
36 | &dccp_death_row), | ||
37 | /* Short-time timewait calendar */ | 36 | /* Short-time timewait calendar */ |
38 | 37 | ||
39 | .twcal_hand = -1, | 38 | .twcal_hand = -1, |
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index cf51c87a971..08386c10295 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c | |||
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft | |||
58 | } | 58 | } |
59 | 59 | ||
60 | void | 60 | void |
61 | ieee80211softmac_assoc_timeout(void *d) | 61 | ieee80211softmac_assoc_timeout(struct work_struct *work) |
62 | { | 62 | { |
63 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; | 63 | struct ieee80211softmac_device *mac = |
64 | container_of(work, struct ieee80211softmac_device, | ||
65 | associnfo.timeout.work); | ||
64 | struct ieee80211softmac_network *n; | 66 | struct ieee80211softmac_network *n; |
65 | 67 | ||
66 | mutex_lock(&mac->associnfo.mutex); | 68 | mutex_lock(&mac->associnfo.mutex); |
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void | |||
186 | 188 | ||
187 | /* This function is called to handle userspace requests (asynchronously) */ | 189 | /* This function is called to handle userspace requests (asynchronously) */ |
188 | void | 190 | void |
189 | ieee80211softmac_assoc_work(void *d) | 191 | ieee80211softmac_assoc_work(struct work_struct *work) |
190 | { | 192 | { |
191 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; | 193 | struct ieee80211softmac_device *mac = |
194 | container_of(work, struct ieee80211softmac_device, | ||
195 | associnfo.work.work); | ||
192 | struct ieee80211softmac_network *found = NULL; | 196 | struct ieee80211softmac_network *found = NULL; |
193 | struct ieee80211_network *net = NULL, *best = NULL; | 197 | struct ieee80211_network *net = NULL, *best = NULL; |
194 | int bssvalid; | 198 | int bssvalid; |
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, | |||
412 | network->authenticated = 0; | 416 | network->authenticated = 0; |
413 | /* we don't want to do this more than once ... */ | 417 | /* we don't want to do this more than once ... */ |
414 | network->auth_desynced_once = 1; | 418 | network->auth_desynced_once = 1; |
415 | schedule_work(&mac->associnfo.work); | 419 | schedule_delayed_work(&mac->associnfo.work, 0); |
416 | break; | 420 | break; |
417 | } | 421 | } |
418 | default: | 422 | default: |
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
446 | ieee80211softmac_disassoc(mac); | 450 | ieee80211softmac_disassoc(mac); |
447 | 451 | ||
448 | /* try to reassociate */ | 452 | /* try to reassociate */ |
449 | schedule_work(&mac->associnfo.work); | 453 | schedule_delayed_work(&mac->associnfo.work, 0); |
450 | 454 | ||
451 | return 0; | 455 | return 0; |
452 | } | 456 | } |
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, | |||
466 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); | 470 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); |
467 | return 0; | 471 | return 0; |
468 | } | 472 | } |
469 | schedule_work(&mac->associnfo.work); | 473 | schedule_delayed_work(&mac->associnfo.work, 0); |
470 | 474 | ||
471 | return 0; | 475 | return 0; |
472 | } | 476 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 0612015f1c7..6012705aa4f 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211softmac_priv.h" | 27 | #include "ieee80211softmac_priv.h" |
28 | 28 | ||
29 | static void ieee80211softmac_auth_queue(void *data); | 29 | static void ieee80211softmac_auth_queue(struct work_struct *work); |
30 | 30 | ||
31 | /* Queues an auth request to the desired AP */ | 31 | /* Queues an auth request to the desired AP */ |
32 | int | 32 | int |
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, | |||
54 | auth->mac = mac; | 54 | auth->mac = mac; |
55 | auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; | 55 | auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; |
56 | auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; | 56 | auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; |
57 | INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); | 57 | INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); |
58 | 58 | ||
59 | /* Lock (for list) */ | 59 | /* Lock (for list) */ |
60 | spin_lock_irqsave(&mac->lock, flags); | 60 | spin_lock_irqsave(&mac->lock, flags); |
61 | 61 | ||
62 | /* add to list */ | 62 | /* add to list */ |
63 | list_add_tail(&auth->list, &mac->auth_queue); | 63 | list_add_tail(&auth->list, &mac->auth_queue); |
64 | schedule_work(&auth->work); | 64 | schedule_delayed_work(&auth->work, 0); |
65 | spin_unlock_irqrestore(&mac->lock, flags); | 65 | spin_unlock_irqrestore(&mac->lock, flags); |
66 | 66 | ||
67 | return 0; | 67 | return 0; |
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, | |||
70 | 70 | ||
71 | /* Sends an auth request to the desired AP and handles timeouts */ | 71 | /* Sends an auth request to the desired AP and handles timeouts */ |
72 | static void | 72 | static void |
73 | ieee80211softmac_auth_queue(void *data) | 73 | ieee80211softmac_auth_queue(struct work_struct *work) |
74 | { | 74 | { |
75 | struct ieee80211softmac_device *mac; | 75 | struct ieee80211softmac_device *mac; |
76 | struct ieee80211softmac_auth_queue_item *auth; | 76 | struct ieee80211softmac_auth_queue_item *auth; |
77 | struct ieee80211softmac_network *net; | 77 | struct ieee80211softmac_network *net; |
78 | unsigned long flags; | 78 | unsigned long flags; |
79 | 79 | ||
80 | auth = (struct ieee80211softmac_auth_queue_item *)data; | 80 | auth = container_of(work, struct ieee80211softmac_auth_queue_item, |
81 | work.work); | ||
81 | net = auth->net; | 82 | net = auth->net; |
82 | mac = auth->mac; | 83 | mac = auth->mac; |
83 | 84 | ||
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data) | |||
118 | 119 | ||
119 | /* Sends a response to an auth challenge (for shared key auth). */ | 120 | /* Sends a response to an auth challenge (for shared key auth). */ |
120 | static void | 121 | static void |
121 | ieee80211softmac_auth_challenge_response(void *_aq) | 122 | ieee80211softmac_auth_challenge_response(struct work_struct *work) |
122 | { | 123 | { |
123 | struct ieee80211softmac_auth_queue_item *aq = _aq; | 124 | struct ieee80211softmac_auth_queue_item *aq = |
125 | container_of(work, struct ieee80211softmac_auth_queue_item, | ||
126 | work.work); | ||
124 | 127 | ||
125 | /* Send our response */ | 128 | /* Send our response */ |
126 | ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); | 129 | ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); |
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
234 | * we have obviously already sent the initial auth | 237 | * we have obviously already sent the initial auth |
235 | * request. */ | 238 | * request. */ |
236 | cancel_delayed_work(&aq->work); | 239 | cancel_delayed_work(&aq->work); |
237 | INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); | 240 | INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); |
238 | schedule_work(&aq->work); | 241 | schedule_delayed_work(&aq->work, 0); |
239 | spin_unlock_irqrestore(&mac->lock, flags); | 242 | spin_unlock_irqrestore(&mac->lock, flags); |
240 | return 0; | 243 | return 0; |
241 | case IEEE80211SOFTMAC_AUTH_SHARED_PASS: | 244 | case IEEE80211SOFTMAC_AUTH_SHARED_PASS: |
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
398 | ieee80211softmac_deauth_from_net(mac, net); | 401 | ieee80211softmac_deauth_from_net(mac, net); |
399 | 402 | ||
400 | /* let's try to re-associate */ | 403 | /* let's try to re-associate */ |
401 | schedule_work(&mac->associnfo.work); | 404 | schedule_delayed_work(&mac->associnfo.work, 0); |
402 | return 0; | 405 | return 0; |
403 | } | 406 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c index f34fa2ef666..b9015656cfb 100644 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/net/ieee80211/softmac/ieee80211softmac_event.c | |||
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { | |||
73 | 73 | ||
74 | 74 | ||
75 | static void | 75 | static void |
76 | ieee80211softmac_notify_callback(void *d) | 76 | ieee80211softmac_notify_callback(struct work_struct *work) |
77 | { | 77 | { |
78 | struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; | 78 | struct ieee80211softmac_event *pevent = |
79 | kfree(d); | 79 | container_of(work, struct ieee80211softmac_event, work.work); |
80 | struct ieee80211softmac_event event = *pevent; | ||
81 | kfree(pevent); | ||
80 | 82 | ||
81 | event.fun(event.mac->dev, event.event_type, event.context); | 83 | event.fun(event.mac->dev, event.event_type, event.context); |
82 | } | 84 | } |
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, | |||
99 | return -ENOMEM; | 101 | return -ENOMEM; |
100 | 102 | ||
101 | eventptr->event_type = event; | 103 | eventptr->event_type = event; |
102 | INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); | 104 | INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); |
103 | eventptr->fun = fun; | 105 | eventptr->fun = fun; |
104 | eventptr->context = context; | 106 | eventptr->context = context; |
105 | eventptr->mac = mac; | 107 | eventptr->mac = mac; |
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve | |||
170 | /* User may have subscribed to ANY event, so | 172 | /* User may have subscribed to ANY event, so |
171 | * we tell them which event triggered it. */ | 173 | * we tell them which event triggered it. */ |
172 | eventptr->event_type = event; | 174 | eventptr->event_type = event; |
173 | schedule_work(&eventptr->work); | 175 | schedule_delayed_work(&eventptr->work, 0); |
174 | } | 176 | } |
175 | } | 177 | } |
176 | } | 178 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 33aff4f4a47..256207b71dc 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c | |||
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) | |||
58 | INIT_LIST_HEAD(&softmac->events); | 58 | INIT_LIST_HEAD(&softmac->events); |
59 | 59 | ||
60 | mutex_init(&softmac->associnfo.mutex); | 60 | mutex_init(&softmac->associnfo.mutex); |
61 | INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); | 61 | INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); |
62 | INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); | 62 | INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); |
63 | softmac->start_scan = ieee80211softmac_start_scan_implementation; | 63 | softmac->start_scan = ieee80211softmac_start_scan_implementation; |
64 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; | 64 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; |
65 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; | 65 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; |
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h index 0642e090b8a..c0dbe070e54 100644 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/net/ieee80211/softmac/ieee80211softmac_priv.h | |||
@@ -78,7 +78,7 @@ | |||
78 | /* private definitions and prototypes */ | 78 | /* private definitions and prototypes */ |
79 | 79 | ||
80 | /*** prototypes from _scan.c */ | 80 | /*** prototypes from _scan.c */ |
81 | void ieee80211softmac_scan(void *sm); | 81 | void ieee80211softmac_scan(struct work_struct *work); |
82 | /* for internal use if scanning is needed */ | 82 | /* for internal use if scanning is needed */ |
83 | int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); | 83 | int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); |
84 | void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); | 84 | void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); |
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au | |||
149 | int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); | 149 | int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); |
150 | 150 | ||
151 | /*** prototypes from _assoc.c */ | 151 | /*** prototypes from _assoc.c */ |
152 | void ieee80211softmac_assoc_work(void *d); | 152 | void ieee80211softmac_assoc_work(struct work_struct *work); |
153 | int ieee80211softmac_handle_assoc_response(struct net_device * dev, | 153 | int ieee80211softmac_handle_assoc_response(struct net_device * dev, |
154 | struct ieee80211_assoc_response * resp, | 154 | struct ieee80211_assoc_response * resp, |
155 | struct ieee80211_network * network); | 155 | struct ieee80211_network * network); |
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
157 | struct ieee80211_disassoc * disassoc); | 157 | struct ieee80211_disassoc * disassoc); |
158 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, | 158 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, |
159 | struct ieee80211_reassoc_request * reassoc); | 159 | struct ieee80211_reassoc_request * reassoc); |
160 | void ieee80211softmac_assoc_timeout(void *d); | 160 | void ieee80211softmac_assoc_timeout(struct work_struct *work); |
161 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); | 161 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); |
162 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); | 162 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); |
163 | 163 | ||
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item { | |||
207 | struct ieee80211softmac_device *mac; /* SoftMAC device */ | 207 | struct ieee80211softmac_device *mac; /* SoftMAC device */ |
208 | u8 retry; /* Retry limit */ | 208 | u8 retry; /* Retry limit */ |
209 | u8 state; /* Auth State */ | 209 | u8 state; /* Auth State */ |
210 | struct work_struct work; /* Work queue */ | 210 | struct delayed_work work; /* Work queue */ |
211 | }; | 211 | }; |
212 | 212 | ||
213 | /* scanning information */ | 213 | /* scanning information */ |
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo { | |||
219 | stop:1; | 219 | stop:1; |
220 | u8 skip_flags; | 220 | u8 skip_flags; |
221 | struct completion finished; | 221 | struct completion finished; |
222 | struct work_struct softmac_scan; | 222 | struct delayed_work softmac_scan; |
223 | struct ieee80211softmac_device *mac; | ||
223 | }; | 224 | }; |
224 | 225 | ||
225 | /* private event struct */ | 226 | /* private event struct */ |
@@ -227,7 +228,7 @@ struct ieee80211softmac_event { | |||
227 | struct list_head list; | 228 | struct list_head list; |
228 | int event_type; | 229 | int event_type; |
229 | void *event_context; | 230 | void *event_context; |
230 | struct work_struct work; | 231 | struct delayed_work work; |
231 | notify_function_ptr fun; | 232 | notify_function_ptr fun; |
232 | void *context; | 233 | void *context; |
233 | struct ieee80211softmac_device *mac; | 234 | struct ieee80211softmac_device *mac; |
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index 5507feab32d..0c85d6c24cd 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c | |||
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm) | |||
90 | 90 | ||
91 | 91 | ||
92 | /* internal scanning implementation follows */ | 92 | /* internal scanning implementation follows */ |
93 | void ieee80211softmac_scan(void *d) | 93 | void ieee80211softmac_scan(struct work_struct *work) |
94 | { | 94 | { |
95 | int invalid_channel; | 95 | int invalid_channel; |
96 | u8 current_channel_idx; | 96 | u8 current_channel_idx; |
97 | struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; | 97 | struct ieee80211softmac_scaninfo *si = |
98 | struct ieee80211softmac_scaninfo *si = sm->scaninfo; | 98 | container_of(work, struct ieee80211softmac_scaninfo, |
99 | softmac_scan.work); | ||
100 | struct ieee80211softmac_device *sm = si->mac; | ||
99 | unsigned long flags; | 101 | unsigned long flags; |
100 | 102 | ||
101 | while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { | 103 | while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { |
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802 | |||
146 | struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); | 148 | struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); |
147 | if (unlikely(!info)) | 149 | if (unlikely(!info)) |
148 | return NULL; | 150 | return NULL; |
149 | INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); | 151 | INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); |
152 | info->mac = mac; | ||
150 | init_completion(&info->finished); | 153 | init_completion(&info->finished); |
151 | return info; | 154 | return info; |
152 | } | 155 | } |
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) | |||
187 | sm->scaninfo->started = 1; | 190 | sm->scaninfo->started = 1; |
188 | sm->scaninfo->stop = 0; | 191 | sm->scaninfo->stop = 0; |
189 | INIT_COMPLETION(sm->scaninfo->finished); | 192 | INIT_COMPLETION(sm->scaninfo->finished); |
190 | schedule_work(&sm->scaninfo->softmac_scan); | 193 | schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); |
191 | spin_unlock_irqrestore(&sm->lock, flags); | 194 | spin_unlock_irqrestore(&sm->lock, flags); |
192 | return 0; | 195 | return 0; |
193 | } | 196 | } |
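When the old data pointer was not the structure that embeds the work item — softmac's scan handler used to receive the device, but the delayed work lives inside the scaninfo — the conversion adds a back-pointer field to the embedding structure and fills it in where INIT_WORK()'s third argument used to go. Sketch, with invented names (struct ex_device stands in for whatever the old void * carried):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct ex_device;

struct ex_scan_state {
	struct delayed_work scan;
	struct ex_device *dev;		/* back-pointer replacing the old data arg */
};

static void ex_scan_step(struct work_struct *work)
{
	struct ex_scan_state *st =
		container_of(work, struct ex_scan_state, scan.work);

	/* ... scan one channel on st->dev, then rearm if not finished ... */
	schedule_delayed_work(&st->scan, msecs_to_jiffies(100));
}

static void ex_scan_init(struct ex_scan_state *st, struct ex_device *dev)
{
	INIT_DELAYED_WORK(&st->scan, ex_scan_step);
	st->dev = dev;
}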
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 23068a830f7..2ffaebd21c5 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c | |||
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, | |||
122 | 122 | ||
123 | sm->associnfo.associating = 1; | 123 | sm->associnfo.associating = 1; |
124 | /* queue lower level code to do work (if necessary) */ | 124 | /* queue lower level code to do work (if necessary) */ |
125 | schedule_work(&sm->associnfo.work); | 125 | schedule_delayed_work(&sm->associnfo.work, 0); |
126 | out: | 126 | out: |
127 | mutex_unlock(&sm->associnfo.mutex); | 127 | mutex_unlock(&sm->associnfo.mutex); |
128 | 128 | ||
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, | |||
356 | /* force reassociation */ | 356 | /* force reassociation */ |
357 | mac->associnfo.bssvalid = 0; | 357 | mac->associnfo.bssvalid = 0; |
358 | if (mac->associnfo.associated) | 358 | if (mac->associnfo.associated) |
359 | schedule_work(&mac->associnfo.work); | 359 | schedule_delayed_work(&mac->associnfo.work, 0); |
360 | } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { | 360 | } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { |
361 | /* the bssid we have is no longer fixed */ | 361 | /* the bssid we have is no longer fixed */ |
362 | mac->associnfo.bssfixed = 0; | 362 | mac->associnfo.bssfixed = 0; |
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, | |||
373 | /* tell the other code that this bssid should be used no matter what */ | 373 | /* tell the other code that this bssid should be used no matter what */ |
374 | mac->associnfo.bssfixed = 1; | 374 | mac->associnfo.bssfixed = 1; |
375 | /* queue associate if new bssid or (old one again and not associated) */ | 375 | /* queue associate if new bssid or (old one again and not associated) */ |
376 | schedule_work(&mac->associnfo.work); | 376 | schedule_delayed_work(&mac->associnfo.work, 0); |
377 | } | 377 | } |
378 | 378 | ||
379 | out: | 379 | out: |
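Call sites that used to schedule_work() these items now pass an explicit delay of 0 jiffies, which queues the work immediately; functionally this is the old schedule_work() behaviour expressed through the delayed-work interface. A tiny hedged example, with an invented assoc_info container standing in for the real associnfo layout:

#include <linux/workqueue.h>

struct assoc_info {			/* invented stand-in, not the softmac layout */
	struct delayed_work work;
	int associating;
};

static void assoc_kick(struct assoc_info *ai)
{
	ai->associating = 1;
	/* 0 jiffies: queue right away, i.e. the old schedule_work() semantics */
	schedule_delayed_work(&ai->work, 0);
}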
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index cdd805344c6..8c74f9168b7 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman); | |||
197 | 197 | ||
198 | extern void twkill_slots_invalid(void); | 198 | extern void twkill_slots_invalid(void); |
199 | 199 | ||
200 | void inet_twdr_twkill_work(void *data) | 200 | void inet_twdr_twkill_work(struct work_struct *work) |
201 | { | 201 | { |
202 | struct inet_timewait_death_row *twdr = data; | 202 | struct inet_timewait_death_row *twdr = |
203 | container_of(work, struct inet_timewait_death_row, twkill_work); | ||
203 | int i; | 204 | int i; |
204 | 205 | ||
205 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) | 206 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) |
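Where the work_struct stays non-delayed, the only change inside the handler is recovering the owning structure with container_of() instead of casting a void * argument. A minimal sketch under an assumed reap_ctx layout (the field names are illustrative, not the real inet_timewait_death_row):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct reap_ctx {
	int thread_slots;
	struct work_struct twkill_work;	/* embedded work item */
};

static void twkill_worker(struct work_struct *work)
{
	/* container_of() maps the embedded member back to its parent,
	 * replacing the old void *data parameter. */
	struct reap_ctx *ctx = container_of(work, struct reap_ctx, twkill_work);

	ctx->thread_slots = 0;
}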
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index f261616e460..9b933381ebb 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -221,10 +221,10 @@ static void update_defense_level(void) | |||
221 | * Timer for checking the defense | 221 | * Timer for checking the defense |
222 | */ | 222 | */ |
223 | #define DEFENSE_TIMER_PERIOD 1*HZ | 223 | #define DEFENSE_TIMER_PERIOD 1*HZ |
224 | static void defense_work_handler(void *data); | 224 | static void defense_work_handler(struct work_struct *work); |
225 | static DECLARE_WORK(defense_work, defense_work_handler, NULL); | 225 | static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); |
226 | 226 | ||
227 | static void defense_work_handler(void *data) | 227 | static void defense_work_handler(struct work_struct *work) |
228 | { | 228 | { |
229 | update_defense_level(); | 229 | update_defense_level(); |
230 | if (atomic_read(&ip_vs_dropentry)) | 230 | if (atomic_read(&ip_vs_dropentry)) |
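For file-static items such as the IPVS defense work, the declaration macro changes from DECLARE_WORK(name, fn, data) to DECLARE_DELAYED_WORK(name, fn); there is no data slot, so a handler of this kind typically works on file-static state only. A hedged sketch of a self-rearming periodic check (CHECK_PERIOD, defense_level and the handler body are assumptions, not the IPVS code):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define CHECK_PERIOD (1 * HZ)

static int defense_level;

static void periodic_check(struct work_struct *work);
static DECLARE_DELAYED_WORK(check_work, periodic_check);	/* no data argument */

static void periodic_check(struct work_struct *work)
{
	defense_level++;					/* placeholder for the real check */
	schedule_delayed_work(&check_work, CHECK_PERIOD);	/* re-arm */
}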
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6dddf59c1fb..4a3889dd194 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = { | |||
45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
46 | (unsigned long)&tcp_death_row), | 46 | (unsigned long)&tcp_death_row), |
47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, | 47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, |
48 | inet_twdr_twkill_work, | 48 | inet_twdr_twkill_work), |
49 | &tcp_death_row), | ||
50 | /* Short-time timewait calendar */ | 49 | /* Short-time timewait calendar */ |
51 | 50 | ||
52 | .twcal_hand = -1, | 51 | .twcal_hand = -1, |
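Compile-time initialisers follow the same rule: __WORK_INITIALIZER() drops its data argument, so a statically initialised structure names only the member and the handler, and the handler gets its context back via container_of(). A sketch with an invented reaper structure mirroring the tcp_death_row style:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct reaper {
	int pending;
	struct work_struct work;
};

static void reap(struct work_struct *work);

static struct reaper tw_reaper = {
	.pending = 1,
	/* two-argument initialiser; the old per-item data pointer is gone */
	.work    = __WORK_INITIALIZER(tw_reaper.work, reap),
};

static void reap(struct work_struct *work)
{
	struct reaper *r = container_of(work, struct reaper, work);

	r->pending = 0;
}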
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index d50a02030ad..262bda808d9 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty); | |||
61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); | 61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); |
62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); | 62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); |
63 | static void ircomm_tty_hangup(struct tty_struct *tty); | 63 | static void ircomm_tty_hangup(struct tty_struct *tty); |
64 | static void ircomm_tty_do_softint(void *private_); | 64 | static void ircomm_tty_do_softint(struct work_struct *work); |
65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); | 65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); |
66 | static void ircomm_tty_stop(struct tty_struct *tty); | 66 | static void ircomm_tty_stop(struct tty_struct *tty); |
67 | 67 | ||
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
389 | self->flow = FLOW_STOP; | 389 | self->flow = FLOW_STOP; |
390 | 390 | ||
391 | self->line = line; | 391 | self->line = line; |
392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); | 392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint); |
393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; | 393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; |
394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; | 394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; |
395 | self->close_delay = 5*HZ/10; | 395 | self->close_delay = 5*HZ/10; |
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty) | |||
594 | } | 594 | } |
595 | 595 | ||
596 | /* | 596 | /* |
597 | * Function ircomm_tty_do_softint (private_) | 597 | * Function ircomm_tty_do_softint (work) |
598 | * | 598 | * |
599 | * We use this routine to give the write wakeup to the user at a | 599 | * We use this routine to give the write wakeup to the user at a |
600 | * safe time (as fast as possible after writes have completed). This | 600 | * safe time (as fast as possible after writes have completed). This |
601 | * can be compared to the Tx interrupt. | 601 | * can be compared to the Tx interrupt. |
602 | */ | 602 | */ |
603 | static void ircomm_tty_do_softint(void *private_) | 603 | static void ircomm_tty_do_softint(struct work_struct *work) |
604 | { | 604 | { |
605 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; | 605 | struct ircomm_tty_cb *self = |
606 | container_of(work, struct ircomm_tty_cb, tqueue); | ||
606 | struct tty_struct *tty; | 607 | struct tty_struct *tty; |
607 | unsigned long flags; | 608 | unsigned long flags; |
608 | struct sk_buff *skb, *ctrl_skb; | 609 | struct sk_buff *skb, *ctrl_skb; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 39471d3b31b..ad0057db0f9 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal functions. */ | 63 | /* Forward declarations for internal functions. */ |
64 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc); | 64 | static void sctp_assoc_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | 66 | ||
67 | /* 1st Level Abstractions. */ | 67 | /* 1st Level Abstractions. */ |
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
269 | 269 | ||
270 | /* Create an input queue. */ | 270 | /* Create an input queue. */ |
271 | sctp_inq_init(&asoc->base.inqueue); | 271 | sctp_inq_init(&asoc->base.inqueue); |
272 | sctp_inq_set_th_handler(&asoc->base.inqueue, | 272 | sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); |
273 | (void (*)(void *))sctp_assoc_bh_rcv, | ||
274 | asoc); | ||
275 | 273 | ||
276 | /* Create an output queue. */ | 274 | /* Create an output queue. */ |
277 | sctp_outq_init(asoc, &asoc->outqueue); | 275 | sctp_outq_init(asoc, &asoc->outqueue); |
@@ -946,8 +944,11 @@ out: | |||
946 | } | 944 | } |
947 | 945 | ||
948 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ | 946 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ |
949 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc) | 947 | static void sctp_assoc_bh_rcv(struct work_struct *work) |
950 | { | 948 | { |
949 | struct sctp_association *asoc = | ||
950 | container_of(work, struct sctp_association, | ||
951 | base.inqueue.immediate); | ||
951 | struct sctp_endpoint *ep; | 952 | struct sctp_endpoint *ep; |
952 | struct sctp_chunk *chunk; | 953 | struct sctp_chunk *chunk; |
953 | struct sock *sk; | 954 | struct sock *sk; |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 33a42e90c32..129756908da 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal helpers. */ | 63 | /* Forward declarations for internal helpers. */ |
64 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); | 64 | static void sctp_endpoint_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Initialize the base fields of the endpoint structure. | 67 | * Initialize the base fields of the endpoint structure. |
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
89 | sctp_inq_init(&ep->base.inqueue); | 89 | sctp_inq_init(&ep->base.inqueue); |
90 | 90 | ||
91 | /* Set its top-half handler */ | 91 | /* Set its top-half handler */ |
92 | sctp_inq_set_th_handler(&ep->base.inqueue, | 92 | sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); |
93 | (void (*)(void *))sctp_endpoint_bh_rcv, ep); | ||
94 | 93 | ||
95 | /* Initialize the bind addr area */ | 94 | /* Initialize the bind addr area */ |
96 | sctp_bind_addr_init(&ep->base.bind_addr, 0); | 95 | sctp_bind_addr_init(&ep->base.bind_addr, 0); |
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, | |||
318 | /* Do delayed input processing. This is scheduled by sctp_rcv(). | 317 | /* Do delayed input processing. This is scheduled by sctp_rcv(). |
319 | * This may be called on BH or task time. | 318 | * This may be called on BH or task time. |
320 | */ | 319 | */ |
321 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) | 320 | static void sctp_endpoint_bh_rcv(struct work_struct *work) |
322 | { | 321 | { |
322 | struct sctp_endpoint *ep = | ||
323 | container_of(work, struct sctp_endpoint, | ||
324 | base.inqueue.immediate); | ||
323 | struct sctp_association *asoc; | 325 | struct sctp_association *asoc; |
324 | struct sock *sk; | 326 | struct sock *sk; |
325 | struct sctp_transport *transport; | 327 | struct sctp_transport *transport; |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cf6deed7e84..71b07466e88 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue) | |||
54 | queue->in_progress = NULL; | 54 | queue->in_progress = NULL; |
55 | 55 | ||
56 | /* Create a task for delivering data. */ | 56 | /* Create a task for delivering data. */ |
57 | INIT_WORK(&queue->immediate, NULL, NULL); | 57 | INIT_WORK(&queue->immediate, NULL); |
58 | 58 | ||
59 | queue->malloced = 0; | 59 | queue->malloced = 0; |
60 | } | 60 | } |
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) | |||
97 | * on the BH related data structures. | 97 | * on the BH related data structures. |
98 | */ | 98 | */ |
99 | list_add_tail(&chunk->list, &q->in_chunk_list); | 99 | list_add_tail(&chunk->list, &q->in_chunk_list); |
100 | q->immediate.func(q->immediate.data); | 100 | q->immediate.func(&q->immediate); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* Extract a chunk from an SCTP inqueue. | 103 | /* Extract a chunk from an SCTP inqueue. |
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
205 | * The intent is that this routine will pull stuff out of the | 205 | * The intent is that this routine will pull stuff out of the |
206 | * inqueue and process it. | 206 | * inqueue and process it. |
207 | */ | 207 | */ |
208 | void sctp_inq_set_th_handler(struct sctp_inq *q, | 208 | void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) |
209 | void (*callback)(void *), void *arg) | ||
210 | { | 209 | { |
211 | INIT_WORK(&q->immediate, callback, arg); | 210 | INIT_WORK(&q->immediate, callback); |
212 | } | 211 | } |
213 | 212 | ||
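The SCTP inqueue change shows the other half of the interface: callbacks are now passed around as work_func_t (the kernel typedef for void (*)(struct work_struct *)), so registration helpers take just the function, and a synchronous caller invokes it with the address of the work item itself. A small sketch under assumed names (in_queue, inq_set_handler, inq_push):

#include <linux/workqueue.h>

struct in_queue {
	struct work_struct immediate;
};

/* Registration takes only the handler; context travels inside the
 * structure that embeds 'immediate'. */
static void inq_set_handler(struct in_queue *q, work_func_t cb)
{
	INIT_WORK(&q->immediate, cb);
}

static void inq_push(struct in_queue *q)
{
	/* run the handler synchronously: it only needs the work pointer */
	q->immediate.func(&q->immediate);
}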
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 00cb388ece0..d96fd466a9a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations; | |||
284 | static struct file_operations content_file_operations; | 284 | static struct file_operations content_file_operations; |
285 | static struct file_operations cache_flush_operations; | 285 | static struct file_operations cache_flush_operations; |
286 | 286 | ||
287 | static void do_cache_clean(void *data); | 287 | static void do_cache_clean(struct work_struct *work); |
288 | static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); | 288 | static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); |
289 | 289 | ||
290 | void cache_register(struct cache_detail *cd) | 290 | void cache_register(struct cache_detail *cd) |
291 | { | 291 | { |
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd) | |||
337 | spin_unlock(&cache_list_lock); | 337 | spin_unlock(&cache_list_lock); |
338 | 338 | ||
339 | /* start the cleaning process */ | 339 | /* start the cleaning process */ |
340 | schedule_work(&cache_cleaner); | 340 | schedule_delayed_work(&cache_cleaner, 0); |
341 | } | 341 | } |
342 | 342 | ||
343 | int cache_unregister(struct cache_detail *cd) | 343 | int cache_unregister(struct cache_detail *cd) |
@@ -461,7 +461,7 @@ static int cache_clean(void) | |||
461 | /* | 461 | /* |
462 | * We want to regularly clean the cache, so we need to schedule some work ... | 462 | * We want to regularly clean the cache, so we need to schedule some work ... |
463 | */ | 463 | */ |
464 | static void do_cache_clean(void *data) | 464 | static void do_cache_clean(struct work_struct *work) |
465 | { | 465 | { |
466 | int delay = 5; | 466 | int delay = 5; |
467 | if (cache_clean() == -1) | 467 | if (cache_clean() == -1) |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9a0b41a97f9..49dba5febbb 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, | |||
54 | } | 54 | } |
55 | 55 | ||
56 | static void | 56 | static void |
57 | rpc_timeout_upcall_queue(void *data) | 57 | rpc_timeout_upcall_queue(struct work_struct *work) |
58 | { | 58 | { |
59 | LIST_HEAD(free_list); | 59 | LIST_HEAD(free_list); |
60 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 60 | struct rpc_inode *rpci = |
61 | container_of(work, struct rpc_inode, queue_timeout.work); | ||
61 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
62 | void (*destroy_msg)(struct rpc_pipe_msg *); | 63 | void (*destroy_msg)(struct rpc_pipe_msg *); |
63 | 64 | ||
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
837 | INIT_LIST_HEAD(&rpci->pipe); | 838 | INIT_LIST_HEAD(&rpci->pipe); |
838 | rpci->pipelen = 0; | 839 | rpci->pipelen = 0; |
839 | init_waitqueue_head(&rpci->waitq); | 840 | init_waitqueue_head(&rpci->waitq); |
840 | INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); | 841 | INIT_DELAYED_WORK(&rpci->queue_timeout, |
842 | rpc_timeout_upcall_queue); | ||
841 | rpci->ops = NULL; | 843 | rpci->ops = NULL; |
842 | } | 844 | } |
843 | } | 845 | } |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index a1ab4eed41f..eff44bcdc95 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly; | |||
41 | 41 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 42 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | 43 | static void rpciod_killall(void); |
44 | static void rpc_async_schedule(void *); | 44 | static void rpc_async_schedule(struct work_struct *); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * RPC tasks sit here while waiting for conditions to improve. | 47 | * RPC tasks sit here while waiting for conditions to improve. |
@@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
305 | if (RPC_IS_ASYNC(task)) { | 305 | if (RPC_IS_ASYNC(task)) { |
306 | int status; | 306 | int status; |
307 | 307 | ||
308 | INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); | 308 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
309 | status = queue_work(task->tk_workqueue, &task->u.tk_work); | 309 | status = queue_work(task->tk_workqueue, &task->u.tk_work); |
310 | if (status < 0) { | 310 | if (status < 0) { |
311 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); | 311 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); |
@@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task) | |||
695 | return __rpc_execute(task); | 695 | return __rpc_execute(task); |
696 | } | 696 | } |
697 | 697 | ||
698 | static void rpc_async_schedule(void *arg) | 698 | static void rpc_async_schedule(struct work_struct *work) |
699 | { | 699 | { |
700 | __rpc_execute((struct rpc_task *)arg); | 700 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
701 | } | 701 | } |
702 | 702 | ||
703 | /** | 703 | /** |
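When the handler body is a single call, the conversion can collapse to one expression, as rpc_async_schedule() does above; container_of() resolves through nested and union members just as well as through direct ones. A sketch with an invented task_ctx (the union mirrors the rpc_task 'u' member only in spirit):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct task_ctx {
	union {
		struct work_struct tk_work;	/* used while queued on a workqueue */
		int dummy_state;
	} u;
	int status;
};

static void execute_task(struct task_ctx *t)
{
	t->status = 0;
}

static void task_worker(struct work_struct *work)
{
	/* one expression: recover the container through u.tk_work */
	execute_task(container_of(work, struct task_ctx, u.tk_work));
}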
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 80857470dc1..4f9a5d9791f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
479 | return status; | 479 | return status; |
480 | } | 480 | } |
481 | 481 | ||
482 | static void xprt_autoclose(void *args) | 482 | static void xprt_autoclose(struct work_struct *work) |
483 | { | 483 | { |
484 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 484 | struct rpc_xprt *xprt = |
485 | container_of(work, struct rpc_xprt, task_cleanup); | ||
485 | 486 | ||
486 | xprt_disconnect(xprt); | 487 | xprt_disconnect(xprt); |
487 | xprt->ops->close(xprt); | 488 | xprt->ops->close(xprt); |
@@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
932 | 933 | ||
933 | INIT_LIST_HEAD(&xprt->free); | 934 | INIT_LIST_HEAD(&xprt->free); |
934 | INIT_LIST_HEAD(&xprt->recv); | 935 | INIT_LIST_HEAD(&xprt->recv); |
935 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); | 936 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
936 | init_timer(&xprt->timer); | 937 | init_timer(&xprt->timer); |
937 | xprt->timer.function = xprt_init_autodisconnect; | 938 | xprt->timer.function = xprt_init_autodisconnect; |
938 | xprt->timer.data = (unsigned long) xprt; | 939 | xprt->timer.data = (unsigned long) xprt; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 757fc91ef25..cfe3c15be94 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | |||
1060 | 1060 | ||
1061 | /** | 1061 | /** |
1062 | * xs_udp_connect_worker - set up a UDP socket | 1062 | * xs_udp_connect_worker - set up a UDP socket |
1063 | * @args: RPC transport to connect | 1063 | * @work: RPC transport to connect |
1064 | * | 1064 | * |
1065 | * Invoked by a work queue tasklet. | 1065 | * Invoked by a work queue tasklet. |
1066 | */ | 1066 | */ |
1067 | static void xs_udp_connect_worker(void *args) | 1067 | static void xs_udp_connect_worker(struct work_struct *work) |
1068 | { | 1068 | { |
1069 | struct rpc_xprt *xprt = (struct rpc_xprt *) args; | 1069 | struct rpc_xprt *xprt = |
1070 | container_of(work, struct rpc_xprt, connect_worker.work); | ||
1070 | struct socket *sock = xprt->sock; | 1071 | struct socket *sock = xprt->sock; |
1071 | int err, status = -EIO; | 1072 | int err, status = -EIO; |
1072 | 1073 | ||
@@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1144 | 1145 | ||
1145 | /** | 1146 | /** |
1146 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint | 1147 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint |
1147 | * @args: RPC transport to connect | 1148 | * @work: RPC transport to connect |
1148 | * | 1149 | * |
1149 | * Invoked by a work queue tasklet. | 1150 | * Invoked by a work queue tasklet. |
1150 | */ | 1151 | */ |
1151 | static void xs_tcp_connect_worker(void *args) | 1152 | static void xs_tcp_connect_worker(struct work_struct *work) |
1152 | { | 1153 | { |
1153 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 1154 | struct rpc_xprt *xprt = |
1155 | container_of(work, struct rpc_xprt, connect_worker.work); | ||
1154 | struct socket *sock = xprt->sock; | 1156 | struct socket *sock = xprt->sock; |
1155 | int err, status = -EIO; | 1157 | int err, status = -EIO; |
1156 | 1158 | ||
@@ -1262,7 +1264,7 @@ static void xs_connect(struct rpc_task *task) | |||
1262 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1264 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1263 | } else { | 1265 | } else { |
1264 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1266 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1265 | schedule_work(&xprt->connect_worker); | 1267 | schedule_delayed_work(&xprt->connect_worker, 0); |
1266 | 1268 | ||
1267 | /* flush_scheduled_work can sleep... */ | 1269 | /* flush_scheduled_work can sleep... */ |
1268 | if (!RPC_IS_ASYNC(task)) | 1270 | if (!RPC_IS_ASYNC(task)) |
@@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1375 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1377 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1376 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1378 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1377 | 1379 | ||
1378 | INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); | 1380 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); |
1379 | xprt->bind_timeout = XS_BIND_TO; | 1381 | xprt->bind_timeout = XS_BIND_TO; |
1380 | xprt->connect_timeout = XS_UDP_CONN_TO; | 1382 | xprt->connect_timeout = XS_UDP_CONN_TO; |
1381 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 1383 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
@@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1420 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1422 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1421 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1423 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1422 | 1424 | ||
1423 | INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); | 1425 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); |
1424 | xprt->bind_timeout = XS_BIND_TO; | 1426 | xprt->bind_timeout = XS_BIND_TO; |
1425 | xprt->connect_timeout = XS_TCP_CONN_TO; | 1427 | xprt->connect_timeout = XS_TCP_CONN_TO; |
1426 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 1428 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 64d3938f74c..f6c77bd36fd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | |||
392 | xfrm_pol_put(policy); | 392 | xfrm_pol_put(policy); |
393 | } | 393 | } |
394 | 394 | ||
395 | static void xfrm_policy_gc_task(void *data) | 395 | static void xfrm_policy_gc_task(struct work_struct *work) |
396 | { | 396 | { |
397 | struct xfrm_policy *policy; | 397 | struct xfrm_policy *policy; |
398 | struct hlist_node *entry, *tmp; | 398 | struct hlist_node *entry, *tmp; |
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total) | |||
580 | 580 | ||
581 | static DEFINE_MUTEX(hash_resize_mutex); | 581 | static DEFINE_MUTEX(hash_resize_mutex); |
582 | 582 | ||
583 | static void xfrm_hash_resize(void *__unused) | 583 | static void xfrm_hash_resize(struct work_struct *__unused) |
584 | { | 584 | { |
585 | int dir, total; | 585 | int dir, total; |
586 | 586 | ||
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused) | |||
597 | mutex_unlock(&hash_resize_mutex); | 597 | mutex_unlock(&hash_resize_mutex); |
598 | } | 598 | } |
599 | 599 | ||
600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
601 | 601 | ||
602 | /* Generate new index... KAME seems to generate them ordered by cost | 602 | /* Generate new index... KAME seems to generate them ordered by cost |
603 | * of an absolute unpredictability of ordering of rules. This will not pass. */ | 603 | * of an absolute unpredictability of ordering of rules. This will not pass. */ |
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void) | |||
2116 | panic("XFRM: failed to allocate bydst hash\n"); | 2116 | panic("XFRM: failed to allocate bydst hash\n"); |
2117 | } | 2117 | } |
2118 | 2118 | ||
2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); | 2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); |
2120 | register_netdevice_notifier(&xfrm_dev_notifier); | 2120 | register_netdevice_notifier(&xfrm_dev_notifier); |
2121 | } | 2121 | } |
2122 | 2122 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 864962bbda9..da54a64ccfa 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void) | |||
115 | 115 | ||
116 | static DEFINE_MUTEX(hash_resize_mutex); | 116 | static DEFINE_MUTEX(hash_resize_mutex); |
117 | 117 | ||
118 | static void xfrm_hash_resize(void *__unused) | 118 | static void xfrm_hash_resize(struct work_struct *__unused) |
119 | { | 119 | { |
120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; | 120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; |
121 | unsigned long nsize, osize; | 121 | unsigned long nsize, osize; |
@@ -168,7 +168,7 @@ out_unlock: | |||
168 | mutex_unlock(&hash_resize_mutex); | 168 | mutex_unlock(&hash_resize_mutex); |
169 | } | 169 | } |
170 | 170 | ||
171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
172 | 172 | ||
173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); | 173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); |
174 | EXPORT_SYMBOL(km_waitq); | 174 | EXPORT_SYMBOL(km_waitq); |
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) | |||
207 | kfree(x); | 207 | kfree(x); |
208 | } | 208 | } |
209 | 209 | ||
210 | static void xfrm_state_gc_task(void *data) | 210 | static void xfrm_state_gc_task(struct work_struct *data) |
211 | { | 211 | { |
212 | struct xfrm_state *x; | 212 | struct xfrm_state *x; |
213 | struct hlist_node *entry, *tmp; | 213 | struct hlist_node *entry, *tmp; |
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void) | |||
1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); | 1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); |
1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); | 1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); |
1570 | 1570 | ||
1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); | 1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); |
1572 | } | 1572 | } |
1573 | 1573 | ||
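The xfrm hash-resize and GC workers keep all of their state in file-scope variables, so after the conversion the work pointer is simply unused; the patch keeps the __unused parameter name to make that explicit, and DECLARE_WORK/INIT_WORK shrink to two arguments. A hedged sketch of that style (table_size and the resize body are invented):

#include <linux/workqueue.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(resize_mutex);
static int table_size = 64;

/* All state is file-static, so the handler ignores its argument. */
static void table_resize(struct work_struct *__unused)
{
	mutex_lock(&resize_mutex);
	table_size *= 2;
	mutex_unlock(&resize_mutex);
}

static DECLARE_WORK(resize_work, table_resize);

static void kick_resize(void)
{
	schedule_work(&resize_work);	/* non-delayed items still use schedule_work() */
}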
diff --git a/security/keys/key.c b/security/keys/key.c index 80de8c3e9cc..70eacbe5abd 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock); | |||
30 | static LIST_HEAD(key_types_list); | 30 | static LIST_HEAD(key_types_list); |
31 | static DECLARE_RWSEM(key_types_sem); | 31 | static DECLARE_RWSEM(key_types_sem); |
32 | 32 | ||
33 | static void key_cleanup(void *data); | 33 | static void key_cleanup(struct work_struct *work); |
34 | static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); | 34 | static DECLARE_WORK(key_cleanup_task, key_cleanup); |
35 | 35 | ||
36 | /* we serialise key instantiation and link */ | 36 | /* we serialise key instantiation and link */ |
37 | DECLARE_RWSEM(key_construction_sem); | 37 | DECLARE_RWSEM(key_construction_sem); |
@@ -552,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link); | |||
552 | * do cleaning up in process context so that we don't have to disable | 552 | * do cleaning up in process context so that we don't have to disable |
553 | * interrupts all over the place | 553 | * interrupts all over the place |
554 | */ | 554 | */ |
555 | static void key_cleanup(void *data) | 555 | static void key_cleanup(struct work_struct *work) |
556 | { | 556 | { |
557 | struct rb_node *_n; | 557 | struct rb_node *_n; |
558 | struct key *key; | 558 | struct key *key; |
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h index 3a61f311557..ee64f5de896 100644 --- a/sound/aoa/aoa-gpio.h +++ b/sound/aoa/aoa-gpio.h | |||
@@ -59,10 +59,10 @@ struct gpio_methods { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct gpio_notification { | 61 | struct gpio_notification { |
62 | struct delayed_work work; | ||
62 | notify_func_t notify; | 63 | notify_func_t notify; |
63 | void *data; | 64 | void *data; |
64 | void *gpio_private; | 65 | void *gpio_private; |
65 | struct work_struct work; | ||
66 | struct mutex mutex; | 66 | struct mutex mutex; |
67 | }; | 67 | }; |
68 | 68 | ||
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c index 40eb47eccf9..2b03bc798bc 100644 --- a/sound/aoa/core/snd-aoa-gpio-feature.c +++ b/sound/aoa/core/snd-aoa-gpio-feature.c | |||
@@ -195,9 +195,10 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt) | |||
195 | ftr_gpio_set_lineout(rt, (s>>2)&1); | 195 | ftr_gpio_set_lineout(rt, (s>>2)&1); |
196 | } | 196 | } |
197 | 197 | ||
198 | static void ftr_handle_notify(void *data) | 198 | static void ftr_handle_notify(struct work_struct *work) |
199 | { | 199 | { |
200 | struct gpio_notification *notif = data; | 200 | struct gpio_notification *notif = |
201 | container_of(work, struct gpio_notification, work.work); | ||
201 | 202 | ||
202 | mutex_lock(¬if->mutex); | 203 | mutex_lock(¬if->mutex); |
203 | if (notif->notify) | 204 | if (notif->notify) |
@@ -253,12 +254,9 @@ static void ftr_gpio_init(struct gpio_runtime *rt) | |||
253 | 254 | ||
254 | ftr_gpio_all_amps_off(rt); | 255 | ftr_gpio_all_amps_off(rt); |
255 | rt->implementation_private = 0; | 256 | rt->implementation_private = 0; |
256 | INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify, | 257 | INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); |
257 | &rt->headphone_notify); | 258 | INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); |
258 | INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify, | 259 | INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); |
259 | &rt->line_in_notify); | ||
260 | INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify, | ||
261 | &rt->line_out_notify); | ||
262 | mutex_init(&rt->headphone_notify.mutex); | 260 | mutex_init(&rt->headphone_notify.mutex); |
263 | mutex_init(&rt->line_in_notify.mutex); | 261 | mutex_init(&rt->line_in_notify.mutex); |
264 | mutex_init(&rt->line_out_notify.mutex); | 262 | mutex_init(&rt->line_out_notify.mutex); |
@@ -287,7 +285,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data) | |||
287 | { | 285 | { |
288 | struct gpio_notification *notif = data; | 286 | struct gpio_notification *notif = data; |
289 | 287 | ||
290 | schedule_work(¬if->work); | 288 | schedule_delayed_work(¬if->work, 0); |
291 | 289 | ||
292 | return IRQ_HANDLED; | 290 | return IRQ_HANDLED; |
293 | } | 291 | } |
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c index 2836c321839..5ca2220eac7 100644 --- a/sound/aoa/core/snd-aoa-gpio-pmf.c +++ b/sound/aoa/core/snd-aoa-gpio-pmf.c | |||
@@ -69,9 +69,10 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt) | |||
69 | pmf_gpio_set_lineout(rt, (s>>2)&1); | 69 | pmf_gpio_set_lineout(rt, (s>>2)&1); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void pmf_handle_notify(void *data) | 72 | static void pmf_handle_notify(struct work_struct *work) |
73 | { | 73 | { |
74 | struct gpio_notification *notif = data; | 74 | struct gpio_notification *notif = |
75 | container_of(work, struct gpio_notification, work.work); | ||
75 | 76 | ||
76 | mutex_lock(¬if->mutex); | 77 | mutex_lock(¬if->mutex); |
77 | if (notif->notify) | 78 | if (notif->notify) |
@@ -83,12 +84,9 @@ static void pmf_gpio_init(struct gpio_runtime *rt) | |||
83 | { | 84 | { |
84 | pmf_gpio_all_amps_off(rt); | 85 | pmf_gpio_all_amps_off(rt); |
85 | rt->implementation_private = 0; | 86 | rt->implementation_private = 0; |
86 | INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify, | 87 | INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); |
87 | &rt->headphone_notify); | 88 | INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); |
88 | INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify, | 89 | INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); |
89 | &rt->line_in_notify); | ||
90 | INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify, | ||
91 | &rt->line_out_notify); | ||
92 | mutex_init(&rt->headphone_notify.mutex); | 90 | mutex_init(&rt->headphone_notify.mutex); |
93 | mutex_init(&rt->line_in_notify.mutex); | 91 | mutex_init(&rt->line_in_notify.mutex); |
94 | mutex_init(&rt->line_out_notify.mutex); | 92 | mutex_init(&rt->line_out_notify.mutex); |
@@ -129,7 +127,7 @@ static void pmf_handle_notify_irq(void *data) | |||
129 | { | 127 | { |
130 | struct gpio_notification *notif = data; | 128 | struct gpio_notification *notif = data; |
131 | 129 | ||
132 | schedule_work(¬if->work); | 130 | schedule_delayed_work(¬if->work, 0); |
133 | } | 131 | } |
134 | 132 | ||
135 | static int pmf_set_notify(struct gpio_runtime *rt, | 133 | static int pmf_set_notify(struct gpio_runtime *rt, |
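The AOA GPIO changes make a useful contrast: interrupt handlers keep their void *dev_id argument untouched, it is only the workqueue callback whose signature changes, and the notification record's work member becomes a delayed_work so it can still be kicked from the IRQ path with a zero delay. A sketch of the converted shape, with an assumed gpio_notify structure rather than the real gpio_notification:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct gpio_notify {
	struct delayed_work work;		/* was a plain work_struct before */
	void (*notify)(void *data);
	void *data;
};

static void notify_handler(struct work_struct *work)
{
	struct gpio_notify *n =
		container_of(work, struct gpio_notify, work.work);

	if (n->notify)
		n->notify(n->data);
}

static irqreturn_t notify_irq(int irq, void *dev_id)
{
	struct gpio_notify *n = dev_id;		/* IRQ handlers are unchanged */

	schedule_delayed_work(&n->work, 0);
	return IRQ_HANDLED;
}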
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c index 12ffffc9e81..d2f2c5078e6 100644 --- a/sound/i2c/other/ak4114.c +++ b/sound/i2c/other/ak4114.c | |||
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL"); | |||
35 | 35 | ||
36 | #define AK4114_ADDR 0x00 /* fixed address */ | 36 | #define AK4114_ADDR 0x00 /* fixed address */ |
37 | 37 | ||
38 | static void ak4114_stats(void *); | 38 | static void ak4114_stats(struct work_struct *work); |
39 | 39 | ||
40 | static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) | 40 | static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) |
41 | { | 41 | { |
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip) | |||
158 | reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); | 158 | reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); |
159 | /* bring up statistics / event queueing */ | 159 | /* bring up statistics / event queueing */ |
160 | chip->init = 0; | 160 | chip->init = 0; |
161 | INIT_WORK(&chip->work, ak4114_stats, chip); | 161 | INIT_DELAYED_WORK(&chip->work, ak4114_stats); |
162 | queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); | 162 | queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); |
163 | } | 163 | } |
164 | 164 | ||
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags) | |||
561 | return res; | 561 | return res; |
562 | } | 562 | } |
563 | 563 | ||
564 | static void ak4114_stats(void *data) | 564 | static void ak4114_stats(struct work_struct *work) |
565 | { | 565 | { |
566 | struct ak4114 *chip = (struct ak4114 *)data; | 566 | struct ak4114 *chip = container_of(work, struct ak4114, work.work); |
567 | 567 | ||
568 | if (chip->init) | 568 | if (chip->init) |
569 | return; | 569 | return; |
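The ak4114 driver shows the delayed-work form with a driver-private workqueue: the statistics poller re-queues itself with queue_delayed_work() every HZ/10 ticks instead of going through the shared schedule_* helpers. A sketch under an assumed codec_stats container:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct codec_stats {
	struct workqueue_struct *wq;	/* created elsewhere with create_workqueue() */
	struct delayed_work work;
	int init;
};

static void stats_worker(struct work_struct *work)
{
	struct codec_stats *c =
		container_of(work, struct codec_stats, work.work);

	if (c->init)
		return;
	queue_delayed_work(c->wq, &c->work, HZ / 10);	/* poll again in ~100 ms */
}

static void stats_start(struct codec_stats *c)
{
	INIT_DELAYED_WORK(&c->work, stats_worker);
	queue_delayed_work(c->wq, &c->work, HZ / 10);
}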
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 6577b232535..7abcb10b275 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c | |||
@@ -1927,9 +1927,10 @@ static int snd_ac97_dev_disconnect(struct snd_device *device) | |||
1927 | static struct snd_ac97_build_ops null_build_ops; | 1927 | static struct snd_ac97_build_ops null_build_ops; |
1928 | 1928 | ||
1929 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 1929 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
1930 | static void do_update_power(void *data) | 1930 | static void do_update_power(struct work_struct *work) |
1931 | { | 1931 | { |
1932 | update_power_regs(data); | 1932 | update_power_regs( |
1933 | container_of(work, struct snd_ac97, power_work.work)); | ||
1933 | } | 1934 | } |
1934 | #endif | 1935 | #endif |
1935 | 1936 | ||
@@ -1989,7 +1990,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template, | |||
1989 | mutex_init(&ac97->page_mutex); | 1990 | mutex_init(&ac97->page_mutex); |
1990 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 1991 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
1991 | ac97->power_workq = create_workqueue("ac97"); | 1992 | ac97->power_workq = create_workqueue("ac97"); |
1992 | INIT_WORK(&ac97->power_work, do_update_power, ac97); | 1993 | INIT_DELAYED_WORK(&ac97->power_work, do_update_power); |
1993 | #endif | 1994 | #endif |
1994 | 1995 | ||
1995 | #ifdef CONFIG_PCI | 1996 | #ifdef CONFIG_PCI |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 9c3d7ac0806..71482c15a85 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -272,10 +272,11 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event); | |||
272 | /* | 272 | /* |
273 | * process queued unsolicited events | 273 | * process queued unsolicited events |
274 | */ | 274 | */ |
275 | static void process_unsol_events(void *data) | 275 | static void process_unsol_events(struct work_struct *work) |
276 | { | 276 | { |
277 | struct hda_bus *bus = data; | 277 | struct hda_bus_unsolicited *unsol = |
278 | struct hda_bus_unsolicited *unsol = bus->unsol; | 278 | container_of(work, struct hda_bus_unsolicited, work); |
279 | struct hda_bus *bus = unsol->bus; | ||
279 | struct hda_codec *codec; | 280 | struct hda_codec *codec; |
280 | unsigned int rp, caddr, res; | 281 | unsigned int rp, caddr, res; |
281 | 282 | ||
@@ -314,7 +315,8 @@ static int init_unsol_queue(struct hda_bus *bus) | |||
314 | kfree(unsol); | 315 | kfree(unsol); |
315 | return -ENOMEM; | 316 | return -ENOMEM; |
316 | } | 317 | } |
317 | INIT_WORK(&unsol->work, process_unsol_events, bus); | 318 | INIT_WORK(&unsol->work, process_unsol_events); |
319 | unsol->bus = bus; | ||
318 | bus->unsol = unsol; | 320 | bus->unsol = unsol; |
319 | return 0; | 321 | return 0; |
320 | } | 322 | } |
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index f9416c36396..9ca1baf860b 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h | |||
@@ -206,6 +206,7 @@ struct hda_bus_unsolicited { | |||
206 | /* workqueue */ | 206 | /* workqueue */ |
207 | struct workqueue_struct *workq; | 207 | struct workqueue_struct *workq; |
208 | struct work_struct work; | 208 | struct work_struct work; |
209 | struct hda_bus *bus; | ||
209 | }; | 210 | }; |
210 | 211 | ||
211 | /* | 212 | /* |
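The HDA change is the case where container_of() alone is not enough: the handler needs the bus, but the work item lives in hda_bus_unsolicited, so the patch adds a bus back-pointer next to the work_struct and fills it in where the old data argument used to be passed. A sketch of that idiom with invented names (unsol_queue, bus_ctx):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct bus_ctx;				/* opaque; only a pointer is needed here */

struct unsol_queue {
	struct work_struct work;
	struct bus_ctx *bus;		/* back-pointer replaces INIT_WORK's data arg */
};

static void unsol_worker(struct work_struct *work)
{
	struct unsol_queue *q = container_of(work, struct unsol_queue, work);
	struct bus_ctx *bus = q->bus;

	(void)bus;			/* ... drain queued events for this bus ... */
}

static struct unsol_queue *unsol_init(struct bus_ctx *bus)
{
	struct unsol_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

	if (!q)
		return NULL;
	INIT_WORK(&q->work, unsol_worker);
	q->bus = bus;
	return q;
}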
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c index 2fbe1d183fc..8f074c7936e 100644 --- a/sound/ppc/tumbler.c +++ b/sound/ppc/tumbler.c | |||
@@ -942,10 +942,11 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int | |||
942 | } | 942 | } |
943 | 943 | ||
944 | static struct work_struct device_change; | 944 | static struct work_struct device_change; |
945 | static struct snd_pmac *device_change_chip; | ||
945 | 946 | ||
946 | static void device_change_handler(void *self) | 947 | static void device_change_handler(struct work_struct *work) |
947 | { | 948 | { |
948 | struct snd_pmac *chip = self; | 949 | struct snd_pmac *chip = device_change_chip; |
949 | struct pmac_tumbler *mix; | 950 | struct pmac_tumbler *mix; |
950 | int headphone, lineout; | 951 | int headphone, lineout; |
951 | 952 | ||
@@ -1417,7 +1418,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip) | |||
1417 | chip->resume = tumbler_resume; | 1418 | chip->resume = tumbler_resume; |
1418 | #endif | 1419 | #endif |
1419 | 1420 | ||
1420 | INIT_WORK(&device_change, device_change_handler, (void *)chip); | 1421 | INIT_WORK(&device_change, device_change_handler); |
1422 | device_change_chip = chip; | ||
1421 | 1423 | ||
1422 | #ifdef PMAC_SUPPORT_AUTOMUTE | 1424 | #ifdef PMAC_SUPPORT_AUTOMUTE |
1423 | if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) | 1425 | if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) |
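Finally, tumbler.c covers the work item that is not embedded in anything: a free-standing static work_struct has no container for container_of() to find, so the context moves into a file-scope pointer that is set alongside INIT_WORK(). The sketch below mirrors that arrangement, with an opaque chip_ctx standing in for snd_pmac and an invented setup helper:

#include <linux/workqueue.h>

struct chip_ctx;				/* opaque device context */

static struct work_struct device_change;
static struct chip_ctx *device_change_chip;	/* carries what the data arg used to */

static void device_change_handler(struct work_struct *work)
{
	struct chip_ctx *chip = device_change_chip;

	(void)chip;				/* ... re-check headphone/lineout state ... */
}

static void device_change_setup(struct chip_ctx *chip)
{
	INIT_WORK(&device_change, device_change_handler);
	device_change_chip = chip;
}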