Diffstat (limited to 'arch')
28 files changed, 120 insertions, 94 deletions
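All of the hunks below apply the same workqueue API conversion: a work handler now receives a pointer to its own struct work_struct instead of an opaque void * data argument, and work items that are queued with a delay become struct delayed_work. The following is a minimal sketch of the new style only, with hypothetical names (my_ctx, my_handler, queue_it) that do not come from this patch, assuming the 2.6.20-era interfaces:

#include <linux/workqueue.h>

struct my_ctx {
        struct work_struct work;  /* embedded so the handler can find its owner */
        int value;
};

/* New-style handler: no data pointer; recover the context with container_of(). */
static void my_handler(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        printk(KERN_INFO "my_ctx value=%d\n", ctx->value);
}

static void queue_it(struct my_ctx *ctx)
{
        INIT_WORK(&ctx->work, my_handler);  /* was INIT_WORK(&work, fn, data) */
        schedule_work(&ctx->work);
}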
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf96790..b3599743093b 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
         int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
         /* Corgi cannot confirm when battery fully charged so periodically kick! */
         if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
             && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
-                schedule_work(&toggle_charger);
+                schedule_delayed_work(&toggle_charger, 0);
 
         while(1) {
                 voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
         sharpsl_pm_led(SHARPSL_LED_OFF);
         sharpsl_pm.charge_mode = CHRG_OFF;
 
-        schedule_work(&sharpsl_bat);
+        schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
         sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
         dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
         else if (sharpsl_pm.charge_mode == CHRG_ON)
                 sharpsl_charge_off();
 
-        schedule_work(&sharpsl_bat);
+        schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
                 sharpsl_charge_off();
         } else if (sharpsl_pm.full_count < 2) {
                 dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-                schedule_work(&toggle_charger);
+                schedule_delayed_work(&toggle_charger, 0);
         } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
                 dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-                schedule_work(&toggle_charger);
+                schedule_delayed_work(&toggle_charger, 0);
         } else {
                 sharpsl_charge_off();
                 sharpsl_pm.charge_mode = CHRG_DONE;
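Items that are queued through schedule_delayed_work(), even with a delay of zero as in sharpsl_pm.c above, become struct delayed_work under the new API. A sketch of that pattern under the same assumptions, again with hypothetical names:

#include <linux/workqueue.h>

static void poll_battery(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_battery);

static void poll_battery(struct work_struct *work)
{
        /* ... sample the hardware ... */

        /* requeue immediately; a non-zero delay in jiffies also works */
        schedule_delayed_work(&poll_work, 0);
}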
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee1..9d2346fb68f4 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
 
         cancel_delayed_work(&irda_config->gpio_expa);
         PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-        schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+        schedule_delayed_work(&irda_config->gpio_expa, 0);
 
         return 0;
 }
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209ff..cbe909bad79b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
         .rows = 8,
         .cols = 8,
         .keymap = nokia770_keymap,
-        .keymapsize = ARRAY_SIZE(nokia770_keymap)
+        .keymapsize = ARRAY_SIZE(nokia770_keymap),
         .delay = 4,
 };
 
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
         printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
         down(&audio_pwr_sem);
         if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
         up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6f..0cbf1b0071f8 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state;
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
         for (;;) {
                 u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
         }
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad7..3b1ad1d981a3 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
 
         cancel_delayed_work(&irda_config->gpio_expa);
         PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-        schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+        schedule_delayed_work(&irda_config->gpio_expa, 0);
 
         return 0;
 }
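The two IrDA board files above used to point the work item at "mode", which is a function argument; with no data pointer in the new API there is nothing equivalent to pass, so the conversion deliberately drops in #error to force a later rework instead of silently changing behaviour. One way such a site could later carry the mode, sketched here with hypothetical names and explicitly not part of this patch, is to store it next to the work item, in the spirit of the back-pointer conversions elsewhere in this series:

#include <linux/workqueue.h>

struct irda_cfg {
        struct delayed_work gpio_expa;  /* initialised elsewhere with
                                           INIT_DELAYED_WORK(&cfg->gpio_expa, set_trans_mode) */
        int mode;                       /* stash the requested mode here */
};

static void set_trans_mode(struct work_struct *work)
{
        struct irda_cfg *cfg = container_of(work, struct irda_cfg, gpio_expa.work);

        /* ... program the transceiver for cfg->mode ... */
}

static int set_mode(struct irda_cfg *cfg, int mode)
{
        cancel_delayed_work(&cfg->gpio_expa);
        cfg->mode = mode;               /* instead of passing &mode to the work */
        schedule_delayed_work(&cfg->gpio_expa, 0);
        return 0;
}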
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab56..12d2fe0ceff6 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
         if (akita_ioexp_device)
                 max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
         }
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 {
         on_each_cpu(mce_checkregs, NULL, 1, 1);
         schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65b..02a9b66b6ac3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
 
 struct warm_boot_cpu_info {
         struct completion *complete;
+        struct work_struct task;
         int apicid;
         int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-        struct warm_boot_cpu_info *info = p;
+        struct warm_boot_cpu_info *info =
+                container_of(work, struct warm_boot_cpu_info, task);
         do_boot_cpu(info->apicid, info->cpu);
         complete(info->complete);
 }
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
         DECLARE_COMPLETION_ONSTACK(done);
         struct warm_boot_cpu_info info;
-        struct work_struct task;
         int apicid, ret;
         struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
         info.complete = &done;
         info.apicid = apicid;
         info.cpu = cpu;
-        INIT_WORK(&task, do_warm_boot_cpu, &info);
+        INIT_WORK(&info.task, do_warm_boot_cpu);
 
         tsc_sync_disabled = 1;
 
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
         clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                         KERNEL_PGD_PTRS);
         flush_tlb_all();
-        schedule_work(&task);
+        schedule_work(&info.task);
         wait_for_completion(&done);
 
         tsc_sync_disabled = 0;
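For one-shot work that used to pass a pointer to an on-stack structure, the work item now has to live inside that structure itself, which is exactly what the smpboot hunk above does. A condensed sketch of the shape, with hypothetical names and the same API assumptions:

#include <linux/workqueue.h>
#include <linux/completion.h>

struct boot_req {
        struct work_struct task;
        struct completion done;
        int cpu;
};

static void boot_worker(struct work_struct *work)
{
        struct boot_req *req = container_of(work, struct boot_req, task);

        /* ... per-CPU bring-up for req->cpu ... */
        complete(&req->done);
}

static void boot_one(int cpu)
{
        struct boot_req req = { .cpu = cpu };

        init_completion(&req.done);
        INIT_WORK(&req.task, boot_worker);
        schedule_work(&req.task);
        wait_for_completion(&req.done);  /* keeps the on-stack work alive until it has run */
}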
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd74..9810c8c90750 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
         unsigned int cpu;
 
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
 {
         int ret;
 
-        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
         ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
         if (!ret)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..b62f0c4d2c7c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
         printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
         info->flags = sstate->flags;
         info->xmit_fifo_size = sstate->xmit_fifo_size;
         info->line = line;
-        INIT_WORK(&info->work, do_softint, info);
+        INIT_WORK(&info->work, do_softint);
         info->state = sstate;
         if (sstate->info) {
                 kfree(info);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..6bedd97570ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
         on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
         on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
         monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 
 struct create_idle {
+        struct work_struct work;
         struct task_struct *idle;
         struct completion done;
         int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-        struct create_idle *c_idle = _c_idle;
+        struct create_idle *c_idle =
+                container_of(work, struct create_idle, work);
 
         c_idle->idle = fork_idle(c_idle->cpu);
         complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
         int timeout;
         struct create_idle c_idle = {
+                .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                 .cpu = cpu,
                 .done = COMPLETION_INITIALIZER(c_idle.done),
         };
-        DECLARE_WORK(work, do_fork_idle, &c_idle);
 
         c_idle.idle = get_idle_for_cpu(cpu);
         if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
          * We can't use kernel_thread since we must avoid to reschedule the child.
          */
         if (!keventd_up() || current_is_keventd())
-                work.func(work.data);
+                c_idle.work.func(&c_idle.work);
         else {
-                schedule_work(&work);
+                schedule_work(&c_idle.work);
                 wait_for_completion(&c_idle.done);
         }
 
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f009337..e5668af19789 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
 /*
  * timers.c -- generic ColdFire hardware timer support.
  *
- * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
  */
 
 /***************************************************************************/
@@ -44,6 +44,14 @@ unsigned int mcf_timerlevel = 5;
 extern void mcf_settimericr(int timer, int level);
 extern int mcf_timerirqpending(int timer);
 
+#if defined(CONFIG_M532x)
+#define __raw_readtrr  __raw_readl
+#define __raw_writetrr __raw_writel
+#else
+#define __raw_readtrr  __raw_readw
+#define __raw_writetrr __raw_writew
+#endif
+
 /***************************************************************************/
 
 void coldfire_tick(void)
@@ -57,7 +65,7 @@ void coldfire_tick(void)
 void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
 {
         __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
-        __raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+        __raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
         __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
@@ -76,7 +84,7 @@ unsigned long coldfire_timer_offset(void)
         unsigned long trr, tcn, offset;
 
         tcn = __raw_readw(TA(MCFTIMER_TCN));
-        trr = __raw_readw(TA(MCFTIMER_TRR));
+        trr = __raw_readtrr(TA(MCFTIMER_TRR));
         offset = (tcn * (1000000 / HZ)) / trr;
 
         /* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@ void coldfire_profile_init(void)
         /* Set up TIMER 2 as high speed profile clock */
         __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
 
-        __raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+        __raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
         __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
                 MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3622eb..1b36f6261764 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp,
 {
 }
 
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
 {
         if (!op) {
                 /* read */
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c7881..2c82412b9efe 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
         if (!channel_open) {
                 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
                         return;
                 }
 
-                INIT_WORK(&work, sp_work, NULL);
+                INIT_WORK(&work, sp_work);
                 queue_work(workqueue, &work);
         } else
                 queue_work(workqueue, &work);
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 31bcdae84823..0e837762cc5b 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,7 +14,7 @@ static unsigned long avr_clock;
 
 static struct work_struct wd_work;
 
-static void wd_stop(void *unused)
+static void wd_stop(struct work_struct *unused)
 {
         const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
         int i = 0, rescue = 8;
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void)
 
         ls_uart_init();
 
-        INIT_WORK(&wd_work, wd_stop, NULL);
+        INIT_WORK(&wd_work, wd_stop);
         schedule_work(&wd_work);
 
         return 0;
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index afa593a8544a..c3a89414ddc0 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -18,11 +18,11 @@
 
 #define OLD_BACKLIGHT_MAX 15
 
-static void pmac_backlight_key_worker(void *data);
-static void pmac_backlight_set_legacy_worker(void *data);
+static void pmac_backlight_key_worker(struct work_struct *work);
+static void pmac_backlight_set_legacy_worker(struct work_struct *work);
 
-static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL);
-static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL);
+static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker);
+static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker);
 
 /* Although these variables are used in interrupt context, it makes no sense to
  * protect them. No user is able to produce enough key events per second and
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value)
         return level;
 }
 
-static void pmac_backlight_key_worker(void *data)
+static void pmac_backlight_key_worker(struct work_struct *work)
 {
         if (atomic_read(&kernel_backlight_disabled))
                 return;
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness)
         return error;
 }
 
-static void pmac_backlight_set_legacy_worker(void *data)
+static void pmac_backlight_set_legacy_worker(struct work_struct *work)
 {
         if (atomic_read(&kernel_backlight_disabled))
                 return;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 137077451316..49037edf7d39 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -37,8 +37,8 @@
 /* EEH event workqueue setup. */
 static DEFINE_SPINLOCK(eeh_eventlist_lock);
 LIST_HEAD(eeh_eventlist);
-static void eeh_thread_launcher(void *);
-DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+static void eeh_thread_launcher(struct work_struct *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
 
 /* Serialize reset sequences for a given pci device */
 DEFINE_MUTEX(eeh_event_mutex);
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy)
  * eeh_thread_launcher
  * @dummy - unused
  */
-static void eeh_thread_launcher(void *dummy)
+static void eeh_thread_launcher(struct work_struct *dummy)
 {
         if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
                 printk(KERN_ERR "Failed to start EEH daemon\n");
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index 2e1943e27819..709952c25f29 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -385,6 +385,7 @@ struct fcc_enet_private {
         phy_info_t *phy;
         struct work_struct phy_relink;
         struct work_struct phy_display_config;
+        struct net_device *dev;
 
         uint sequence_done;
 
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = {
         NULL
 };
 
-static void mii_display_status(void *data)
+static void mii_display_status(struct work_struct *work)
 {
-        struct net_device *dev = data;
-        volatile struct fcc_enet_private *fep = dev->priv;
+        volatile struct fcc_enet_private *fep =
+                container_of(work, struct fcc_enet_private, phy_relink);
+        struct net_device *dev = fep->dev;
         uint s = fep->phy_status;
 
         if (!fep->link && !fep->old_link) {
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data)
                 printk(".\n");
 }
 
-static void mii_display_config(void *data)
+static void mii_display_config(struct work_struct *work)
 {
-        struct net_device *dev = data;
-        volatile struct fcc_enet_private *fep = dev->priv;
+        volatile struct fcc_enet_private *fep =
+                container_of(work, struct fcc_enet_private,
+                             phy_display_config);
+        struct net_device *dev = fep->dev;
         uint s = fep->phy_status;
 
         printk("%s: config: auto-negotiation ", dev->name);
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void)
         cep->phy_id_done = 0;
         cep->phy_addr = fip->fc_phyaddr;
         mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
-        INIT_WORK(&cep->phy_relink, mii_display_status, dev);
-        INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
+        INIT_WORK(&cep->phy_relink, mii_display_status);
+        INIT_WORK(&cep->phy_display_config, mii_display_config);
+        cep->dev = dev;
 #endif  /* CONFIG_USE_MDIO */
 
         fip++;
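Because the handler no longer receives the net_device pointer as its argument, the two PPC Ethernet drivers above grow a dev back-pointer in their private structure, fill it in before the work is queued, and recover both objects through container_of() in the handler. A reduced sketch of that shape, with hypothetical names and the same API assumptions:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct my_enet_private {
        struct work_struct phy_task;
        struct net_device *dev;   /* back-pointer, set before queuing */
};

static void phy_task_fn(struct work_struct *work)
{
        struct my_enet_private *fep =
                container_of(work, struct my_enet_private, phy_task);
        struct net_device *dev = fep->dev;

        printk(KERN_INFO "%s: PHY event\n", dev->name);
}

static void queue_phy_task(struct net_device *dev, struct my_enet_private *fep)
{
        fep->dev = dev;
        INIT_WORK(&fep->phy_task, phy_task_fn);
        schedule_work(&fep->phy_task);
}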
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c
index 2f9fa9e3d331..e6c28fb423b2 100644
--- a/arch/ppc/8xx_io/fec.c
+++ b/arch/ppc/8xx_io/fec.c
@@ -173,6 +173,7 @@ struct fec_enet_private {
         uint phy_speed;
         phy_info_t *phy;
         struct work_struct phy_task;
+        struct net_device *dev;
 
         uint sequence_done;
 
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev)
                 printk(".\n");
 }
 
-static void mii_display_config(void *priv)
+static void mii_display_config(struct work_struct *work)
 {
-        struct net_device *dev = (struct net_device *)priv;
-        struct fec_enet_private *fep = dev->priv;
+        struct fec_enet_private *fep =
+                container_of(work, struct fec_enet_private, phy_task);
+        struct net_device *dev = fep->dev;
         volatile uint *s = &(fep->phy_status);
 
         printk("%s: config: auto-negotiation ", dev->name);
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv)
         fep->sequence_done = 1;
 }
 
-static void mii_relink(void *priv)
+static void mii_relink(struct work_struct *work)
 {
-        struct net_device *dev = (struct net_device *)priv;
-        struct fec_enet_private *fep = dev->priv;
+        struct fec_enet_private *fep =
+                container_of(work, struct fec_enet_private, phy_task);
+        struct net_device *dev = fep->dev;
         int duplex;
 
         fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 {
         struct fec_enet_private *fep = dev->priv;
 
-        INIT_WORK(&fep->phy_task, mii_relink, (void *)dev);
+        fep->dev = dev;
+        INIT_WORK(&fep->phy_task, mii_relink);
         schedule_work(&fep->phy_task);
 }
 
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev)
 {
         struct fec_enet_private *fep = dev->priv;
 
-        INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev);
+        fep->dev = dev;
+        INIT_WORK(&fep->phy_task, mii_display_config);
         schedule_work(&fep->phy_task);
 }
 
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index af1e8fc7d985..67d5cf9cba83 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -92,8 +92,8 @@ static int appldata_timer_active;
  * Work queue
  */
 static struct workqueue_struct *appldata_wq;
-static void appldata_work_fn(void *data);
-static DECLARE_WORK(appldata_work, appldata_work_fn, NULL);
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
 
 
 /*
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data)
  *
  * call data gathering function for each (active) module
  */
-static void appldata_work_fn(void *data)
+static void appldata_work_fn(struct work_struct *work)
 {
         struct list_head *lh;
         struct appldata_ops *ops;
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
index 3576b3cc505e..7d4190e55654 100644
--- a/arch/um/drivers/chan_kern.c
+++ b/arch/um/drivers/chan_kern.c
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans)
         return -1;
 }
 
-void chan_interrupt(struct list_head *chans, struct work_struct *task,
+void chan_interrupt(struct list_head *chans, struct delayed_work *task,
                     struct tty_struct *tty, int irq)
 {
         struct list_head *ele, *next;
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 7b172160fe04..96f0189327af 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = {
 
 static LIST_HEAD(mc_requests);
 
-static void mc_work_proc(void *unused)
+static void mc_work_proc(struct work_struct *unused)
 {
         struct mconsole_entry *req;
         unsigned long flags;
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused)
         }
 }
 
-static DECLARE_WORK(mconsole_work, mc_work_proc, NULL);
+static DECLARE_WORK(mconsole_work, mc_work_proc);
 
 static irqreturn_t mconsole_interrupt(int irq, void *dev_id)
 {
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index ec9eb8bd9432..286bc0b3207f 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id)
          * same device, since it tests for (dev->flags & IFF_UP). So
          * there's no harm in delaying the device shutdown. */
                 schedule_work(&close_work);
+#error this is not permitted - close_work will go out of scope
                 goto out;
         }
         reactivate_fd(lp->fd, UM_ETH_IRQ);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index ce9f3733f73e..6dfe632f1c14 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port)
 DECLARE_MUTEX(ports_sem);
 struct list_head ports = LIST_HEAD_INIT(ports);
 
-void port_work_proc(void *unused)
+void port_work_proc(struct work_struct *unused)
 {
         struct port_list *port;
         struct list_head *ele;
@@ -150,7 +150,7 @@ void port_work_proc(void *unused)
         local_irq_restore(flags);
 }
 
-DECLARE_WORK(port_work, port_work_proc, NULL);
+DECLARE_WORK(port_work, port_work_proc);
 
 static irqreturn_t port_interrupt(int irq, void *data)
 {
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index bbea88801d88..c7587fc39015 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
  */
 
 static int check_interval = 5 * 60; /* 5 minutes */
-static void mcheck_timer(void *data);
-static DECLARE_WORK(mcheck_work, mcheck_timer, NULL);
+static void mcheck_timer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
 
 static void mcheck_check_cpu(void *info)
 {
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info)
         do_machine_check(NULL, 0);
 }
 
-static void mcheck_timer(void *data)
+static void mcheck_timer(struct work_struct *work)
 {
         on_each_cpu(mcheck_check_cpu, NULL, 1, 1);
         schedule_delayed_work(&mcheck_work, check_interval * HZ);
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 62c2e747af58..9800147c4c68 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta
 }
 
 struct create_idle {
+        struct work_struct work;
         struct task_struct *idle;
         struct completion done;
         int cpu;
 };
 
-void do_fork_idle(void *_c_idle)
+void do_fork_idle(struct work_struct *work)
 {
-        struct create_idle *c_idle = _c_idle;
+        struct create_idle *c_idle =
+                container_of(work, struct create_idle, work);
 
         c_idle->idle = fork_idle(c_idle->cpu);
         complete(&c_idle->done);
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
         int timeout;
         unsigned long start_rip;
         struct create_idle c_idle = {
+                .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
                 .cpu = cpu,
                 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
         };
-        DECLARE_WORK(work, do_fork_idle, &c_idle);
 
         /* allocate memory for gdts of secondary cpus. Hotplug is considered */
         if (!cpu_gdt_descr[cpu].address &&
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
          * thread.
          */
         if (!keventd_up() || current_is_keventd())
-                work.func(work.data);
+                c_idle.work.func(&c_idle.work);
         else {
-                schedule_work(&work);
+                schedule_work(&c_idle.work);
                 wait_for_completion(&c_idle.done);
         }
 
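Both smpboot conversions keep the fallback of invoking the handler synchronously when keventd is not available; with the new API the function pointer takes the work item itself rather than a stashed data pointer. A minimal sketch of that call site, with hypothetical names (run_or_queue) and the same API assumptions:

#include <linux/workqueue.h>

static void run_or_queue(struct work_struct *w)
{
        if (!keventd_up() || current_is_keventd())
                w->func(w);          /* call directly: the handler only needs the work item */
        else
                schedule_work(w);
}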
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index e3ef544d2cfb..9f05bc9b2dad 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *v)
 {
         unsigned int cpu;
         for_each_online_cpu(cpu) {
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = {
 
 static int __init cpufreq_tsc(void)
 {
-        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
         if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER))
                 cpufreq_init = 1;