558 files changed, 8315 insertions, 5247 deletions
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index db9499adbed4..36526a1e76d7 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -190,9 +190,13 @@ quiet_cmd_fig2png = FIG2PNG $@
 ###
 # Help targets as used by the top-level makefile
 dochelp:
	@echo  '  Linux kernel internal documentation in different formats:'
-	@echo  '  xmldocs (XML DocBook), psdocs (Postscript), pdfdocs (PDF)'
-	@echo  '  htmldocs (HTML), mandocs (man pages, use installmandocs to install)'
+	@echo  '  htmldocs        - HTML'
+	@echo  '  installmandocs  - install man pages generated by mandocs'
+	@echo  '  mandocs         - man pages'
+	@echo  '  pdfdocs         - PDF'
+	@echo  '  psdocs          - Postscript'
+	@echo  '  xmldocs         - XML DocBook'
 
 ###
 # Temporary files left by various tools
diff --git a/README b/README
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-	Linux kernel release 2.6.xx <http://kernel.org>
+	Linux kernel release 2.6.xx <http://kernel.org/>
 
 These are the release notes for Linux version 2.6.  Read them carefully,
 as they tell you what this is all about, explain how to install the
@@ -22,15 +22,17 @@ ON WHAT HARDWARE DOES IT RUN?
 
 Although originally developed first for 32-bit x86-based PCs (386 or higher),
 today Linux also runs on (at least) the Compaq Alpha AXP, Sun SPARC and
-UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH,
+UltraSPARC, Motorola 68000, PowerPC, PowerPC64, ARM, Hitachi SuperH, Cell,
 IBM S/390, MIPS, HP PA-RISC, Intel IA-64, DEC VAX, AMD x86-64, AXIS CRIS,
-and Renesas M32R architectures.
+Cris, Xtensa, AVR32 and Renesas M32R architectures.
 
 Linux is easily portable to most general-purpose 32- or 64-bit architectures
 as long as they have a paged memory management unit (PMMU) and a port of the
 GNU C compiler (gcc) (part of The GNU Compiler Collection, GCC).  Linux has
 also been ported to a number of architectures without a PMMU, although
 functionality is then obviously somewhat limited.
+Linux has also been ported to itself.  You can now run the kernel as a
+userspace application - this is called UserMode Linux (UML).
 
 DOCUMENTATION:
 
@@ -113,6 +115,7 @@ INSTALLING the kernel:
    version 2.6.12.2 and want to jump to 2.6.12.3, you must first
    reverse the 2.6.12.2 patch (that is, patch -R) _before_ applying
    the 2.6.12.3 patch.
+   You can read more on this in Documentation/applying-patches.txt
 
  - Make sure you have no stale .o files and dependencies lying around:
@@ -161,6 +164,7 @@ CONFIGURING the kernel:
    only ask you for the answers to new questions.
 
  - Alternate configuration commands are:
+	"make config"      Plain text interface.
	"make menuconfig"  Text based color menus, radiolists & dialogs.
	"make xconfig"     X windows (Qt) based configuration tool.
	"make gconfig"     X windows (Gtk) based configuration tool.
@@ -303,8 +307,9 @@ IF SOMETHING GOES WRONG:
 
  - If you compiled the kernel with CONFIG_KALLSYMS you can send the dump
    as is, otherwise you will have to use the "ksymoops" program to make
-   sense of the dump.  This utility can be downloaded from
-   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops.
+   sense of the dump (but compiling with CONFIG_KALLSYMS is usually preferred).
+   This utility can be downloaded from
+   ftp://ftp.<country>.kernel.org/pub/linux/utils/kernel/ksymoops/ .
    Alternately you can do the dump lookup by hand:
 
  - In debugging dumps like the above, it helps enormously if you can
@@ -336,7 +341,7 @@ IF SOMETHING GOES WRONG:
 
    If you for some reason cannot do the above (you have a pre-compiled
    kernel image or similar), telling me as much about your setup as
-   possible will help.
+   possible will help.  Please read the REPORTING-BUGS document for details.
 
  - Alternately, you can use gdb on a running kernel. (read-only; i.e. you
    cannot change values or set break points.) To do this, first compile the
diff --git a/arch/arm/common/sharpsl_pm.c b/arch/arm/common/sharpsl_pm.c
index 605dedf96790..b3599743093b 100644
--- a/arch/arm/common/sharpsl_pm.c
+++ b/arch/arm/common/sharpsl_pm.c
@@ -60,16 +60,16 @@ static int sharpsl_ac_check(void);
 static int sharpsl_fatal_check(void);
 static int sharpsl_average_value(int ad);
 static void sharpsl_average_clear(void);
-static void sharpsl_charge_toggle(void *private_);
-static void sharpsl_battery_thread(void *private_);
+static void sharpsl_charge_toggle(struct work_struct *private_);
+static void sharpsl_battery_thread(struct work_struct *private_);
 
 
 /*
  * Variables
  */
 struct sharpsl_pm_status sharpsl_pm;
-DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
-DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+DECLARE_DELAYED_WORK(toggle_charger, sharpsl_charge_toggle);
+DECLARE_DELAYED_WORK(sharpsl_bat, sharpsl_battery_thread);
 DEFINE_LED_TRIGGER(sharpsl_charge_led_trigger);
 
 
@@ -116,7 +116,7 @@ void sharpsl_battery_kick(void)
 EXPORT_SYMBOL(sharpsl_battery_kick);
 
 
-static void sharpsl_battery_thread(void *private_)
+static void sharpsl_battery_thread(struct work_struct *private_)
 {
	int voltage, percent, apm_status, i = 0;
 
@@ -128,7 +128,7 @@ static void sharpsl_battery_thread(void *private_)
	/* Corgi cannot confirm when battery fully charged so periodically kick! */
	if (!sharpsl_pm.machinfo->batfull_irq && (sharpsl_pm.charge_mode == CHRG_ON)
			&& time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
 
	while(1) {
		voltage = sharpsl_pm.machinfo->read_devdata(SHARPSL_BATT_VOLT);
@@ -212,7 +212,7 @@ static void sharpsl_charge_off(void)
	sharpsl_pm_led(SHARPSL_LED_OFF);
	sharpsl_pm.charge_mode = CHRG_OFF;
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 static void sharpsl_charge_error(void)
@@ -222,7 +222,7 @@ static void sharpsl_charge_error(void)
	sharpsl_pm.charge_mode = CHRG_ERROR;
 }
 
-static void sharpsl_charge_toggle(void *private_)
+static void sharpsl_charge_toggle(struct work_struct *private_)
 {
	dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
 
@@ -254,7 +254,7 @@ static void sharpsl_ac_timer(unsigned long data)
	else if (sharpsl_pm.charge_mode == CHRG_ON)
		sharpsl_charge_off();
 
-	schedule_work(&sharpsl_bat);
+	schedule_delayed_work(&sharpsl_bat, 0);
 }
 
 
@@ -279,10 +279,10 @@ static void sharpsl_chrg_full_timer(unsigned long data)
		sharpsl_charge_off();
	} else if (sharpsl_pm.full_count < 2) {
		dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
	} else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
		dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
-		schedule_work(&toggle_charger);
+		schedule_delayed_work(&toggle_charger, 0);
	} else {
		sharpsl_charge_off();
		sharpsl_pm.charge_mode = CHRG_DONE;
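The sharpsl_pm.c hunks above show the shape of the workqueue conversion that runs through this whole commit: handlers now take a struct work_struct * instead of an opaque void *, and items that may also be queued with a timeout become delayed work declared with DECLARE_DELAYED_WORK() and queued with schedule_delayed_work(), where a delay of 0 queues immediately. A minimal sketch of that pattern, not taken from the commit - every identifier below is invented for illustration:

/* Sketch of the converted workqueue API; only the workqueue calls are real. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll_work, example_poll);

/* new-style handler: receives the work item itself, not a void * cookie */
static void example_poll(struct work_struct *work)
{
	/* ... sample the hardware here ... */

	/* periodic: re-arm ourselves one second from now */
	schedule_delayed_work(&example_poll_work, HZ);
}

static int __init example_init(void)
{
	schedule_delayed_work(&example_poll_work, 0);	/* kick immediately */
	return 0;
}

static void __exit example_exit(void)
{
	cancel_delayed_work_sync(&example_poll_work);	/* helper from later kernels */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");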
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f225a083dee1..9d2346fb68f4 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -323,7 +323,8 @@ static int h3_transceiver_mode(struct device *dev, int mode)
 
	cancel_delayed_work(&irda_config->gpio_expa);
	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
	return 0;
 }
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index dbc555d209ff..cbe909bad79b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -74,7 +74,7 @@ static struct omap_kp_platform_data nokia770_kp_data = {
	.rows = 8,
	.cols = 8,
	.keymap = nokia770_keymap,
-	.keymapsize = ARRAY_SIZE(nokia770_keymap)
+	.keymapsize = ARRAY_SIZE(nokia770_keymap),
	.delay = 4,
 };
 
@@ -191,7 +191,7 @@ static void nokia770_audio_pwr_up(void)
	printk("HP connected\n");
 }
 
-static void codec_delayed_power_down(void *arg)
+static void codec_delayed_power_down(struct work_struct *work)
 {
	down(&audio_pwr_sem);
	if (audio_pwr_state == -1)
@@ -200,7 +200,7 @@ static void codec_delayed_power_down(void *arg)
	up(&audio_pwr_sem);
 }
 
-static DECLARE_WORK(codec_power_down_work, codec_delayed_power_down, NULL);
+static DECLARE_DELAYED_WORK(codec_power_down_work, codec_delayed_power_down);
 
 static void nokia770_audio_pwr_down(void)
 {
diff --git a/arch/arm/mach-omap1/leds-osk.c b/arch/arm/mach-omap1/leds-osk.c
index 3b29e59b0e6f..0cbf1b0071f8 100644
--- a/arch/arm/mach-omap1/leds-osk.c
+++ b/arch/arm/mach-omap1/leds-osk.c
@@ -35,7 +35,7 @@ static u8 hw_led_state;
 
 static u8 tps_leds_change;
 
-static void tps_work(void *unused)
+static void tps_work(struct work_struct *unused)
 {
	for (;;) {
		u8 leds;
@@ -61,7 +61,7 @@ static void tps_work(void *unused)
	}
 }
 
-static DECLARE_WORK(work, tps_work, NULL);
+static DECLARE_WORK(work, tps_work);
 
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 26a95a642ad7..3b1ad1d981a3 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -206,7 +206,8 @@ static int h4_transceiver_mode(struct device *dev, int mode)
 
	cancel_delayed_work(&irda_config->gpio_expa);
	PREPARE_WORK(&irda_config->gpio_expa, set_trans_mode, &mode);
-	schedule_work(&irda_config->gpio_expa);
+#error this is not permitted - mode is an argument variable
+	schedule_delayed_work(&irda_config->gpio_expa, 0);
 
	return 0;
 }
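Both OMAP IrDA board files get a deliberate #error here: under the converted API the handler no longer receives a pointer supplied at queue time, so PREPARE_WORK() can no longer smuggle the address of the on-stack mode argument to set_trans_mode(), and the conversion flags these call sites for a by-hand fix. A hypothetical restructuring of such a site, storing the parameter next to the work item instead of passing a stack address - these names are illustrative, not the fix that was eventually merged:

/* Hypothetical rework of a "mode is an argument variable" call site. */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/workqueue.h>

struct irda_mode_ctx {
	struct delayed_work gpio_expa;
	int mode;			/* lives as long as the work item */
};

static struct irda_mode_ctx irda_ctx;

static void set_trans_mode(struct work_struct *work)
{
	struct irda_mode_ctx *ctx =
		container_of(work, struct irda_mode_ctx, gpio_expa.work);

	/* ... program the transceiver according to ctx->mode ... */
}

static int example_transceiver_mode(struct device *dev, int mode)
{
	cancel_delayed_work(&irda_ctx.gpio_expa);
	irda_ctx.mode = mode;		/* copy the value, do not point at the stack */
	schedule_delayed_work(&irda_ctx.gpio_expa, 0);
	return 0;
}

static int __init example_irda_init(void)
{
	INIT_DELAYED_WORK(&irda_ctx.gpio_expa, set_trans_mode);
	return 0;
}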
diff --git a/arch/arm/mach-pxa/akita-ioexp.c b/arch/arm/mach-pxa/akita-ioexp.c
index 1b398742ab56..12d2fe0ceff6 100644
--- a/arch/arm/mach-pxa/akita-ioexp.c
+++ b/arch/arm/mach-pxa/akita-ioexp.c
@@ -36,11 +36,11 @@ I2C_CLIENT_INSMOD;
 
 static int max7310_write(struct i2c_client *client, int address, int data);
 static struct i2c_client max7310_template;
-static void akita_ioexp_work(void *private_);
+static void akita_ioexp_work(struct work_struct *private_);
 
 static struct device *akita_ioexp_device;
 static unsigned char ioexp_output_value = AKITA_IOEXP_IO_OUT;
-DECLARE_WORK(akita_ioexp, akita_ioexp_work, NULL);
+DECLARE_WORK(akita_ioexp, akita_ioexp_work);
 
 
 /*
@@ -158,7 +158,7 @@ void akita_reset_ioexp(struct device *dev, unsigned char bit)
 EXPORT_SYMBOL(akita_set_ioexp);
 EXPORT_SYMBOL(akita_reset_ioexp);
 
-static void akita_ioexp_work(void *private_)
+static void akita_ioexp_work(struct work_struct *private_)
 {
	if (akita_ioexp_device)
		max7310_set_ouputs(akita_ioexp_device, ioexp_output_value);
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
	}
 }
 
-static void mce_work_fn(void *data);
-static DECLARE_WORK(mce_work, mce_work_fn, NULL);
+static void mce_work_fn(struct work_struct *work);
+static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
 
-static void mce_work_fn(void *data)
+static void mce_work_fn(struct work_struct *work)
 {
	on_each_cpu(mce_checkregs, NULL, 1, 1);
	schedule_delayed_work(&mce_work, MCE_RATE);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 4bb8b77cd65b..02a9b66b6ac3 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -1049,13 +1049,15 @@ void cpu_exit_clear(void)
 
 struct warm_boot_cpu_info {
	struct completion *complete;
+	struct work_struct task;
	int apicid;
	int cpu;
 };
 
-static void __cpuinit do_warm_boot_cpu(void *p)
+static void __cpuinit do_warm_boot_cpu(struct work_struct *work)
 {
-	struct warm_boot_cpu_info *info = p;
+	struct warm_boot_cpu_info *info =
+		container_of(work, struct warm_boot_cpu_info, task);
	do_boot_cpu(info->apicid, info->cpu);
	complete(info->complete);
 }
@@ -1064,7 +1066,6 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
 {
	DECLARE_COMPLETION_ONSTACK(done);
	struct warm_boot_cpu_info info;
-	struct work_struct task;
	int apicid, ret;
	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
@@ -1089,7 +1090,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
	info.complete = &done;
	info.apicid = apicid;
	info.cpu = cpu;
-	INIT_WORK(&task, do_warm_boot_cpu, &info);
+	INIT_WORK(&info.task, do_warm_boot_cpu);
 
	tsc_sync_disabled = 1;
 
@@ -1097,7 +1098,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			KERNEL_PGD_PTRS);
	flush_tlb_all();
-	schedule_work(&task);
+	schedule_work(&info.task);
	wait_for_completion(&done);
 
	tsc_sync_disabled = 0;
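The warm_boot_cpu_info change above is the canonical conversion recipe: embed the work_struct in the structure that used to arrive through the void * argument, initialise it with the two-argument INIT_WORK(), and recover the container with container_of() in the handler, while an on-stack completion still lets the caller wait. A stripped-down sketch of the same recipe with invented names:

/* Sketch of the embed-and-container_of() recipe used in __smp_prepare_cpu(). */
#include <linux/completion.h>
#include <linux/workqueue.h>

struct boot_request {
	struct work_struct task;	/* embedded: replaces the old void *data */
	struct completion *done;
	int cpu;
};

static void boot_worker(struct work_struct *work)
{
	struct boot_request *req =
		container_of(work, struct boot_request, task);

	/* ... act on req->cpu ... */
	complete(req->done);
}

static void kick_boot(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct boot_request req = {
		.done = &done,
		.cpu = cpu,
	};

	INIT_WORK(&req.task, boot_worker);	/* two-argument form, no context pointer */
	schedule_work(&req.task);
	wait_for_completion(&done);		/* req stays valid until the handler has run */
}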
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index fbc95828cd74..9810c8c90750 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -217,7 +217,7 @@ static unsigned int cpufreq_delayed_issched = 0;
 static unsigned int cpufreq_init = 0;
 static struct work_struct cpufreq_delayed_get_work;
 
-static void handle_cpufreq_delayed_get(void *v)
+static void handle_cpufreq_delayed_get(struct work_struct *work)
 {
	unsigned int cpu;
 
@@ -306,7 +306,7 @@ static int __init cpufreq_tsc(void)
 {
	int ret;
 
-	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
+	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
	ret = cpufreq_register_notifier(&time_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (!ret)
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index caab986af70c..b62f0c4d2c7c 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -209,7 +209,7 @@ static void do_serial_bh(void)
 }
 #endif
 
-static void do_softint(void *private_)
+static void do_softint(struct work_struct *private_)
 {
	printk(KERN_ERR "simserial: do_softint called\n");
 }
@@ -698,7 +698,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
	info->flags = sstate->flags;
	info->xmit_fifo_size = sstate->xmit_fifo_size;
	info->line = line;
-	INIT_WORK(&info->work, do_softint, info);
+	INIT_WORK(&info->work, do_softint);
	info->state = sstate;
	if (sstate->info) {
		kfree(info);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 7cfa63a98cb3..6bedd97570ca 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -678,7 +678,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
  * disable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_disable_keventd(void *unused)
+ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
 }
@@ -690,7 +690,7 @@ ia64_mca_cmc_vector_disable_keventd(void *unused)
  * enable the cmc interrupt vector.
  */
 static void
-ia64_mca_cmc_vector_enable_keventd(void *unused)
+ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
 }
@@ -1247,8 +1247,8 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
	monarch_cpu = -1;
 }
 
-static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd, NULL);
-static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd, NULL);
+static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
+static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);
 
 /*
  * ia64_mca_cmc_int_handler
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f7d7f5668144..b21ddecea943 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -463,15 +463,17 @@ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 }
 
 struct create_idle {
+	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
 };
 
 void
-do_fork_idle(void *_c_idle)
+do_fork_idle(struct work_struct *work)
 {
-	struct create_idle *c_idle = _c_idle;
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
 
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
@@ -482,10 +484,10 @@ do_boot_cpu (int sapicid, int cpu)
 {
	int timeout;
	struct create_idle c_idle = {
+		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER(c_idle.done),
	};
-	DECLARE_WORK(work, do_fork_idle, &c_idle);
 
	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
@@ -497,9 +499,9 @@ do_boot_cpu (int sapicid, int cpu)
	 * We can't use kernel_thread since we must avoid to reschedule the child.
	 */
	if (!keventd_up() || current_is_keventd())
-		work.func(work.data);
+		c_idle.work.func(&c_idle.work);
	else {
-		schedule_work(&work);
+		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}
 
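The ia64 variant of the same conversion initialises the embedded work item statically, inside the designated initialiser, with __WORK_INITIALIZER(), and calls the handler directly through work.func when keventd cannot be used. A compact sketch of that initialisation style, with invented names; whether direct invocation is safe depends on context, as it does in the original:

/* Sketch of __WORK_INITIALIZER() for a work item embedded in an on-stack struct. */
#include <linux/completion.h>
#include <linux/workqueue.h>

struct fork_request {
	struct work_struct work;
	struct completion done;
	int cpu;
};

static void fork_worker(struct work_struct *work)
{
	struct fork_request *req =
		container_of(work, struct fork_request, work);

	/* ... create the idle task for req->cpu ... */
	complete(&req->done);
}

static void request_fork(int cpu, int queue_unusable)
{
	struct fork_request req = {
		.work = __WORK_INITIALIZER(req.work, fork_worker),
		.done = COMPLETION_INITIALIZER(req.done),
		.cpu  = cpu,
	};

	if (queue_unusable) {
		req.work.func(&req.work);	/* run synchronously, no queueing */
	} else {
		schedule_work(&req.work);
		wait_for_completion(&req.done);
	}
}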
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index f4edfbf27134..eb92cef9cd0d 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -564,8 +564,8 @@ pcibios_enable_device (struct pci_dev *dev, int mask)
 void
 pcibios_disable_device (struct pci_dev *dev)
 {
-	if (dev->is_enabled)
+	BUG_ON(atomic_read(&dev->enable_cnt));
	acpi_pci_irq_disable(dev);
 }
 
 void
diff --git a/arch/m68knommu/platform/5307/timers.c b/arch/m68knommu/platform/5307/timers.c
index 24781f009337..e5668af19789 100644
--- a/arch/m68knommu/platform/5307/timers.c
+++ b/arch/m68knommu/platform/5307/timers.c
@@ -3,7 +3,7 @@
 /*
  * timers.c -- generic ColdFire hardware timer support.
  *
- * Copyright (C) 1999-2003, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1999-2006, Greg Ungerer (gerg@snapgear.com)
  */
 
 /***************************************************************************/
@@ -44,6 +44,14 @@ unsigned int mcf_timerlevel = 5;
 extern void mcf_settimericr(int timer, int level);
 extern int mcf_timerirqpending(int timer);
 
+#if defined(CONFIG_M532x)
+#define __raw_readtrr	__raw_readl
+#define __raw_writetrr	__raw_writel
+#else
+#define __raw_readtrr	__raw_readw
+#define __raw_writetrr	__raw_writew
+#endif
+
 /***************************************************************************/
 
 void coldfire_tick(void)
@@ -57,7 +65,7 @@ void coldfire_tick(void)
 void coldfire_timer_init(irqreturn_t (*handler)(int, void *, struct pt_regs *))
 {
	__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
-	__raw_writew(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_BUSCLK / 16) / HZ), TA(MCFTIMER_TRR));
	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR));
 
@@ -76,7 +84,7 @@ unsigned long coldfire_timer_offset(void)
	unsigned long trr, tcn, offset;
 
	tcn = __raw_readw(TA(MCFTIMER_TCN));
-	trr = __raw_readw(TA(MCFTIMER_TRR));
+	trr = __raw_readtrr(TA(MCFTIMER_TRR));
	offset = (tcn * (1000000 / HZ)) / trr;
 
	/* Check if we just wrapped the counters and maybe missed a tick */
@@ -120,7 +128,7 @@ void coldfire_profile_init(void)
	/* Set up TIMER 2 as high speed profile clock */
	__raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR));
 
-	__raw_writew(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
+	__raw_writetrr(((MCF_CLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR));
	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
		MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
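The ColdFire timer change widens one register access per SoC: on the MCF532x the timer reference register (TRR) is 32 bits wide, elsewhere 16 bits, so the driver selects __raw_readl/__raw_writel or __raw_readw/__raw_writew behind the __raw_readtrr/__raw_writetrr macros at compile time and the rest of the code stays width-agnostic. The same idea in isolation - the config symbol and names below are placeholders, not real kernel symbols:

/* Compile-time selection of MMIO access width; EXAMPLE_* names are invented. */
#include <linux/io.h>

#ifdef CONFIG_EXAMPLE_WIDE_TIMER
#define example_read_ref(addr)		__raw_readl(addr)
#define example_write_ref(v, addr)	__raw_writel(v, addr)
#else
#define example_read_ref(addr)		__raw_readw(addr)
#define example_write_ref(v, addr)	__raw_writew(v, addr)
#endif

static unsigned long example_timer_reload(void __iomem *trr)
{
	/* callers never see the underlying register width */
	return example_read_ref(trr);
}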
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index c5482e3622eb..1b36f6261764 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -114,7 +114,7 @@ void BSP_gettod (int *yearp, int *monp, int *dayp,
 {
 }
 
-int BSP_hwclk(int op, struct hwclk_time *t)
+int BSP_hwclk(int op, struct rtc_time *t)
 {
	if (!op) {
		/* read */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4d64960be035..d8af858fe3f5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -242,6 +242,7 @@ config LASAT
	select SYS_SUPPORTS_32BIT_KERNEL
	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config MIPS_ATLAS
	bool "MIPS Atlas board"
@@ -265,6 +266,7 @@ config MIPS_ATLAS
	select SYS_SUPPORTS_BIG_ENDIAN
	select SYS_SUPPORTS_LITTLE_ENDIAN
	select SYS_SUPPORTS_MULTITHREADING if EXPERIMENTAL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
	help
	  This enables support for the MIPS Technologies Atlas evaluation
	  board.
@@ -419,6 +421,7 @@ config MOMENCO_OCELOT_C
	select SYS_SUPPORTS_32BIT_KERNEL
	select SYS_SUPPORTS_64BIT_KERNEL
	select SYS_SUPPORTS_BIG_ENDIAN
+	select GENERIC_HARDIRQS_NO__DO_IRQ
	help
	  The Ocelot is a MIPS-based Single Board Computer (SBC) made by
	  Momentum Computer <http://www.momenco.com/>.
@@ -569,6 +572,7 @@ config SGI_IP27
	select SYS_SUPPORTS_BIG_ENDIAN
	select SYS_SUPPORTS_NUMA
	select SYS_SUPPORTS_SMP
+	select GENERIC_HARDIRQS_NO__DO_IRQ
	help
	  This are the SGI Origin 200, Origin 2000 and Onyx 2 Graphics
	  workstations.  To compile a Linux kernel that runs on these, say Y
@@ -835,6 +839,10 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER
	bool
	default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+	bool
+	default n
+
 #
 # Select some configuration options automatically based on user selections.
 #
@@ -996,6 +1004,7 @@ config SOC_PNX8550
	select HW_HAS_PCI
	select SYS_HAS_CPU_MIPS32_R1
	select SYS_SUPPORTS_32BIT_KERNEL
+	select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config SWAP_IO_SPACE
	bool
diff --git a/arch/mips/dec/ecc-berr.c b/arch/mips/dec/ecc-berr.c
index c8430c07355e..6d55e8aab668 100644
--- a/arch/mips/dec/ecc-berr.c
+++ b/arch/mips/dec/ecc-berr.c
@@ -25,6 +25,7 @@
 #include <asm/cpu.h>
 #include <asm/irq_regs.h>
 #include <asm/processor.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 269b22b34313..4c7cb4048d35 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -67,7 +67,6 @@ static struct irq_chip ioasic_irq_type = {
	.mask = mask_ioasic_irq,
	.mask_ack = ack_ioasic_irq,
	.unmask = unmask_ioasic_irq,
-	.end = end_ioasic_irq,
 };
 
 
@@ -106,8 +105,7 @@ void __init init_ioasic_irqs(int base)
		set_irq_chip_and_handler(i, &ioasic_irq_type,
					 handle_level_irq);
	for (; i < base + IO_IRQ_LINES; i++)
-		set_irq_chip_and_handler(i, &ioasic_dma_irq_type,
-					 handle_level_irq);
+		set_irq_chip(i, &ioasic_dma_irq_type);
 
	ioasic_irq_base = base;
 }
diff --git a/arch/mips/dec/kn01-berr.c b/arch/mips/dec/kn01-berr.c
index f19b4617a0a6..d3b8002bf1e7 100644
--- a/arch/mips/dec/kn01-berr.c
+++ b/arch/mips/dec/kn01-berr.c
@@ -20,8 +20,10 @@
 #include <linux/types.h>
 
 #include <asm/inst.h>
+#include <asm/irq_regs.h>
 #include <asm/mipsregs.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 #include <asm/system.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
diff --git a/arch/mips/dec/kn02-irq.c b/arch/mips/dec/kn02-irq.c
index 5a9be4c93584..916e46b8ccd8 100644
--- a/arch/mips/dec/kn02-irq.c
+++ b/arch/mips/dec/kn02-irq.c
@@ -57,19 +57,12 @@ static void ack_kn02_irq(unsigned int irq)
	iob();
 }
 
-static void end_kn02_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_kn02_irq(irq);
-}
-
 static struct irq_chip kn02_irq_type = {
	.typename = "KN02-CSR",
	.ack = ack_kn02_irq,
	.mask = mask_kn02_irq,
	.mask_ack = ack_kn02_irq,
	.unmask = unmask_kn02_irq,
-	.end = end_kn02_irq,
 };
 
 
diff --git a/arch/mips/emma2rh/common/irq_emma2rh.c b/arch/mips/emma2rh/common/irq_emma2rh.c
index 59b98299c896..8d880f0b06ec 100644
--- a/arch/mips/emma2rh/common/irq_emma2rh.c
+++ b/arch/mips/emma2rh/common/irq_emma2rh.c
@@ -56,19 +56,12 @@ static void emma2rh_irq_disable(unsigned int irq)
	ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
 }
 
-static void emma2rh_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_irq_enable(irq - emma2rh_irq_base);
-}
-
 struct irq_chip emma2rh_irq_controller = {
	.typename = "emma2rh_irq",
	.ack = emma2rh_irq_disable,
	.mask = emma2rh_irq_disable,
	.mask_ack = emma2rh_irq_disable,
	.unmask = emma2rh_irq_enable,
-	.end = emma2rh_irq_end,
 };
 
 void emma2rh_irq_init(u32 irq_base)
diff --git a/arch/mips/emma2rh/markeins/irq_markeins.c b/arch/mips/emma2rh/markeins/irq_markeins.c
index 3ac4e405ecdc..2116d9be5fa9 100644
--- a/arch/mips/emma2rh/markeins/irq_markeins.c
+++ b/arch/mips/emma2rh/markeins/irq_markeins.c
@@ -48,19 +48,12 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
	ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
 }
 
-static void emma2rh_sw_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		ll_emma2rh_sw_irq_enable(irq - emma2rh_sw_irq_base);
-}
-
 struct irq_chip emma2rh_sw_irq_controller = {
	.typename = "emma2rh_sw_irq",
	.ack = emma2rh_sw_irq_disable,
	.mask = emma2rh_sw_irq_disable,
	.mask_ack = emma2rh_sw_irq_disable,
	.unmask = emma2rh_sw_irq_enable,
-	.end = emma2rh_sw_irq_end,
 };
 
 void emma2rh_sw_irq_init(u32 irq_base)
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index 5c4f50cdf157..f8d417b5c2bb 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -39,19 +39,12 @@ void disable_r4030_irq(unsigned int irq)
	spin_unlock_irqrestore(&r4030_lock, flags);
 }
 
-static void end_r4030_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		enable_r4030_irq(irq);
-}
-
 static struct irq_chip r4030_irq_type = {
	.typename = "R4030",
	.ack = disable_r4030_irq,
	.mask = disable_r4030_irq,
	.mask_ack = disable_r4030_irq,
	.unmask = enable_r4030_irq,
-	.end = end_r4030_irq,
 };
 
 void __init init_r4030_ints(void)
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 2526c0ca4d81..b59a676c6d0e 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c | |||
@@ -19,9 +19,6 @@ | |||
19 | #include <asm/i8259.h> | 19 | #include <asm/i8259.h> |
20 | #include <asm/io.h> | 20 | #include <asm/io.h> |
21 | 21 | ||
22 | void enable_8259A_irq(unsigned int irq); | ||
23 | void disable_8259A_irq(unsigned int irq); | ||
24 | |||
25 | /* | 22 | /* |
26 | * This is the 'legacy' 8259A Programmable Interrupt Controller, | 23 | * This is the 'legacy' 8259A Programmable Interrupt Controller, |
27 | * present in the majority of PC/AT boxes. | 24 | * present in the majority of PC/AT boxes. |
@@ -31,23 +28,16 @@ void disable_8259A_irq(unsigned int irq); | |||
31 | * moves to arch independent land | 28 | * moves to arch independent land |
32 | */ | 29 | */ |
33 | 30 | ||
31 | static int i8259A_auto_eoi; | ||
34 | DEFINE_SPINLOCK(i8259A_lock); | 32 | DEFINE_SPINLOCK(i8259A_lock); |
35 | 33 | /* some platforms call this... */ | |
36 | static void end_8259A_irq (unsigned int irq) | ||
37 | { | ||
38 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) && | ||
39 | irq_desc[irq].action) | ||
40 | enable_8259A_irq(irq); | ||
41 | } | ||
42 | |||
43 | void mask_and_ack_8259A(unsigned int); | 34 | void mask_and_ack_8259A(unsigned int); |
44 | 35 | ||
45 | static struct irq_chip i8259A_irq_type = { | 36 | static struct irq_chip i8259A_chip = { |
46 | .typename = "XT-PIC", | 37 | .name = "XT-PIC", |
47 | .enable = enable_8259A_irq, | 38 | .mask = disable_8259A_irq, |
48 | .disable = disable_8259A_irq, | 39 | .unmask = enable_8259A_irq, |
49 | .ack = mask_and_ack_8259A, | 40 | .mask_ack = mask_and_ack_8259A, |
50 | .end = end_8259A_irq, | ||
51 | }; | 41 | }; |
52 | 42 | ||
53 | /* | 43 | /* |
@@ -59,8 +49,8 @@ static struct irq_chip i8259A_irq_type = { | |||
59 | */ | 49 | */ |
60 | static unsigned int cached_irq_mask = 0xffff; | 50 | static unsigned int cached_irq_mask = 0xffff; |
61 | 51 | ||
62 | #define cached_21 (cached_irq_mask) | 52 | #define cached_master_mask (cached_irq_mask) |
63 | #define cached_A1 (cached_irq_mask >> 8) | 53 | #define cached_slave_mask (cached_irq_mask >> 8) |
64 | 54 | ||
65 | void disable_8259A_irq(unsigned int irq) | 55 | void disable_8259A_irq(unsigned int irq) |
66 | { | 56 | { |
@@ -70,9 +60,9 @@ void disable_8259A_irq(unsigned int irq) | |||
70 | spin_lock_irqsave(&i8259A_lock, flags); | 60 | spin_lock_irqsave(&i8259A_lock, flags); |
71 | cached_irq_mask |= mask; | 61 | cached_irq_mask |= mask; |
72 | if (irq & 8) | 62 | if (irq & 8) |
73 | outb(cached_A1,0xA1); | 63 | outb(cached_slave_mask, PIC_SLAVE_IMR); |
74 | else | 64 | else |
75 | outb(cached_21,0x21); | 65 | outb(cached_master_mask, PIC_MASTER_IMR); |
76 | spin_unlock_irqrestore(&i8259A_lock, flags); | 66 | spin_unlock_irqrestore(&i8259A_lock, flags); |
77 | } | 67 | } |
78 | 68 | ||
@@ -84,9 +74,9 @@ void enable_8259A_irq(unsigned int irq) | |||
84 | spin_lock_irqsave(&i8259A_lock, flags); | 74 | spin_lock_irqsave(&i8259A_lock, flags); |
85 | cached_irq_mask &= mask; | 75 | cached_irq_mask &= mask; |
86 | if (irq & 8) | 76 | if (irq & 8) |
87 | outb(cached_A1,0xA1); | 77 | outb(cached_slave_mask, PIC_SLAVE_IMR); |
88 | else | 78 | else |
89 | outb(cached_21,0x21); | 79 | outb(cached_master_mask, PIC_MASTER_IMR); |
90 | spin_unlock_irqrestore(&i8259A_lock, flags); | 80 | spin_unlock_irqrestore(&i8259A_lock, flags); |
91 | } | 81 | } |
92 | 82 | ||
@@ -98,9 +88,9 @@ int i8259A_irq_pending(unsigned int irq) | |||
98 | 88 | ||
99 | spin_lock_irqsave(&i8259A_lock, flags); | 89 | spin_lock_irqsave(&i8259A_lock, flags); |
100 | if (irq < 8) | 90 | if (irq < 8) |
101 | ret = inb(0x20) & mask; | 91 | ret = inb(PIC_MASTER_CMD) & mask; |
102 | else | 92 | else |
103 | ret = inb(0xA0) & (mask >> 8); | 93 | ret = inb(PIC_SLAVE_CMD) & (mask >> 8); |
104 | spin_unlock_irqrestore(&i8259A_lock, flags); | 94 | spin_unlock_irqrestore(&i8259A_lock, flags); |
105 | 95 | ||
106 | return ret; | 96 | return ret; |
@@ -109,7 +99,7 @@ int i8259A_irq_pending(unsigned int irq) | |||
109 | void make_8259A_irq(unsigned int irq) | 99 | void make_8259A_irq(unsigned int irq) |
110 | { | 100 | { |
111 | disable_irq_nosync(irq); | 101 | disable_irq_nosync(irq); |
112 | set_irq_chip(irq, &i8259A_irq_type); | 102 | set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); |
113 | enable_irq(irq); | 103 | enable_irq(irq); |
114 | } | 104 | } |
115 | 105 | ||
@@ -125,14 +115,14 @@ static inline int i8259A_irq_real(unsigned int irq) | |||
125 | int irqmask = 1 << irq; | 115 | int irqmask = 1 << irq; |
126 | 116 | ||
127 | if (irq < 8) { | 117 | if (irq < 8) { |
128 | outb(0x0B,0x20); /* ISR register */ | 118 | outb(0x0B,PIC_MASTER_CMD); /* ISR register */ |
129 | value = inb(0x20) & irqmask; | 119 | value = inb(PIC_MASTER_CMD) & irqmask; |
130 | outb(0x0A,0x20); /* back to the IRR register */ | 120 | outb(0x0A,PIC_MASTER_CMD); /* back to the IRR register */ |
131 | return value; | 121 | return value; |
132 | } | 122 | } |
133 | outb(0x0B,0xA0); /* ISR register */ | 123 | outb(0x0B,PIC_SLAVE_CMD); /* ISR register */ |
134 | value = inb(0xA0) & (irqmask >> 8); | 124 | value = inb(PIC_SLAVE_CMD) & (irqmask >> 8); |
135 | outb(0x0A,0xA0); /* back to the IRR register */ | 125 | outb(0x0A,PIC_SLAVE_CMD); /* back to the IRR register */ |
136 | return value; | 126 | return value; |
137 | } | 127 | } |
138 | 128 | ||
@@ -149,17 +139,19 @@ void mask_and_ack_8259A(unsigned int irq) | |||
149 | 139 | ||
150 | spin_lock_irqsave(&i8259A_lock, flags); | 140 | spin_lock_irqsave(&i8259A_lock, flags); |
151 | /* | 141 | /* |
152 | * Lightweight spurious IRQ detection. We do not want to overdo | 142 | * Lightweight spurious IRQ detection. We do not want |
153 | * spurious IRQ handling - it's usually a sign of hardware problems, so | 143 | * to overdo spurious IRQ handling - it's usually a sign |
154 | * we only do the checks we can do without slowing down good hardware | 144 | * of hardware problems, so we only do the checks we can |
155 | * nnecesserily. | 145 | * do without slowing down good hardware unnecessarily. |
156 | * | 146 | * |
157 | * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting | 147 | * Note that IRQ7 and IRQ15 (the two spurious IRQs |
158 | * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A. | 148 | * usually resulting from the 8259A-1|2 PICs) occur |
159 | * Thus we can check spurious 8259A IRQs without doing the quite slow | 149 | * even if the IRQ is masked in the 8259A. Thus we |
160 | * i8259A_irq_real() call for every IRQ. This does not cover 100% of | 150 | * can check spurious 8259A IRQs without doing the |
161 | * spurious interrupts, but should be enough to warn the user that | 151 | * quite slow i8259A_irq_real() call for every IRQ. |
162 | * there is something bad going on ... | 152 | * This does not cover 100% of spurious interrupts, |
153 | * but should be enough to warn the user that there | ||
154 | * is something bad going on ... | ||
163 | */ | 155 | */ |
164 | if (cached_irq_mask & irqmask) | 156 | if (cached_irq_mask & irqmask) |
165 | goto spurious_8259A_irq; | 157 | goto spurious_8259A_irq; |
@@ -167,14 +159,14 @@ void mask_and_ack_8259A(unsigned int irq) | |||
167 | 159 | ||
168 | handle_real_irq: | 160 | handle_real_irq: |
169 | if (irq & 8) { | 161 | if (irq & 8) { |
170 | inb(0xA1); /* DUMMY - (do we need this?) */ | 162 | inb(PIC_SLAVE_IMR); /* DUMMY - (do we need this?) */ |
171 | outb(cached_A1,0xA1); | 163 | outb(cached_slave_mask, PIC_SLAVE_IMR); |
172 | outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */ | 164 | outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */ |
173 | outb(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ | 165 | outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */ |
174 | } else { | 166 | } else { |
175 | inb(0x21); /* DUMMY - (do we need this?) */ | 167 | inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */ |
176 | outb(cached_21,0x21); | 168 | outb(cached_master_mask, PIC_MASTER_IMR); |
177 | outb(0x60+irq,0x20); /* 'Specific EOI' to master */ | 169 | outb(0x60+irq,PIC_MASTER_CMD); /* 'Specific EOI to master */ |
178 | } | 170 | } |
179 | #ifdef CONFIG_MIPS_MT_SMTC | 171 | #ifdef CONFIG_MIPS_MT_SMTC |
180 | if (irq_hwmask[irq] & ST0_IM) | 172 | if (irq_hwmask[irq] & ST0_IM) |
@@ -195,7 +187,7 @@ spurious_8259A_irq: | |||
195 | goto handle_real_irq; | 187 | goto handle_real_irq; |
196 | 188 | ||
197 | { | 189 | { |
198 | static int spurious_irq_mask = 0; | 190 | static int spurious_irq_mask; |
199 | /* | 191 | /* |
200 | * At this point we can be sure the IRQ is spurious, | 192 | * At this point we can be sure the IRQ is spurious, |
201 | * lets ACK and report it. [once per IRQ] | 193 | * lets ACK and report it. [once per IRQ] |
@@ -216,13 +208,25 @@ spurious_8259A_irq: | |||
216 | 208 | ||
217 | static int i8259A_resume(struct sys_device *dev) | 209 | static int i8259A_resume(struct sys_device *dev) |
218 | { | 210 | { |
219 | init_8259A(0); | 211 | init_8259A(i8259A_auto_eoi); |
212 | return 0; | ||
213 | } | ||
214 | |||
215 | static int i8259A_shutdown(struct sys_device *dev) | ||
216 | { | ||
217 | /* Put the i8259A into a quiescent state that | ||
218 | * the kernel initialization code can get it | ||
219 | * out of. | ||
220 | */ | ||
221 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ | ||
222 | outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ | ||
220 | return 0; | 223 | return 0; |
221 | } | 224 | } |
222 | 225 | ||
223 | static struct sysdev_class i8259_sysdev_class = { | 226 | static struct sysdev_class i8259_sysdev_class = { |
224 | set_kset_name("i8259"), | 227 | set_kset_name("i8259"), |
225 | .resume = i8259A_resume, | 228 | .resume = i8259A_resume, |
229 | .shutdown = i8259A_shutdown, | ||
226 | }; | 230 | }; |
227 | 231 | ||
228 | static struct sys_device device_i8259A = { | 232 | static struct sys_device device_i8259A = { |
@@ -244,41 +248,41 @@ void __init init_8259A(int auto_eoi) | |||
244 | { | 248 | { |
245 | unsigned long flags; | 249 | unsigned long flags; |
246 | 250 | ||
251 | i8259A_auto_eoi = auto_eoi; | ||
252 | |||
247 | spin_lock_irqsave(&i8259A_lock, flags); | 253 | spin_lock_irqsave(&i8259A_lock, flags); |
248 | 254 | ||
249 | outb(0xff, 0x21); /* mask all of 8259A-1 */ | 255 | outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ |
250 | outb(0xff, 0xA1); /* mask all of 8259A-2 */ | 256 | outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ |
251 | 257 | ||
252 | /* | 258 | /* |
253 | * outb_p - this has to work on a wide range of PC hardware. | 259 | * outb_p - this has to work on a wide range of PC hardware. |
254 | */ | 260 | */ |
255 | outb_p(0x11, 0x20); /* ICW1: select 8259A-1 init */ | 261 | outb_p(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */ |
256 | outb_p(0x00, 0x21); /* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */ | 262 | outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR); /* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */ |
257 | outb_p(0x04, 0x21); /* 8259A-1 (the master) has a slave on IR2 */ | 263 | outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR); /* 8259A-1 (the master) has a slave on IR2 */ |
258 | if (auto_eoi) | 264 | if (auto_eoi) /* master does Auto EOI */ |
259 | outb_p(0x03, 0x21); /* master does Auto EOI */ | 265 | outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR); |
260 | else | 266 | else /* master expects normal EOI */ |
261 | outb_p(0x01, 0x21); /* master expects normal EOI */ | 267 | outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR); |
262 | 268 | ||
263 | outb_p(0x11, 0xA0); /* ICW1: select 8259A-2 init */ | 269 | outb_p(0x11, PIC_SLAVE_CMD); /* ICW1: select 8259A-2 init */ |
264 | outb_p(0x08, 0xA1); /* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */ | 270 | outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR); /* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */ |
265 | outb_p(0x02, 0xA1); /* 8259A-2 is a slave on master's IR2 */ | 271 | outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR); /* 8259A-2 is a slave on master's IR2 */ |
266 | outb_p(0x01, 0xA1); /* (slave's support for AEOI in flat mode | 272 | outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */ |
267 | is to be investigated) */ | ||
268 | |||
269 | if (auto_eoi) | 273 | if (auto_eoi) |
270 | /* | 274 | /* |
271 | * in AEOI mode we just have to mask the interrupt | 275 | * In AEOI mode we just have to mask the interrupt |
272 | * when acking. | 276 | * when acking. |
273 | */ | 277 | */ |
274 | i8259A_irq_type.ack = disable_8259A_irq; | 278 | i8259A_chip.mask_ack = disable_8259A_irq; |
275 | else | 279 | else |
276 | i8259A_irq_type.ack = mask_and_ack_8259A; | 280 | i8259A_chip.mask_ack = mask_and_ack_8259A; |
277 | 281 | ||
278 | udelay(100); /* wait for 8259A to initialize */ | 282 | udelay(100); /* wait for 8259A to initialize */ |
279 | 283 | ||
280 | outb(cached_21, 0x21); /* restore master IRQ mask */ | 284 | outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ |
281 | outb(cached_A1, 0xA1); /* restore slave IRQ mask */ | 285 | outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ |
282 | 286 | ||
283 | spin_unlock_irqrestore(&i8259A_lock, flags); | 287 | spin_unlock_irqrestore(&i8259A_lock, flags); |
284 | } | 288 | } |
@@ -291,11 +295,17 @@ static struct irqaction irq2 = { | |||
291 | }; | 295 | }; |
292 | 296 | ||
293 | static struct resource pic1_io_resource = { | 297 | static struct resource pic1_io_resource = { |
294 | .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY | 298 | .name = "pic1", |
299 | .start = PIC_MASTER_CMD, | ||
300 | .end = PIC_MASTER_IMR, | ||
301 | .flags = IORESOURCE_BUSY | ||
295 | }; | 302 | }; |
296 | 303 | ||
297 | static struct resource pic2_io_resource = { | 304 | static struct resource pic2_io_resource = { |
298 | .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY | 305 | .name = "pic2", |
306 | .start = PIC_SLAVE_CMD, | ||
307 | .end = PIC_SLAVE_IMR, | ||
308 | .flags = IORESOURCE_BUSY | ||
299 | }; | 309 | }; |
300 | 310 | ||
301 | /* | 311 | /* |
@@ -313,7 +323,7 @@ void __init init_i8259_irqs (void) | |||
313 | init_8259A(0); | 323 | init_8259A(0); |
314 | 324 | ||
315 | for (i = 0; i < 16; i++) | 325 | for (i = 0; i < 16; i++) |
316 | set_irq_chip(i, &i8259A_irq_type); | 326 | set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); |
317 | 327 | ||
318 | setup_irq(2, &irq2); | 328 | setup_irq(PIC_CASCADE_IR, &irq2); |
319 | } | 329 | } |
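The i8259A hunks above replace the raw port numbers (0x20/0x21 and 0xa0/0xa1) with the PIC_MASTER_CMD/PIC_MASTER_IMR and PIC_SLAVE_CMD/PIC_SLAVE_IMR constants, choose the mask_ack callback at init time according to auto_eoi, and hand the sixteen legacy IRQs to the generic level-flow handler. A condensed sketch of how the pieces fit together is below; the .name string and enable_8259A_irq are assumptions based on the mainline i8259 code of that era rather than on this diff:

    static struct irq_chip i8259A_chip = {
    	.name		= "XT-PIC",		/* assumed; not shown in the hunk */
    	.mask		= disable_8259A_irq,
    	.unmask		= enable_8259A_irq,	/* assumed counterpart of disable_8259A_irq */
    	.mask_ack	= mask_and_ack_8259A,	/* replaced by disable_8259A_irq when auto_eoi */
    };

    void __init init_i8259_irqs(void)
    {
    	int i;

    	init_8259A(0);				/* ICW1..ICW4 programming shown above */

    	for (i = 0; i < 16; i++)		/* legacy IRQs 0..15 */
    		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);

    	setup_irq(PIC_CASCADE_IR, &irq2);	/* IRQ2 is the cascade to the slave 8259A */
    }

Registering with handle_level_irq is what makes the per-chip .end callbacks, removed throughout the rest of this series, unnecessary: the flow handler owns the mask/ack/unmask sequence.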
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c index 6cfb31cafde2..efbd219845b5 100644 --- a/arch/mips/kernel/irq-mv6434x.c +++ b/arch/mips/kernel/irq-mv6434x.c | |||
@@ -67,15 +67,6 @@ static inline void unmask_mv64340_irq(unsigned int irq) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | /* | 69 | /* |
70 | * End IRQ processing | ||
71 | */ | ||
72 | static void end_mv64340_irq(unsigned int irq) | ||
73 | { | ||
74 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
75 | unmask_mv64340_irq(irq); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Interrupt handler for interrupts coming from the Marvell chip. | 70 | * Interrupt handler for interrupts coming from the Marvell chip. |
80 | * It could be built in ethernet ports etc... | 71 | * It could be built in ethernet ports etc... |
81 | */ | 72 | */ |
@@ -106,7 +97,6 @@ struct irq_chip mv64340_irq_type = { | |||
106 | .mask = mask_mv64340_irq, | 97 | .mask = mask_mv64340_irq, |
107 | .mask_ack = mask_mv64340_irq, | 98 | .mask_ack = mask_mv64340_irq, |
108 | .unmask = unmask_mv64340_irq, | 99 | .unmask = unmask_mv64340_irq, |
109 | .end = end_mv64340_irq, | ||
110 | }; | 100 | }; |
111 | 101 | ||
112 | void __init mv64340_irq_init(unsigned int base) | 102 | void __init mv64340_irq_init(unsigned int base) |
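This hunk and the ones that follow for the other MIPS interrupt controllers make the same change: the end_*_irq() callback and the .end member are deleted from the irq_chip. Under the old __do_IRQ() path the .end hook was what re-enabled the line after the handlers ran; once the descriptor is driven by a flow handler such as handle_level_irq (see the i8259 conversion above), that re-enable is performed by the flow handler itself, so the per-chip boilerplate is dead code. Below is a simplified sketch of the level flow the remaining callbacks plug into; it is an approximation, not the literal kernel/irq/chip.c source:

    /* roughly what handle_level_irq() does with the callbacks kept in this series */
    static void level_flow_sketch(unsigned int irq, struct irq_desc *desc)
    {
    	desc->chip->mask_ack(irq);			/* e.g. mask_mv64340_irq */

    	if (desc->action)
    		handle_IRQ_event(irq, desc->action);	/* run the registered handlers */

    	if (!(desc->status & IRQ_DISABLED))
    		desc->chip->unmask(irq);		/* e.g. unmask_mv64340_irq */
    }

The real function also takes desc->lock and manages IRQ_INPROGRESS, but the mask-ack / handle / unmask shape is the part that replaces every removed end_*_irq() below.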
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index ddcc2a5f8a06..123324ba8c14 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -29,19 +29,12 @@ static inline void mask_rm7k_irq(unsigned int irq) | |||
29 | clear_c0_intcontrol(0x100 << (irq - irq_base)); | 29 | clear_c0_intcontrol(0x100 << (irq - irq_base)); |
30 | } | 30 | } |
31 | 31 | ||
32 | static void rm7k_cpu_irq_end(unsigned int irq) | ||
33 | { | ||
34 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
35 | unmask_rm7k_irq(irq); | ||
36 | } | ||
37 | |||
38 | static struct irq_chip rm7k_irq_controller = { | 32 | static struct irq_chip rm7k_irq_controller = { |
39 | .typename = "RM7000", | 33 | .typename = "RM7000", |
40 | .ack = mask_rm7k_irq, | 34 | .ack = mask_rm7k_irq, |
41 | .mask = mask_rm7k_irq, | 35 | .mask = mask_rm7k_irq, |
42 | .mask_ack = mask_rm7k_irq, | 36 | .mask_ack = mask_rm7k_irq, |
43 | .unmask = unmask_rm7k_irq, | 37 | .unmask = unmask_rm7k_irq, |
44 | .end = rm7k_cpu_irq_end, | ||
45 | }; | 38 | }; |
46 | 39 | ||
47 | void __init rm7k_cpu_irq_init(int base) | 40 | void __init rm7k_cpu_irq_init(int base) |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index ba6440c88abd..0e6f4c5349d2 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -80,19 +80,12 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq) | |||
80 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); | 80 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); |
81 | } | 81 | } |
82 | 82 | ||
83 | static void rm9k_cpu_irq_end(unsigned int irq) | ||
84 | { | ||
85 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
86 | unmask_rm9k_irq(irq); | ||
87 | } | ||
88 | |||
89 | static struct irq_chip rm9k_irq_controller = { | 83 | static struct irq_chip rm9k_irq_controller = { |
90 | .typename = "RM9000", | 84 | .typename = "RM9000", |
91 | .ack = mask_rm9k_irq, | 85 | .ack = mask_rm9k_irq, |
92 | .mask = mask_rm9k_irq, | 86 | .mask = mask_rm9k_irq, |
93 | .mask_ack = mask_rm9k_irq, | 87 | .mask_ack = mask_rm9k_irq, |
94 | .unmask = unmask_rm9k_irq, | 88 | .unmask = unmask_rm9k_irq, |
95 | .end = rm9k_cpu_irq_end, | ||
96 | }; | 89 | }; |
97 | 90 | ||
98 | static struct irq_chip rm9k_perfcounter_irq = { | 91 | static struct irq_chip rm9k_perfcounter_irq = { |
@@ -103,7 +96,6 @@ static struct irq_chip rm9k_perfcounter_irq = { | |||
103 | .mask = mask_rm9k_irq, | 96 | .mask = mask_rm9k_irq, |
104 | .mask_ack = mask_rm9k_irq, | 97 | .mask_ack = mask_rm9k_irq, |
105 | .unmask = unmask_rm9k_irq, | 98 | .unmask = unmask_rm9k_irq, |
106 | .end = rm9k_cpu_irq_end, | ||
107 | }; | 99 | }; |
108 | 100 | ||
109 | unsigned int rm9000_perfcount_irq; | 101 | unsigned int rm9000_perfcount_irq; |
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index b339798b3172..2fe4c868a801 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c | |||
@@ -117,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
117 | for_each_online_cpu(j) | 117 | for_each_online_cpu(j) |
118 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); | 118 | seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); |
119 | #endif | 119 | #endif |
120 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | 120 | seq_printf(p, " %14s", irq_desc[i].chip->name); |
121 | seq_printf(p, " %s", action->name); | 121 | seq_printf(p, " %s", action->name); |
122 | 122 | ||
123 | for (action=action->next; action; action = action->next) | 123 | for (action=action->next; action; action = action->next) |
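The chips converted in this series still initialize the legacy .typename field, while show_interrupts() now prints chip->name. As far as the generic IRQ code of that era goes, this works because the defaults helper copies .typename into .name when the latter is unset; that behaviour is an assumption about kernel/irq/chip.c and is paraphrased below rather than quoted:

    /* paraphrased compatibility handling, assumed from the era's irq_chip_set_defaults() */
    static void irq_chip_name_compat(struct irq_chip *chip)
    {
    	if (!chip->name)
    		chip->name = chip->typename;	/* .typename is what these MIPS files still set */
    }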
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index be5ac23d3812..fcc86b96ccf6 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c | |||
@@ -50,12 +50,6 @@ static inline void mask_mips_irq(unsigned int irq) | |||
50 | irq_disable_hazard(); | 50 | irq_disable_hazard(); |
51 | } | 51 | } |
52 | 52 | ||
53 | static void mips_cpu_irq_end(unsigned int irq) | ||
54 | { | ||
55 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
56 | unmask_mips_irq(irq); | ||
57 | } | ||
58 | |||
59 | static struct irq_chip mips_cpu_irq_controller = { | 53 | static struct irq_chip mips_cpu_irq_controller = { |
60 | .typename = "MIPS", | 54 | .typename = "MIPS", |
61 | .ack = mask_mips_irq, | 55 | .ack = mask_mips_irq, |
@@ -63,7 +57,6 @@ static struct irq_chip mips_cpu_irq_controller = { | |||
63 | .mask_ack = mask_mips_irq, | 57 | .mask_ack = mask_mips_irq, |
64 | .unmask = unmask_mips_irq, | 58 | .unmask = unmask_mips_irq, |
65 | .eoi = unmask_mips_irq, | 59 | .eoi = unmask_mips_irq, |
66 | .end = mips_cpu_irq_end, | ||
67 | }; | 60 | }; |
68 | 61 | ||
69 | /* | 62 | /* |
@@ -96,8 +89,6 @@ static void mips_mt_cpu_irq_ack(unsigned int irq) | |||
96 | mask_mips_mt_irq(irq); | 89 | mask_mips_mt_irq(irq); |
97 | } | 90 | } |
98 | 91 | ||
99 | #define mips_mt_cpu_irq_end mips_cpu_irq_end | ||
100 | |||
101 | static struct irq_chip mips_mt_cpu_irq_controller = { | 92 | static struct irq_chip mips_mt_cpu_irq_controller = { |
102 | .typename = "MIPS", | 93 | .typename = "MIPS", |
103 | .startup = mips_mt_cpu_irq_startup, | 94 | .startup = mips_mt_cpu_irq_startup, |
@@ -106,7 +97,6 @@ static struct irq_chip mips_mt_cpu_irq_controller = { | |||
106 | .mask_ack = mips_mt_cpu_irq_ack, | 97 | .mask_ack = mips_mt_cpu_irq_ack, |
107 | .unmask = unmask_mips_mt_irq, | 98 | .unmask = unmask_mips_mt_irq, |
108 | .eoi = unmask_mips_mt_irq, | 99 | .eoi = unmask_mips_mt_irq, |
109 | .end = mips_mt_cpu_irq_end, | ||
110 | }; | 100 | }; |
111 | 101 | ||
112 | void __init mips_cpu_irq_init(int irq_base) | 102 | void __init mips_cpu_irq_init(int irq_base) |
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index f06a144c7881..2c82412b9efe 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c | |||
@@ -319,7 +319,7 @@ static void sp_cleanup(void) | |||
319 | static int channel_open = 0; | 319 | static int channel_open = 0; |
320 | 320 | ||
321 | /* the work handler */ | 321 | /* the work handler */ |
322 | static void sp_work(void *data) | 322 | static void sp_work(struct work_struct *unused) |
323 | { | 323 | { |
324 | if (!channel_open) { | 324 | if (!channel_open) { |
325 | if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { | 325 | if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) { |
@@ -354,7 +354,7 @@ static void startwork(int vpe) | |||
354 | return; | 354 | return; |
355 | } | 355 | } |
356 | 356 | ||
357 | INIT_WORK(&work, sp_work, NULL); | 357 | INIT_WORK(&work, sp_work); |
358 | queue_work(workqueue, &work); | 358 | queue_work(workqueue, &work); |
359 | } else | 359 | } else |
360 | queue_work(workqueue, &work); | 360 | queue_work(workqueue, &work); |
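The kspd.c hunk is the first of several in this series that follow the 2.6.20 workqueue API change: work functions now receive the struct work_struct pointer itself instead of an opaque void *, and INIT_WORK() and DECLARE_WORK() drop their third argument (see the ls_uart.c, backlight.c, eeh_event.c and appldata hunks further down). A minimal before/after sketch with a hypothetical my_work/my_handler pair:

    #include <linux/workqueue.h>

    static struct work_struct my_work;

    /* old style (pre-change):
     *	static void my_handler(void *data) { ... }
     *	INIT_WORK(&my_work, my_handler, some_pointer);
     */

    /* new style, as used in the hunks above and below */
    static void my_handler(struct work_struct *work)
    {
    	/* context that used to arrive via the data argument must now be
    	 * reachable from 'work', usually via container_of() */
    }

    static int __init my_setup(void)
    {
    	INIT_WORK(&my_work, my_handler);
    	schedule_work(&my_work);
    	return 0;
    }

sp_work() here simply ignores its argument, so the conversion is a pure signature change; drivers that actually needed the old data pointer use the container_of() pattern seen in the fcc_enet.c and fec.c hunks further down.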
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index 4a84a7beac53..2affa5ff171c 100644 --- a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c | |||
@@ -44,19 +44,12 @@ void enable_lasat_irq(unsigned int irq_nr) | |||
44 | *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift; | 44 | *lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift; |
45 | } | 45 | } |
46 | 46 | ||
47 | static void end_lasat_irq(unsigned int irq) | ||
48 | { | ||
49 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
50 | enable_lasat_irq(irq); | ||
51 | } | ||
52 | |||
53 | static struct irq_chip lasat_irq_type = { | 47 | static struct irq_chip lasat_irq_type = { |
54 | .typename = "Lasat", | 48 | .typename = "Lasat", |
55 | .ack = disable_lasat_irq, | 49 | .ack = disable_lasat_irq, |
56 | .mask = disable_lasat_irq, | 50 | .mask = disable_lasat_irq, |
57 | .mask_ack = disable_lasat_irq, | 51 | .mask_ack = disable_lasat_irq, |
58 | .unmask = enable_lasat_irq, | 52 | .unmask = enable_lasat_irq, |
59 | .end = end_lasat_irq, | ||
60 | }; | 53 | }; |
61 | 54 | ||
62 | static inline int ls1bit32(unsigned int x) | 55 | static inline int ls1bit32(unsigned int x) |
diff --git a/arch/mips/momentum/ocelot_c/cpci-irq.c b/arch/mips/momentum/ocelot_c/cpci-irq.c index e5a4a0a8a7f0..bb11fef08472 100644 --- a/arch/mips/momentum/ocelot_c/cpci-irq.c +++ b/arch/mips/momentum/ocelot_c/cpci-irq.c | |||
@@ -66,15 +66,6 @@ static inline void unmask_cpci_irq(unsigned int irq) | |||
66 | } | 66 | } |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * End IRQ processing | ||
70 | */ | ||
71 | static void end_cpci_irq(unsigned int irq) | ||
72 | { | ||
73 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
74 | unmask_cpci_irq(irq); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Interrupt handler for interrupts coming from the FPGA chip. | 69 | * Interrupt handler for interrupts coming from the FPGA chip. |
79 | * It could be built in ethernet ports etc... | 70 | * It could be built in ethernet ports etc... |
80 | */ | 71 | */ |
@@ -98,7 +89,6 @@ struct irq_chip cpci_irq_type = { | |||
98 | .mask = mask_cpci_irq, | 89 | .mask = mask_cpci_irq, |
99 | .mask_ack = mask_cpci_irq, | 90 | .mask_ack = mask_cpci_irq, |
100 | .unmask = unmask_cpci_irq, | 91 | .unmask = unmask_cpci_irq, |
101 | .end = end_cpci_irq, | ||
102 | }; | 92 | }; |
103 | 93 | ||
104 | void cpci_irq_init(void) | 94 | void cpci_irq_init(void) |
diff --git a/arch/mips/momentum/ocelot_c/uart-irq.c b/arch/mips/momentum/ocelot_c/uart-irq.c index 0029f0008dea..a7a80c0da569 100644 --- a/arch/mips/momentum/ocelot_c/uart-irq.c +++ b/arch/mips/momentum/ocelot_c/uart-irq.c | |||
@@ -60,15 +60,6 @@ static inline void unmask_uart_irq(unsigned int irq) | |||
60 | } | 60 | } |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * End IRQ processing | ||
64 | */ | ||
65 | static void end_uart_irq(unsigned int irq) | ||
66 | { | ||
67 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
68 | unmask_uart_irq(irq); | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Interrupt handler for interrupts coming from the FPGA chip. | 63 | * Interrupt handler for interrupts coming from the FPGA chip. |
73 | */ | 64 | */ |
74 | void ll_uart_irq(void) | 65 | void ll_uart_irq(void) |
@@ -91,7 +82,6 @@ struct irq_chip uart_irq_type = { | |||
91 | .mask = mask_uart_irq, | 82 | .mask = mask_uart_irq, |
92 | .mask_ack = mask_uart_irq, | 83 | .mask_ack = mask_uart_irq, |
93 | .unmask = unmask_uart_irq, | 84 | .unmask = unmask_uart_irq, |
94 | .end = end_uart_irq, | ||
95 | }; | 85 | }; |
96 | 86 | ||
97 | void uart_irq_init(void) | 87 | void uart_irq_init(void) |
diff --git a/arch/mips/philips/pnx8550/common/int.c b/arch/mips/philips/pnx8550/common/int.c index 0dc23930edbd..2c36c108c4d6 100644 --- a/arch/mips/philips/pnx8550/common/int.c +++ b/arch/mips/philips/pnx8550/common/int.c | |||
@@ -158,20 +158,12 @@ int pnx8550_set_gic_priority(int irq, int priority) | |||
158 | return prev_priority; | 158 | return prev_priority; |
159 | } | 159 | } |
160 | 160 | ||
161 | static void end_irq(unsigned int irq) | ||
162 | { | ||
163 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) { | ||
164 | unmask_irq(irq); | ||
165 | } | ||
166 | } | ||
167 | |||
168 | static struct irq_chip level_irq_type = { | 161 | static struct irq_chip level_irq_type = { |
169 | .typename = "PNX Level IRQ", | 162 | .typename = "PNX Level IRQ", |
170 | .ack = mask_irq, | 163 | .ack = mask_irq, |
171 | .mask = mask_irq, | 164 | .mask = mask_irq, |
172 | .mask_ack = mask_irq, | 165 | .mask_ack = mask_irq, |
173 | .unmask = unmask_irq, | 166 | .unmask = unmask_irq, |
174 | .end = end_irq, | ||
175 | }; | 167 | }; |
176 | 168 | ||
177 | static struct irqaction gic_action = { | 169 | static struct irqaction gic_action = { |
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index c7b138053159..c44f8be0644f 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c | |||
@@ -51,19 +51,12 @@ static void disable_local0_irq(unsigned int irq) | |||
51 | sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0)); | 51 | sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0)); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void end_local0_irq (unsigned int irq) | ||
55 | { | ||
56 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
57 | enable_local0_irq(irq); | ||
58 | } | ||
59 | |||
60 | static struct irq_chip ip22_local0_irq_type = { | 54 | static struct irq_chip ip22_local0_irq_type = { |
61 | .typename = "IP22 local 0", | 55 | .typename = "IP22 local 0", |
62 | .ack = disable_local0_irq, | 56 | .ack = disable_local0_irq, |
63 | .mask = disable_local0_irq, | 57 | .mask = disable_local0_irq, |
64 | .mask_ack = disable_local0_irq, | 58 | .mask_ack = disable_local0_irq, |
65 | .unmask = enable_local0_irq, | 59 | .unmask = enable_local0_irq, |
66 | .end = end_local0_irq, | ||
67 | }; | 60 | }; |
68 | 61 | ||
69 | static void enable_local1_irq(unsigned int irq) | 62 | static void enable_local1_irq(unsigned int irq) |
@@ -79,19 +72,12 @@ void disable_local1_irq(unsigned int irq) | |||
79 | sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1)); | 72 | sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1)); |
80 | } | 73 | } |
81 | 74 | ||
82 | static void end_local1_irq (unsigned int irq) | ||
83 | { | ||
84 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
85 | enable_local1_irq(irq); | ||
86 | } | ||
87 | |||
88 | static struct irq_chip ip22_local1_irq_type = { | 75 | static struct irq_chip ip22_local1_irq_type = { |
89 | .typename = "IP22 local 1", | 76 | .typename = "IP22 local 1", |
90 | .ack = disable_local1_irq, | 77 | .ack = disable_local1_irq, |
91 | .mask = disable_local1_irq, | 78 | .mask = disable_local1_irq, |
92 | .mask_ack = disable_local1_irq, | 79 | .mask_ack = disable_local1_irq, |
93 | .unmask = enable_local1_irq, | 80 | .unmask = enable_local1_irq, |
94 | .end = end_local1_irq, | ||
95 | }; | 81 | }; |
96 | 82 | ||
97 | static void enable_local2_irq(unsigned int irq) | 83 | static void enable_local2_irq(unsigned int irq) |
@@ -107,19 +93,12 @@ void disable_local2_irq(unsigned int irq) | |||
107 | sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0)); | 93 | sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0)); |
108 | } | 94 | } |
109 | 95 | ||
110 | static void end_local2_irq (unsigned int irq) | ||
111 | { | ||
112 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
113 | enable_local2_irq(irq); | ||
114 | } | ||
115 | |||
116 | static struct irq_chip ip22_local2_irq_type = { | 96 | static struct irq_chip ip22_local2_irq_type = { |
117 | .typename = "IP22 local 2", | 97 | .typename = "IP22 local 2", |
118 | .ack = disable_local2_irq, | 98 | .ack = disable_local2_irq, |
119 | .mask = disable_local2_irq, | 99 | .mask = disable_local2_irq, |
120 | .mask_ack = disable_local2_irq, | 100 | .mask_ack = disable_local2_irq, |
121 | .unmask = enable_local2_irq, | 101 | .unmask = enable_local2_irq, |
122 | .end = end_local2_irq, | ||
123 | }; | 102 | }; |
124 | 103 | ||
125 | static void enable_local3_irq(unsigned int irq) | 104 | static void enable_local3_irq(unsigned int irq) |
@@ -135,19 +114,12 @@ void disable_local3_irq(unsigned int irq) | |||
135 | sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1)); | 114 | sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1)); |
136 | } | 115 | } |
137 | 116 | ||
138 | static void end_local3_irq (unsigned int irq) | ||
139 | { | ||
140 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
141 | enable_local3_irq(irq); | ||
142 | } | ||
143 | |||
144 | static struct irq_chip ip22_local3_irq_type = { | 117 | static struct irq_chip ip22_local3_irq_type = { |
145 | .typename = "IP22 local 3", | 118 | .typename = "IP22 local 3", |
146 | .ack = disable_local3_irq, | 119 | .ack = disable_local3_irq, |
147 | .mask = disable_local3_irq, | 120 | .mask = disable_local3_irq, |
148 | .mask_ack = disable_local3_irq, | 121 | .mask_ack = disable_local3_irq, |
149 | .unmask = enable_local3_irq, | 122 | .unmask = enable_local3_irq, |
150 | .end = end_local3_irq, | ||
151 | }; | 123 | }; |
152 | 124 | ||
153 | static void indy_local0_irqdispatch(void) | 125 | static void indy_local0_irqdispatch(void) |
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c index 5f8835b4e84a..319f8803ef6f 100644 --- a/arch/mips/sgi-ip27/ip27-irq.c +++ b/arch/mips/sgi-ip27/ip27-irq.c | |||
@@ -332,13 +332,6 @@ static inline void disable_bridge_irq(unsigned int irq) | |||
332 | intr_disconnect_level(cpu, swlevel); | 332 | intr_disconnect_level(cpu, swlevel); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void end_bridge_irq(unsigned int irq) | ||
336 | { | ||
337 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) && | ||
338 | irq_desc[irq].action) | ||
339 | enable_bridge_irq(irq); | ||
340 | } | ||
341 | |||
342 | static struct irq_chip bridge_irq_type = { | 335 | static struct irq_chip bridge_irq_type = { |
343 | .typename = "bridge", | 336 | .typename = "bridge", |
344 | .startup = startup_bridge_irq, | 337 | .startup = startup_bridge_irq, |
@@ -347,7 +340,6 @@ static struct irq_chip bridge_irq_type = { | |||
347 | .mask = disable_bridge_irq, | 340 | .mask = disable_bridge_irq, |
348 | .mask_ack = disable_bridge_irq, | 341 | .mask_ack = disable_bridge_irq, |
349 | .unmask = enable_bridge_irq, | 342 | .unmask = enable_bridge_irq, |
350 | .end = end_bridge_irq, | ||
351 | }; | 343 | }; |
352 | 344 | ||
353 | void __devinit register_bridge_irq(unsigned int irq) | 345 | void __devinit register_bridge_irq(unsigned int irq) |
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c index 7d361726bbfb..c20e9899b34b 100644 --- a/arch/mips/sgi-ip27/ip27-timer.c +++ b/arch/mips/sgi-ip27/ip27-timer.c | |||
@@ -180,10 +180,6 @@ static void disable_rt_irq(unsigned int irq) | |||
180 | { | 180 | { |
181 | } | 181 | } |
182 | 182 | ||
183 | static void end_rt_irq(unsigned int irq) | ||
184 | { | ||
185 | } | ||
186 | |||
187 | static struct irq_chip rt_irq_type = { | 183 | static struct irq_chip rt_irq_type = { |
188 | .typename = "SN HUB RT timer", | 184 | .typename = "SN HUB RT timer", |
189 | .ack = disable_rt_irq, | 185 | .ack = disable_rt_irq, |
@@ -191,7 +187,6 @@ static struct irq_chip rt_irq_type = { | |||
191 | .mask_ack = disable_rt_irq, | 187 | .mask_ack = disable_rt_irq, |
192 | .unmask = enable_rt_irq, | 188 | .unmask = enable_rt_irq, |
193 | .eoi = enable_rt_irq, | 189 | .eoi = enable_rt_irq, |
194 | .end = end_rt_irq, | ||
195 | }; | 190 | }; |
196 | 191 | ||
197 | static struct irqaction rt_irqaction = { | 192 | static struct irqaction rt_irqaction = { |
diff --git a/arch/mips/tx4927/common/tx4927_irq.c b/arch/mips/tx4927/common/tx4927_irq.c index 21873de49aa8..ed4a19adf361 100644 --- a/arch/mips/tx4927/common/tx4927_irq.c +++ b/arch/mips/tx4927/common/tx4927_irq.c | |||
@@ -66,12 +66,10 @@ | |||
66 | #define TX4927_IRQ_CP0_INIT ( 1 << 10 ) | 66 | #define TX4927_IRQ_CP0_INIT ( 1 << 10 ) |
67 | #define TX4927_IRQ_CP0_ENABLE ( 1 << 13 ) | 67 | #define TX4927_IRQ_CP0_ENABLE ( 1 << 13 ) |
68 | #define TX4927_IRQ_CP0_DISABLE ( 1 << 14 ) | 68 | #define TX4927_IRQ_CP0_DISABLE ( 1 << 14 ) |
69 | #define TX4927_IRQ_CP0_ENDIRQ ( 1 << 16 ) | ||
70 | 69 | ||
71 | #define TX4927_IRQ_PIC_INIT ( 1 << 20 ) | 70 | #define TX4927_IRQ_PIC_INIT ( 1 << 20 ) |
72 | #define TX4927_IRQ_PIC_ENABLE ( 1 << 23 ) | 71 | #define TX4927_IRQ_PIC_ENABLE ( 1 << 23 ) |
73 | #define TX4927_IRQ_PIC_DISABLE ( 1 << 24 ) | 72 | #define TX4927_IRQ_PIC_DISABLE ( 1 << 24 ) |
74 | #define TX4927_IRQ_PIC_ENDIRQ ( 1 << 26 ) | ||
75 | 73 | ||
76 | #define TX4927_IRQ_ALL 0xffffffff | 74 | #define TX4927_IRQ_ALL 0xffffffff |
77 | #endif | 75 | #endif |
@@ -82,12 +80,10 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE | |||
82 | | TX4927_IRQ_WARN | TX4927_IRQ_EROR | 80 | | TX4927_IRQ_WARN | TX4927_IRQ_EROR |
83 | // | TX4927_IRQ_CP0_INIT | 81 | // | TX4927_IRQ_CP0_INIT |
84 | // | TX4927_IRQ_CP0_ENABLE | 82 | // | TX4927_IRQ_CP0_ENABLE |
85 | // | TX4927_IRQ_CP0_DISABLE | ||
86 | // | TX4927_IRQ_CP0_ENDIRQ | 83 | // | TX4927_IRQ_CP0_ENDIRQ |
87 | // | TX4927_IRQ_PIC_INIT | 84 | // | TX4927_IRQ_PIC_INIT |
88 | // | TX4927_IRQ_PIC_ENABLE | 85 | // | TX4927_IRQ_PIC_ENABLE |
89 | // | TX4927_IRQ_PIC_DISABLE | 86 | // | TX4927_IRQ_PIC_DISABLE |
90 | // | TX4927_IRQ_PIC_ENDIRQ | ||
91 | // | TX4927_IRQ_INIT | 87 | // | TX4927_IRQ_INIT |
92 | // | TX4927_IRQ_NEST1 | 88 | // | TX4927_IRQ_NEST1 |
93 | // | TX4927_IRQ_NEST2 | 89 | // | TX4927_IRQ_NEST2 |
@@ -114,11 +110,9 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE | |||
114 | 110 | ||
115 | static void tx4927_irq_cp0_enable(unsigned int irq); | 111 | static void tx4927_irq_cp0_enable(unsigned int irq); |
116 | static void tx4927_irq_cp0_disable(unsigned int irq); | 112 | static void tx4927_irq_cp0_disable(unsigned int irq); |
117 | static void tx4927_irq_cp0_end(unsigned int irq); | ||
118 | 113 | ||
119 | static void tx4927_irq_pic_enable(unsigned int irq); | 114 | static void tx4927_irq_pic_enable(unsigned int irq); |
120 | static void tx4927_irq_pic_disable(unsigned int irq); | 115 | static void tx4927_irq_pic_disable(unsigned int irq); |
121 | static void tx4927_irq_pic_end(unsigned int irq); | ||
122 | 116 | ||
123 | /* | 117 | /* |
124 | * Kernel structs for all pic's | 118 | * Kernel structs for all pic's |
@@ -131,7 +125,6 @@ static struct irq_chip tx4927_irq_cp0_type = { | |||
131 | .mask = tx4927_irq_cp0_disable, | 125 | .mask = tx4927_irq_cp0_disable, |
132 | .mask_ack = tx4927_irq_cp0_disable, | 126 | .mask_ack = tx4927_irq_cp0_disable, |
133 | .unmask = tx4927_irq_cp0_enable, | 127 | .unmask = tx4927_irq_cp0_enable, |
134 | .end = tx4927_irq_cp0_end, | ||
135 | }; | 128 | }; |
136 | 129 | ||
137 | #define TX4927_PIC_NAME "TX4927-PIC" | 130 | #define TX4927_PIC_NAME "TX4927-PIC" |
@@ -141,7 +134,6 @@ static struct irq_chip tx4927_irq_pic_type = { | |||
141 | .mask = tx4927_irq_pic_disable, | 134 | .mask = tx4927_irq_pic_disable, |
142 | .mask_ack = tx4927_irq_pic_disable, | 135 | .mask_ack = tx4927_irq_pic_disable, |
143 | .unmask = tx4927_irq_pic_enable, | 136 | .unmask = tx4927_irq_pic_enable, |
144 | .end = tx4927_irq_pic_end, | ||
145 | }; | 137 | }; |
146 | 138 | ||
147 | #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL } | 139 | #define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL } |
@@ -214,15 +206,6 @@ static void tx4927_irq_cp0_disable(unsigned int irq) | |||
214 | tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0); | 206 | tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0); |
215 | } | 207 | } |
216 | 208 | ||
217 | static void tx4927_irq_cp0_end(unsigned int irq) | ||
218 | { | ||
219 | TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENDIRQ, "irq=%d \n", irq); | ||
220 | |||
221 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
222 | tx4927_irq_cp0_enable(irq); | ||
223 | } | ||
224 | } | ||
225 | |||
226 | /* | 209 | /* |
227 | * Functions for pic | 210 | * Functions for pic |
228 | */ | 211 | */ |
@@ -376,15 +359,6 @@ static void tx4927_irq_pic_disable(unsigned int irq) | |||
376 | tx4927_irq_pic_mask(irq), 0); | 359 | tx4927_irq_pic_mask(irq), 0); |
377 | } | 360 | } |
378 | 361 | ||
379 | static void tx4927_irq_pic_end(unsigned int irq) | ||
380 | { | ||
381 | TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENDIRQ, "irq=%d\n", irq); | ||
382 | |||
383 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
384 | tx4927_irq_pic_enable(irq); | ||
385 | } | ||
386 | } | ||
387 | |||
388 | /* | 362 | /* |
389 | * Main init functions | 363 | * Main init functions |
390 | */ | 364 | */ |
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c index 34cdb2a240e9..5a5ea6c0b9f6 100644 --- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c +++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c | |||
@@ -153,7 +153,6 @@ JP7 is not bus master -- do NOT use -- only 4 pci bus master's allowed -- SouthB | |||
153 | #define TOSHIBA_RBTX4927_IRQ_IOC_INIT ( 1 << 10 ) | 153 | #define TOSHIBA_RBTX4927_IRQ_IOC_INIT ( 1 << 10 ) |
154 | #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE ( 1 << 13 ) | 154 | #define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE ( 1 << 13 ) |
155 | #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE ( 1 << 14 ) | 155 | #define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE ( 1 << 14 ) |
156 | #define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ ( 1 << 16 ) | ||
157 | 156 | ||
158 | #define TOSHIBA_RBTX4927_IRQ_ISA_INIT ( 1 << 20 ) | 157 | #define TOSHIBA_RBTX4927_IRQ_ISA_INIT ( 1 << 20 ) |
159 | #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE ( 1 << 23 ) | 158 | #define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE ( 1 << 23 ) |
@@ -172,7 +171,6 @@ static const u32 toshiba_rbtx4927_irq_debug_flag = | |||
172 | // | TOSHIBA_RBTX4927_IRQ_IOC_INIT | 171 | // | TOSHIBA_RBTX4927_IRQ_IOC_INIT |
173 | // | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE | 172 | // | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE |
174 | // | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE | 173 | // | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE |
175 | // | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ | ||
176 | // | TOSHIBA_RBTX4927_IRQ_ISA_INIT | 174 | // | TOSHIBA_RBTX4927_IRQ_ISA_INIT |
177 | // | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE | 175 | // | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE |
178 | // | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE | 176 | // | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE |
@@ -223,7 +221,6 @@ extern void mask_and_ack_8259A(unsigned int irq); | |||
223 | 221 | ||
224 | static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq); | 222 | static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq); |
225 | static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq); | 223 | static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq); |
226 | static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq); | ||
227 | 224 | ||
228 | #ifdef CONFIG_TOSHIBA_FPCIB0 | 225 | #ifdef CONFIG_TOSHIBA_FPCIB0 |
229 | static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq); | 226 | static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq); |
@@ -239,7 +236,6 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = { | |||
239 | .mask = toshiba_rbtx4927_irq_ioc_disable, | 236 | .mask = toshiba_rbtx4927_irq_ioc_disable, |
240 | .mask_ack = toshiba_rbtx4927_irq_ioc_disable, | 237 | .mask_ack = toshiba_rbtx4927_irq_ioc_disable, |
241 | .unmask = toshiba_rbtx4927_irq_ioc_enable, | 238 | .unmask = toshiba_rbtx4927_irq_ioc_enable, |
242 | .end = toshiba_rbtx4927_irq_ioc_end, | ||
243 | }; | 239 | }; |
244 | #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000 | 240 | #define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000 |
245 | #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006 | 241 | #define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006 |
@@ -388,23 +384,6 @@ static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq) | |||
388 | TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v); | 384 | TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v); |
389 | } | 385 | } |
390 | 386 | ||
391 | static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq) | ||
392 | { | ||
393 | TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ, | ||
394 | "irq=%d\n", irq); | ||
395 | |||
396 | if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG | ||
397 | || irq > TOSHIBA_RBTX4927_IRQ_IOC_END) { | ||
398 | TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR, | ||
399 | "bad irq=%d\n", irq); | ||
400 | panic("\n"); | ||
401 | } | ||
402 | |||
403 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
404 | toshiba_rbtx4927_irq_ioc_enable(irq); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | 387 | ||
409 | /**********************************************************************************/ | 388 | /**********************************************************************************/ |
410 | /* Functions for isa */ | 389 | /* Functions for isa */ |
diff --git a/arch/mips/tx4938/common/irq.c b/arch/mips/tx4938/common/irq.c index 42e127683ae9..a347b424d91c 100644 --- a/arch/mips/tx4938/common/irq.c +++ b/arch/mips/tx4938/common/irq.c | |||
@@ -39,11 +39,9 @@ | |||
39 | 39 | ||
40 | static void tx4938_irq_cp0_enable(unsigned int irq); | 40 | static void tx4938_irq_cp0_enable(unsigned int irq); |
41 | static void tx4938_irq_cp0_disable(unsigned int irq); | 41 | static void tx4938_irq_cp0_disable(unsigned int irq); |
42 | static void tx4938_irq_cp0_end(unsigned int irq); | ||
43 | 42 | ||
44 | static void tx4938_irq_pic_enable(unsigned int irq); | 43 | static void tx4938_irq_pic_enable(unsigned int irq); |
45 | static void tx4938_irq_pic_disable(unsigned int irq); | 44 | static void tx4938_irq_pic_disable(unsigned int irq); |
46 | static void tx4938_irq_pic_end(unsigned int irq); | ||
47 | 45 | ||
48 | /**********************************************************************************/ | 46 | /**********************************************************************************/ |
49 | /* Kernel structs for all pic's */ | 47 | /* Kernel structs for all pic's */ |
@@ -56,7 +54,6 @@ static struct irq_chip tx4938_irq_cp0_type = { | |||
56 | .mask = tx4938_irq_cp0_disable, | 54 | .mask = tx4938_irq_cp0_disable, |
57 | .mask_ack = tx4938_irq_cp0_disable, | 55 | .mask_ack = tx4938_irq_cp0_disable, |
58 | .unmask = tx4938_irq_cp0_enable, | 56 | .unmask = tx4938_irq_cp0_enable, |
59 | .end = tx4938_irq_cp0_end, | ||
60 | }; | 57 | }; |
61 | 58 | ||
62 | #define TX4938_PIC_NAME "TX4938-PIC" | 59 | #define TX4938_PIC_NAME "TX4938-PIC" |
@@ -66,7 +63,6 @@ static struct irq_chip tx4938_irq_pic_type = { | |||
66 | .mask = tx4938_irq_pic_disable, | 63 | .mask = tx4938_irq_pic_disable, |
67 | .mask_ack = tx4938_irq_pic_disable, | 64 | .mask_ack = tx4938_irq_pic_disable, |
68 | .unmask = tx4938_irq_pic_enable, | 65 | .unmask = tx4938_irq_pic_enable, |
69 | .end = tx4938_irq_pic_end, | ||
70 | }; | 66 | }; |
71 | 67 | ||
72 | static struct irqaction tx4938_irq_pic_action = { | 68 | static struct irqaction tx4938_irq_pic_action = { |
@@ -104,14 +100,6 @@ tx4938_irq_cp0_disable(unsigned int irq) | |||
104 | clear_c0_status(tx4938_irq_cp0_mask(irq)); | 100 | clear_c0_status(tx4938_irq_cp0_mask(irq)); |
105 | } | 101 | } |
106 | 102 | ||
107 | static void | ||
108 | tx4938_irq_cp0_end(unsigned int irq) | ||
109 | { | ||
110 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
111 | tx4938_irq_cp0_enable(irq); | ||
112 | } | ||
113 | } | ||
114 | |||
115 | /**********************************************************************************/ | 103 | /**********************************************************************************/ |
116 | /* Functions for pic */ | 104 | /* Functions for pic */ |
117 | /**********************************************************************************/ | 105 | /**********************************************************************************/ |
@@ -269,14 +257,6 @@ tx4938_irq_pic_disable(unsigned int irq) | |||
269 | tx4938_irq_pic_mask(irq), 0); | 257 | tx4938_irq_pic_mask(irq), 0); |
270 | } | 258 | } |
271 | 259 | ||
272 | static void | ||
273 | tx4938_irq_pic_end(unsigned int irq) | ||
274 | { | ||
275 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
276 | tx4938_irq_pic_enable(irq); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | /**********************************************************************************/ | 260 | /**********************************************************************************/ |
281 | /* Main init functions */ | 261 | /* Main init functions */ |
282 | /**********************************************************************************/ | 262 | /**********************************************************************************/ |
diff --git a/arch/mips/tx4938/toshiba_rbtx4938/irq.c b/arch/mips/tx4938/toshiba_rbtx4938/irq.c index 8c87a35f3068..b6f363d08011 100644 --- a/arch/mips/tx4938/toshiba_rbtx4938/irq.c +++ b/arch/mips/tx4938/toshiba_rbtx4938/irq.c | |||
@@ -89,7 +89,6 @@ IRQ Device | |||
89 | 89 | ||
90 | static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq); | 90 | static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq); |
91 | static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq); | 91 | static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq); |
92 | static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq); | ||
93 | 92 | ||
94 | #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" | 93 | #define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC" |
95 | static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { | 94 | static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { |
@@ -98,7 +97,6 @@ static struct irq_chip toshiba_rbtx4938_irq_ioc_type = { | |||
98 | .mask = toshiba_rbtx4938_irq_ioc_disable, | 97 | .mask = toshiba_rbtx4938_irq_ioc_disable, |
99 | .mask_ack = toshiba_rbtx4938_irq_ioc_disable, | 98 | .mask_ack = toshiba_rbtx4938_irq_ioc_disable, |
100 | .unmask = toshiba_rbtx4938_irq_ioc_enable, | 99 | .unmask = toshiba_rbtx4938_irq_ioc_enable, |
101 | .end = toshiba_rbtx4938_irq_ioc_end, | ||
102 | }; | 100 | }; |
103 | 101 | ||
104 | #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000 | 102 | #define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000 |
@@ -167,14 +165,6 @@ toshiba_rbtx4938_irq_ioc_disable(unsigned int irq) | |||
167 | TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB); | 165 | TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB); |
168 | } | 166 | } |
169 | 167 | ||
170 | static void | ||
171 | toshiba_rbtx4938_irq_ioc_end(unsigned int irq) | ||
172 | { | ||
173 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) { | ||
174 | toshiba_rbtx4938_irq_ioc_enable(irq); | ||
175 | } | ||
176 | } | ||
177 | |||
178 | extern void __init txx9_spi_irqinit(int irc_irq); | 168 | extern void __init txx9_spi_irqinit(int irc_irq); |
179 | 169 | ||
180 | void __init arch_init_irq(void) | 170 | void __init arch_init_irq(void) |
diff --git a/arch/mips/vr41xx/Kconfig b/arch/mips/vr41xx/Kconfig index 92f41f6f934a..c8dfd8092cab 100644 --- a/arch/mips/vr41xx/Kconfig +++ b/arch/mips/vr41xx/Kconfig | |||
@@ -6,6 +6,7 @@ config CASIO_E55 | |||
6 | select ISA | 6 | select ISA |
7 | select SYS_SUPPORTS_32BIT_KERNEL | 7 | select SYS_SUPPORTS_32BIT_KERNEL |
8 | select SYS_SUPPORTS_LITTLE_ENDIAN | 8 | select SYS_SUPPORTS_LITTLE_ENDIAN |
9 | select GENERIC_HARDIRQS_NO__DO_IRQ | ||
9 | 10 | ||
10 | config IBM_WORKPAD | 11 | config IBM_WORKPAD |
11 | bool "Support for IBM WorkPad z50" | 12 | bool "Support for IBM WorkPad z50" |
@@ -15,6 +16,7 @@ config IBM_WORKPAD | |||
15 | select ISA | 16 | select ISA |
16 | select SYS_SUPPORTS_32BIT_KERNEL | 17 | select SYS_SUPPORTS_32BIT_KERNEL |
17 | select SYS_SUPPORTS_LITTLE_ENDIAN | 18 | select SYS_SUPPORTS_LITTLE_ENDIAN |
19 | select GENERIC_HARDIRQS_NO__DO_IRQ | ||
18 | 20 | ||
19 | config NEC_CMBVR4133 | 21 | config NEC_CMBVR4133 |
20 | bool "Support for NEC CMB-VR4133" | 22 | bool "Support for NEC CMB-VR4133" |
@@ -39,6 +41,7 @@ config TANBAC_TB022X | |||
39 | select IRQ_CPU | 41 | select IRQ_CPU |
40 | select SYS_SUPPORTS_32BIT_KERNEL | 42 | select SYS_SUPPORTS_32BIT_KERNEL |
41 | select SYS_SUPPORTS_LITTLE_ENDIAN | 43 | select SYS_SUPPORTS_LITTLE_ENDIAN |
44 | select GENERIC_HARDIRQS_NO__DO_IRQ | ||
42 | help | 45 | help |
43 | The TANBAC VR4131 multichip module(TB0225) and | 46 | The TANBAC VR4131 multichip module(TB0225) and |
44 | the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms | 47 | the TANBAC VR4131DIMM(TB0229) are MIPS-based platforms |
@@ -71,6 +74,7 @@ config VICTOR_MPC30X | |||
71 | select IRQ_CPU | 74 | select IRQ_CPU |
72 | select SYS_SUPPORTS_32BIT_KERNEL | 75 | select SYS_SUPPORTS_32BIT_KERNEL |
73 | select SYS_SUPPORTS_LITTLE_ENDIAN | 76 | select SYS_SUPPORTS_LITTLE_ENDIAN |
77 | select GENERIC_HARDIRQS_NO__DO_IRQ | ||
74 | 78 | ||
75 | config ZAO_CAPCELLA | 79 | config ZAO_CAPCELLA |
76 | bool "Support for ZAO Networks Capcella" | 80 | bool "Support for ZAO Networks Capcella" |
@@ -80,6 +84,7 @@ config ZAO_CAPCELLA | |||
80 | select IRQ_CPU | 84 | select IRQ_CPU |
81 | select SYS_SUPPORTS_32BIT_KERNEL | 85 | select SYS_SUPPORTS_32BIT_KERNEL |
82 | select SYS_SUPPORTS_LITTLE_ENDIAN | 86 | select SYS_SUPPORTS_LITTLE_ENDIAN |
87 | select GENERIC_HARDIRQS_NO__DO_IRQ | ||
83 | 88 | ||
84 | config PCI_VR41XX | 89 | config PCI_VR41XX |
85 | bool "Add PCI control unit support of NEC VR4100 series" | 90 | bool "Add PCI control unit support of NEC VR4100 series" |
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 54b92a74c7ac..c075261976c5 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c | |||
@@ -427,19 +427,12 @@ static void enable_sysint1_irq(unsigned int irq) | |||
427 | icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq)); | 427 | icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq)); |
428 | } | 428 | } |
429 | 429 | ||
430 | static void end_sysint1_irq(unsigned int irq) | ||
431 | { | ||
432 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
433 | icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq)); | ||
434 | } | ||
435 | |||
436 | static struct irq_chip sysint1_irq_type = { | 430 | static struct irq_chip sysint1_irq_type = { |
437 | .typename = "SYSINT1", | 431 | .typename = "SYSINT1", |
438 | .ack = disable_sysint1_irq, | 432 | .ack = disable_sysint1_irq, |
439 | .mask = disable_sysint1_irq, | 433 | .mask = disable_sysint1_irq, |
440 | .mask_ack = disable_sysint1_irq, | 434 | .mask_ack = disable_sysint1_irq, |
441 | .unmask = enable_sysint1_irq, | 435 | .unmask = enable_sysint1_irq, |
442 | .end = end_sysint1_irq, | ||
443 | }; | 436 | }; |
444 | 437 | ||
445 | static void disable_sysint2_irq(unsigned int irq) | 438 | static void disable_sysint2_irq(unsigned int irq) |
@@ -452,19 +445,12 @@ static void enable_sysint2_irq(unsigned int irq) | |||
452 | icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq)); | 445 | icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq)); |
453 | } | 446 | } |
454 | 447 | ||
455 | static void end_sysint2_irq(unsigned int irq) | ||
456 | { | ||
457 | if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) | ||
458 | icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq)); | ||
459 | } | ||
460 | |||
461 | static struct irq_chip sysint2_irq_type = { | 448 | static struct irq_chip sysint2_irq_type = { |
462 | .typename = "SYSINT2", | 449 | .typename = "SYSINT2", |
463 | .ack = disable_sysint2_irq, | 450 | .ack = disable_sysint2_irq, |
464 | .mask = disable_sysint2_irq, | 451 | .mask = disable_sysint2_irq, |
465 | .mask_ack = disable_sysint2_irq, | 452 | .mask_ack = disable_sysint2_irq, |
466 | .unmask = enable_sysint2_irq, | 453 | .unmask = enable_sysint2_irq, |
467 | .end = end_sysint2_irq, | ||
468 | }; | 454 | }; |
469 | 455 | ||
470 | static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) | 456 | static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) |
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c index 31bcdae84823..0e837762cc5b 100644 --- a/arch/powerpc/platforms/embedded6xx/ls_uart.c +++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c | |||
@@ -14,7 +14,7 @@ static unsigned long avr_clock; | |||
14 | 14 | ||
15 | static struct work_struct wd_work; | 15 | static struct work_struct wd_work; |
16 | 16 | ||
17 | static void wd_stop(void *unused) | 17 | static void wd_stop(struct work_struct *unused) |
18 | { | 18 | { |
19 | const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; | 19 | const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK"; |
20 | int i = 0, rescue = 8; | 20 | int i = 0, rescue = 8; |
@@ -122,7 +122,7 @@ static int __init ls_uarts_init(void) | |||
122 | 122 | ||
123 | ls_uart_init(); | 123 | ls_uart_init(); |
124 | 124 | ||
125 | INIT_WORK(&wd_work, wd_stop, NULL); | 125 | INIT_WORK(&wd_work, wd_stop); |
126 | schedule_work(&wd_work); | 126 | schedule_work(&wd_work); |
127 | 127 | ||
128 | return 0; | 128 | return 0; |
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c index afa593a8544a..c3a89414ddc0 100644 --- a/arch/powerpc/platforms/powermac/backlight.c +++ b/arch/powerpc/platforms/powermac/backlight.c | |||
@@ -18,11 +18,11 @@ | |||
18 | 18 | ||
19 | #define OLD_BACKLIGHT_MAX 15 | 19 | #define OLD_BACKLIGHT_MAX 15 |
20 | 20 | ||
21 | static void pmac_backlight_key_worker(void *data); | 21 | static void pmac_backlight_key_worker(struct work_struct *work); |
22 | static void pmac_backlight_set_legacy_worker(void *data); | 22 | static void pmac_backlight_set_legacy_worker(struct work_struct *work); |
23 | 23 | ||
24 | static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker, NULL); | 24 | static DECLARE_WORK(pmac_backlight_key_work, pmac_backlight_key_worker); |
25 | static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker, NULL); | 25 | static DECLARE_WORK(pmac_backlight_set_legacy_work, pmac_backlight_set_legacy_worker); |
26 | 26 | ||
27 | /* Although these variables are used in interrupt context, it makes no sense to | 27 | /* Although these variables are used in interrupt context, it makes no sense to |
28 | * protect them. No user is able to produce enough key events per second and | 28 | * protect them. No user is able to produce enough key events per second and |
@@ -94,7 +94,7 @@ int pmac_backlight_curve_lookup(struct fb_info *info, int value) | |||
94 | return level; | 94 | return level; |
95 | } | 95 | } |
96 | 96 | ||
97 | static void pmac_backlight_key_worker(void *data) | 97 | static void pmac_backlight_key_worker(struct work_struct *work) |
98 | { | 98 | { |
99 | if (atomic_read(&kernel_backlight_disabled)) | 99 | if (atomic_read(&kernel_backlight_disabled)) |
100 | return; | 100 | return; |
@@ -166,7 +166,7 @@ static int __pmac_backlight_set_legacy_brightness(int brightness) | |||
166 | return error; | 166 | return error; |
167 | } | 167 | } |
168 | 168 | ||
169 | static void pmac_backlight_set_legacy_worker(void *data) | 169 | static void pmac_backlight_set_legacy_worker(struct work_struct *work) |
170 | { | 170 | { |
171 | if (atomic_read(&kernel_backlight_disabled)) | 171 | if (atomic_read(&kernel_backlight_disabled)) |
172 | return; | 172 | return; |
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c index 137077451316..49037edf7d39 100644 --- a/arch/powerpc/platforms/pseries/eeh_event.c +++ b/arch/powerpc/platforms/pseries/eeh_event.c | |||
@@ -37,8 +37,8 @@ | |||
37 | /* EEH event workqueue setup. */ | 37 | /* EEH event workqueue setup. */ |
38 | static DEFINE_SPINLOCK(eeh_eventlist_lock); | 38 | static DEFINE_SPINLOCK(eeh_eventlist_lock); |
39 | LIST_HEAD(eeh_eventlist); | 39 | LIST_HEAD(eeh_eventlist); |
40 | static void eeh_thread_launcher(void *); | 40 | static void eeh_thread_launcher(struct work_struct *); |
41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL); | 41 | DECLARE_WORK(eeh_event_wq, eeh_thread_launcher); |
42 | 42 | ||
43 | /* Serialize reset sequences for a given pci device */ | 43 | /* Serialize reset sequences for a given pci device */ |
44 | DEFINE_MUTEX(eeh_event_mutex); | 44 | DEFINE_MUTEX(eeh_event_mutex); |
@@ -103,7 +103,7 @@ static int eeh_event_handler(void * dummy) | |||
103 | * eeh_thread_launcher | 103 | * eeh_thread_launcher |
104 | * @dummy - unused | 104 | * @dummy - unused |
105 | */ | 105 | */ |
106 | static void eeh_thread_launcher(void *dummy) | 106 | static void eeh_thread_launcher(struct work_struct *dummy) |
107 | { | 107 | { |
108 | if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) | 108 | if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0) |
109 | printk(KERN_ERR "Failed to start EEH daemon\n"); | 109 | printk(KERN_ERR "Failed to start EEH daemon\n"); |
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c index 2e1943e27819..709952c25f29 100644 --- a/arch/ppc/8260_io/fcc_enet.c +++ b/arch/ppc/8260_io/fcc_enet.c | |||
@@ -385,6 +385,7 @@ struct fcc_enet_private { | |||
385 | phy_info_t *phy; | 385 | phy_info_t *phy; |
386 | struct work_struct phy_relink; | 386 | struct work_struct phy_relink; |
387 | struct work_struct phy_display_config; | 387 | struct work_struct phy_display_config; |
388 | struct net_device *dev; | ||
388 | 389 | ||
389 | uint sequence_done; | 390 | uint sequence_done; |
390 | 391 | ||
@@ -1391,10 +1392,11 @@ static phy_info_t *phy_info[] = { | |||
1391 | NULL | 1392 | NULL |
1392 | }; | 1393 | }; |
1393 | 1394 | ||
1394 | static void mii_display_status(void *data) | 1395 | static void mii_display_status(struct work_struct *work) |
1395 | { | 1396 | { |
1396 | struct net_device *dev = data; | 1397 | volatile struct fcc_enet_private *fep = |
1397 | volatile struct fcc_enet_private *fep = dev->priv; | 1398 | container_of(work, struct fcc_enet_private, phy_relink); |
1399 | struct net_device *dev = fep->dev; | ||
1398 | uint s = fep->phy_status; | 1400 | uint s = fep->phy_status; |
1399 | 1401 | ||
1400 | if (!fep->link && !fep->old_link) { | 1402 | if (!fep->link && !fep->old_link) { |
@@ -1428,10 +1430,12 @@ static void mii_display_status(void *data) | |||
1428 | printk(".\n"); | 1430 | printk(".\n"); |
1429 | } | 1431 | } |
1430 | 1432 | ||
1431 | static void mii_display_config(void *data) | 1433 | static void mii_display_config(struct work_struct *work) |
1432 | { | 1434 | { |
1433 | struct net_device *dev = data; | 1435 | volatile struct fcc_enet_private *fep = |
1434 | volatile struct fcc_enet_private *fep = dev->priv; | 1436 | container_of(work, struct fcc_enet_private, |
1437 | phy_display_config); | ||
1438 | struct net_device *dev = fep->dev; | ||
1435 | uint s = fep->phy_status; | 1439 | uint s = fep->phy_status; |
1436 | 1440 | ||
1437 | printk("%s: config: auto-negotiation ", dev->name); | 1441 | printk("%s: config: auto-negotiation ", dev->name); |
@@ -1758,8 +1762,9 @@ static int __init fec_enet_init(void) | |||
1758 | cep->phy_id_done = 0; | 1762 | cep->phy_id_done = 0; |
1759 | cep->phy_addr = fip->fc_phyaddr; | 1763 | cep->phy_addr = fip->fc_phyaddr; |
1760 | mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); | 1764 | mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); |
1761 | INIT_WORK(&cep->phy_relink, mii_display_status, dev); | 1765 | INIT_WORK(&cep->phy_relink, mii_display_status); |
1762 | INIT_WORK(&cep->phy_display_config, mii_display_config, dev); | 1766 | INIT_WORK(&cep->phy_display_config, mii_display_config); |
1767 | cep->dev = dev; | ||
1763 | #endif /* CONFIG_USE_MDIO */ | 1768 | #endif /* CONFIG_USE_MDIO */ |
1764 | 1769 | ||
1765 | fip++; | 1770 | fip++; |
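Because the new work handlers no longer receive a data pointer, this driver (and fec.c right after it) recovers its state with container_of() on the embedded work_struct and keeps a new struct net_device *dev back-pointer in the private structure, set once at init time. The general pattern, using a hypothetical my_priv/my_phy_task pair rather than the driver's real names:

    struct my_priv {
    	struct work_struct	phy_task;
    	struct net_device	*dev;		/* back-pointer filled in at init, like cep->dev above */
    };

    static void my_phy_task(struct work_struct *work)
    {
    	struct my_priv *priv = container_of(work, struct my_priv, phy_task);
    	struct net_device *dev = priv->dev;

    	printk(KERN_INFO "%s: PHY event\n", dev->name);
    }

    /* at init time:
     *	priv->dev = dev;
     *	INIT_WORK(&priv->phy_task, my_phy_task);
     */

One subtlety visible in the fcc_enet.c hunk: the private struct there is accessed through a volatile-qualified pointer, so the container_of() result is assigned to a volatile pointer as well; the back-pointer idea is the same either way.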
diff --git a/arch/ppc/8xx_io/fec.c b/arch/ppc/8xx_io/fec.c index 2f9fa9e3d331..e6c28fb423b2 100644 --- a/arch/ppc/8xx_io/fec.c +++ b/arch/ppc/8xx_io/fec.c | |||
@@ -173,6 +173,7 @@ struct fec_enet_private { | |||
173 | uint phy_speed; | 173 | uint phy_speed; |
174 | phy_info_t *phy; | 174 | phy_info_t *phy; |
175 | struct work_struct phy_task; | 175 | struct work_struct phy_task; |
176 | struct net_device *dev; | ||
176 | 177 | ||
177 | uint sequence_done; | 178 | uint sequence_done; |
178 | 179 | ||
@@ -1263,10 +1264,11 @@ static void mii_display_status(struct net_device *dev) | |||
1263 | printk(".\n"); | 1264 | printk(".\n"); |
1264 | } | 1265 | } |
1265 | 1266 | ||
1266 | static void mii_display_config(void *priv) | 1267 | static void mii_display_config(struct work_struct *work) |
1267 | { | 1268 | { |
1268 | struct net_device *dev = (struct net_device *)priv; | 1269 | struct fec_enet_private *fep = |
1269 | struct fec_enet_private *fep = dev->priv; | 1270 | container_of(work, struct fec_enet_private, phy_task); |
1271 | struct net_device *dev = fep->dev; | ||
1270 | volatile uint *s = &(fep->phy_status); | 1272 | volatile uint *s = &(fep->phy_status); |
1271 | 1273 | ||
1272 | printk("%s: config: auto-negotiation ", dev->name); | 1274 | printk("%s: config: auto-negotiation ", dev->name); |
@@ -1295,10 +1297,11 @@ static void mii_display_config(void *priv) | |||
1295 | fep->sequence_done = 1; | 1297 | fep->sequence_done = 1; |
1296 | } | 1298 | } |
1297 | 1299 | ||
1298 | static void mii_relink(void *priv) | 1300 | static void mii_relink(struct work_struct *work) |
1299 | { | 1301 | { |
1300 | struct net_device *dev = (struct net_device *)priv; | 1302 | struct fec_enet_private *fep = |
1301 | struct fec_enet_private *fep = dev->priv; | 1303 | container_of(work, struct fec_enet_private, phy_task); |
1304 | struct net_device *dev = fep->dev; | ||
1302 | int duplex; | 1305 | int duplex; |
1303 | 1306 | ||
1304 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; | 1307 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; |
@@ -1325,7 +1328,8 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) | |||
1325 | { | 1328 | { |
1326 | struct fec_enet_private *fep = dev->priv; | 1329 | struct fec_enet_private *fep = dev->priv; |
1327 | 1330 | ||
1328 | INIT_WORK(&fep->phy_task, mii_relink, (void *)dev); | 1331 | fep->dev = dev; |
1332 | INIT_WORK(&fep->phy_task, mii_relink); | ||
1329 | schedule_work(&fep->phy_task); | 1333 | schedule_work(&fep->phy_task); |
1330 | } | 1334 | } |
1331 | 1335 | ||
@@ -1333,7 +1337,8 @@ static void mii_queue_config(uint mii_reg, struct net_device *dev) | |||
1333 | { | 1337 | { |
1334 | struct fec_enet_private *fep = dev->priv; | 1338 | struct fec_enet_private *fep = dev->priv; |
1335 | 1339 | ||
1336 | INIT_WORK(&fep->phy_task, mii_display_config, (void *)dev); | 1340 | fep->dev = dev; |
1341 | INIT_WORK(&fep->phy_task, mii_display_config); | ||
1337 | schedule_work(&fep->phy_task); | 1342 | schedule_work(&fep->phy_task); |
1338 | } | 1343 | } |
1339 | 1344 | ||
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index af1e8fc7d985..67d5cf9cba83 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -92,8 +92,8 @@ static int appldata_timer_active; | |||
92 | * Work queue | 92 | * Work queue |
93 | */ | 93 | */ |
94 | static struct workqueue_struct *appldata_wq; | 94 | static struct workqueue_struct *appldata_wq; |
95 | static void appldata_work_fn(void *data); | 95 | static void appldata_work_fn(struct work_struct *work); |
96 | static DECLARE_WORK(appldata_work, appldata_work_fn, NULL); | 96 | static DECLARE_WORK(appldata_work, appldata_work_fn); |
97 | 97 | ||
98 | 98 | ||
99 | /* | 99 | /* |
@@ -125,7 +125,7 @@ static void appldata_timer_function(unsigned long data) | |||
125 | * | 125 | * |
126 | * call data gathering function for each (active) module | 126 | * call data gathering function for each (active) module |
127 | */ | 127 | */ |
128 | static void appldata_work_fn(void *data) | 128 | static void appldata_work_fn(struct work_struct *work) |
129 | { | 129 | { |
130 | struct list_head *lh; | 130 | struct list_head *lh; |
131 | struct appldata_ops *ops; | 131 | struct appldata_ops *ops; |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index bffc7e176970..d83d64af31f2 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -51,6 +51,14 @@ config GENERIC_TIME | |||
51 | config ARCH_MAY_HAVE_PC_FDC | 51 | config ARCH_MAY_HAVE_PC_FDC |
52 | bool | 52 | bool |
53 | 53 | ||
54 | config STACKTRACE_SUPPORT | ||
55 | bool | ||
56 | default y | ||
57 | |||
58 | config LOCKDEP_SUPPORT | ||
59 | bool | ||
60 | default y | ||
61 | |||
54 | source "init/Kconfig" | 62 | source "init/Kconfig" |
55 | 63 | ||
56 | menu "System type" | 64 | menu "System type" |
@@ -219,6 +227,20 @@ config SH_SHMIN | |||
219 | help | 227 | help |
220 | Select SHMIN if configuring for the SHMIN board. | 228 | Select SHMIN if configuring for the SHMIN board. |
221 | 229 | ||
230 | config SH_7206_SOLUTION_ENGINE | ||
231 | bool "SolutionEngine7206" | ||
232 | select CPU_SUBTYPE_SH7206 | ||
233 | help | ||
234 | Select 7206 SolutionEngine if configuring for a Hitachi SH7206 | ||
235 | evaluation board. | ||
236 | |||
237 | config SH_7619_SOLUTION_ENGINE | ||
238 | bool "SolutionEngine7619" | ||
239 | select CPU_SUBTYPE_SH7619 | ||
240 | help | ||
241 | Select 7619 SolutionEngine if configuring for a Hitachi SH7619 | ||
242 | evaluation board. | ||
243 | |||
222 | config SH_UNKNOWN | 244 | config SH_UNKNOWN |
223 | bool "BareCPU" | 245 | bool "BareCPU" |
224 | help | 246 | help |
@@ -280,12 +302,20 @@ config CF_BASE_ADDR | |||
280 | 302 | ||
281 | menu "Processor features" | 303 | menu "Processor features" |
282 | 304 | ||
283 | config CPU_LITTLE_ENDIAN | 305 | choice |
284 | bool "Little Endian" | 306 | prompt "Endianess selection" |
307 | default CPU_LITTLE_ENDIAN | ||
285 | help | 308 | help |
286 | Some SuperH machines can be configured for either little or big | 309 | Some SuperH machines can be configured for either little or big |
287 | endian byte order. These modes require different kernels. Say Y if | 310 | endian byte order. These modes require different kernels. |
288 | your machine is little endian, N if it's a big endian machine. | 311 | |
312 | config CPU_LITTLE_ENDIAN | ||
313 | bool "Little Endian" | ||
314 | |||
315 | config CPU_BIG_ENDIAN | ||
316 | bool "Big Endian" | ||
317 | |||
318 | endchoice | ||
289 | 319 | ||
290 | config SH_FPU | 320 | config SH_FPU |
291 | bool "FPU support" | 321 | bool "FPU support" |
@@ -345,6 +375,9 @@ config CPU_HAS_MASKREG_IRQ | |||
345 | config CPU_HAS_INTC2_IRQ | 375 | config CPU_HAS_INTC2_IRQ |
346 | bool | 376 | bool |
347 | 377 | ||
378 | config CPU_HAS_IPR_IRQ | ||
379 | bool | ||
380 | |||
348 | config CPU_HAS_SR_RB | 381 | config CPU_HAS_SR_RB |
349 | bool "CPU has SR.RB" | 382 | bool "CPU has SR.RB" |
350 | depends on CPU_SH3 || CPU_SH4 | 383 | depends on CPU_SH3 || CPU_SH4 |
@@ -357,6 +390,9 @@ config CPU_HAS_SR_RB | |||
357 | See <file:Documentation/sh/register-banks.txt> for further | 390 | See <file:Documentation/sh/register-banks.txt> for further |
358 | information on SR.RB and register banking in the kernel in general. | 391 | information on SR.RB and register banking in the kernel in general. |
359 | 392 | ||
393 | config CPU_HAS_PTEA | ||
394 | bool | ||
395 | |||
360 | endmenu | 396 | endmenu |
361 | 397 | ||
362 | menu "Timer support" | 398 | menu "Timer support" |
@@ -364,10 +400,25 @@ depends on !GENERIC_TIME | |||
364 | 400 | ||
365 | config SH_TMU | 401 | config SH_TMU |
366 | bool "TMU timer support" | 402 | bool "TMU timer support" |
403 | depends on CPU_SH3 || CPU_SH4 | ||
367 | default y | 404 | default y |
368 | help | 405 | help |
369 | This enables the use of the TMU as the system timer. | 406 | This enables the use of the TMU as the system timer. |
370 | 407 | ||
408 | config SH_CMT | ||
409 | bool "CMT timer support" | ||
410 | depends on CPU_SH2 | ||
411 | default y | ||
412 | help | ||
413 | This enables the use of the CMT as the system timer. | ||
414 | |||
415 | config SH_MTU2 | ||
416 | bool "MTU2 timer support" | ||
417 | depends on CPU_SH2A | ||
418 | default n | ||
419 | help | ||
420 | This enables the use of the MTU2 as the system timer. | ||
421 | |||
371 | endmenu | 422 | endmenu |
372 | 423 | ||
373 | source "arch/sh/boards/renesas/hs7751rvoip/Kconfig" | 424 | source "arch/sh/boards/renesas/hs7751rvoip/Kconfig" |
@@ -376,19 +427,52 @@ source "arch/sh/boards/renesas/rts7751r2d/Kconfig" | |||
376 | 427 | ||
377 | source "arch/sh/boards/renesas/r7780rp/Kconfig" | 428 | source "arch/sh/boards/renesas/r7780rp/Kconfig" |
378 | 429 | ||
430 | config SH_TIMER_IRQ | ||
431 | int | ||
432 | default "28" if CPU_SUBTYPE_SH7780 | ||
433 | default "86" if CPU_SUBTYPE_SH7619 | ||
434 | default "140" if CPU_SUBTYPE_SH7206 | ||
435 | default "16" | ||
436 | |||
437 | config NO_IDLE_HZ | ||
438 | bool "Dynamic tick timer" | ||
439 | help | ||
440 | Select this option if you want to disable continuous timer ticks | ||
441 | and have them programmed to occur as required. This option saves | ||
442 | power as the system can remain in idle state for longer. | ||
443 | |||
444 | By default dynamic tick is disabled during boot, and can be | ||
445 | manually enabled with: | ||
446 | |||
447 | echo 1 > /sys/devices/system/timer/timer0/dyn_tick | ||
448 | |||
449 | Alternatively, if you want dynamic tick automatically enabled | ||
450 | during boot, pass "dyntick=enable" via the kernel command string. | ||
451 | |||
452 | Please note that dynamic tick may affect the accuracy of | ||
453 | timekeeping on some platforms depending on the implementation. | ||
454 | |||
379 | config SH_PCLK_FREQ | 455 | config SH_PCLK_FREQ |
380 | int "Peripheral clock frequency (in Hz)" | 456 | int "Peripheral clock frequency (in Hz)" |
457 | default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343 | ||
458 | default "31250000" if CPU_SUBTYPE_SH7619 | ||
459 | default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \ | ||
460 | CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 || \ | ||
461 | CPU_SUBTYPE_SH7206 | ||
381 | default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780 | 462 | default "50000000" if CPU_SUBTYPE_SH7750 || CPU_SUBTYPE_SH7780 |
382 | default "60000000" if CPU_SUBTYPE_SH7751 | 463 | default "60000000" if CPU_SUBTYPE_SH7751 |
383 | default "33333333" if CPU_SUBTYPE_SH7300 || CPU_SUBTYPE_SH7770 || \ | ||
384 | CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7705 | ||
385 | default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343 | ||
386 | default "66000000" if CPU_SUBTYPE_SH4_202 | 464 | default "66000000" if CPU_SUBTYPE_SH4_202 |
387 | help | 465 | help |
388 | This option is used to specify the peripheral clock frequency. | 466 | This option is used to specify the peripheral clock frequency. |
389 | This is necessary for determining the reference clock value on | 467 | This is necessary for determining the reference clock value on |
390 | platforms lacking an RTC. | 468 | platforms lacking an RTC. |
391 | 469 | ||
470 | config SH_CLK_MD | ||
471 | int "CPU Mode Pin Setting" | ||
472 | depends on CPU_SUBTYPE_SH7619 || CPU_SUBTYPE_SH7206 | ||
473 | help | ||
474 | MD2 - MD0 clock mode pin setting. | ||
475 | |||
392 | menu "CPU Frequency scaling" | 476 | menu "CPU Frequency scaling" |
393 | 477 | ||
394 | source "drivers/cpufreq/Kconfig" | 478 | source "drivers/cpufreq/Kconfig" |
@@ -421,6 +505,8 @@ config HEARTBEAT | |||
421 | behavior is platform-dependent, but normally the flash frequency is | 505 | behavior is platform-dependent, but normally the flash frequency is |
422 | a hyperbolic function of the 5-minute load average. | 506 | a hyperbolic function of the 5-minute load average. |
423 | 507 | ||
508 | source "arch/sh/drivers/Kconfig" | ||
509 | |||
424 | endmenu | 510 | endmenu |
425 | 511 | ||
426 | config ISA_DMA_API | 512 | config ISA_DMA_API |
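The new SH_TIMER_IRQ and SH_PCLK_FREQ defaults above move per-subtype numbers out of board headers and into Kconfig. A minimal sketch of how timer setup code might consume them; the handler body, the /4 prescaler and the "timer" name are illustrative assumptions, not taken from this patch:

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/param.h>

	static irqreturn_t timer_tick(int irq, void *dev_id)
	{
		/* acknowledge the counter underflow and advance jiffies here */
		return IRQ_HANDLED;
	}

	static int __init board_timer_init(void)
	{
		/* Assume the counter runs at PCLK/4; reload value for one tick. */
		unsigned long interval = (CONFIG_SH_PCLK_FREQ / 4 + HZ / 2) / HZ;

		printk(KERN_INFO "timer: %lu cycles/tick on IRQ %d\n",
		       interval, CONFIG_SH_TIMER_IRQ);

		return request_irq(CONFIG_SH_TIMER_IRQ, timer_tick,
				   IRQF_DISABLED, "timer", NULL);
	}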
diff --git a/arch/sh/Kconfig.debug b/arch/sh/Kconfig.debug index 48479e014dac..66a25ef4ef1b 100644 --- a/arch/sh/Kconfig.debug +++ b/arch/sh/Kconfig.debug | |||
@@ -1,5 +1,9 @@ | |||
1 | menu "Kernel hacking" | 1 | menu "Kernel hacking" |
2 | 2 | ||
3 | config TRACE_IRQFLAGS_SUPPORT | ||
4 | bool | ||
5 | default y | ||
6 | |||
3 | source "lib/Kconfig.debug" | 7 | source "lib/Kconfig.debug" |
4 | 8 | ||
5 | config SH_STANDARD_BIOS | 9 | config SH_STANDARD_BIOS |
@@ -17,7 +21,18 @@ config SH_STANDARD_BIOS | |||
17 | 21 | ||
18 | config EARLY_SCIF_CONSOLE | 22 | config EARLY_SCIF_CONSOLE |
19 | bool "Use early SCIF console" | 23 | bool "Use early SCIF console" |
20 | depends on CPU_SH4 || CPU_SH2A && !SH_STANDARD_BIOS | 24 | help |
25 | This enables an early console using a fixed SCIF port. This can | ||
26 | be used by platforms that either do not run the SH standard | ||
27 | BIOS or do not wish to use the BIOS callbacks for serial | ||
28 | I/O. | ||
29 | |||
30 | config EARLY_SCIF_CONSOLE_PORT | ||
31 | hex "SCIF port for early console" | ||
32 | depends on EARLY_SCIF_CONSOLE | ||
33 | default "0xffe00000" if CPU_SUBTYPE_SH7780 | ||
34 | default "0xfffe9800" if CPU_SUBTYPE_SH7206 | ||
35 | default "0xffe80000" if CPU_SH4 | ||
21 | 36 | ||
22 | config EARLY_PRINTK | 37 | config EARLY_PRINTK |
23 | bool "Early printk support" | 38 | bool "Early printk support" |
@@ -30,6 +45,11 @@ config EARLY_PRINTK | |||
30 | when the kernel may crash or hang before the serial console is | 45 | when the kernel may crash or hang before the serial console is |
31 | initialised. If unsure, say N. | 46 | initialised. If unsure, say N. |
32 | 47 | ||
48 | On devices that run SH-IPL and want to keep the port | ||
49 | initialization consistent while not using the BIOS callbacks, | ||
50 | select both EARLY_SCIF_CONSOLE and SH_STANDARD_BIOS, and use | ||
51 | the kernel command line option to toggle between the two. | ||
52 | |||
33 | config DEBUG_STACKOVERFLOW | 53 | config DEBUG_STACKOVERFLOW |
34 | bool "Check for stack overflows" | 54 | bool "Check for stack overflows" |
35 | depends on DEBUG_KERNEL | 55 | depends on DEBUG_KERNEL |
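EARLY_SCIF_CONSOLE_PORT turns the early console base address into a configurable value rather than a hard-coded per-CPU constant. A rough sketch of the polled output that sits behind such an early console; the register offsets and the TDFE bit follow the usual SH-4 SCIF layout and are assumptions for illustration only:

	#include <linux/console.h>
	#include <asm/io.h>
	#include <asm/processor.h>

	#define SCIF_BASE	CONFIG_EARLY_SCIF_CONSOLE_PORT
	#define SCFTDR		(SCIF_BASE + 0x0c)	/* transmit FIFO data */
	#define SCFSR		(SCIF_BASE + 0x10)	/* serial status */
	#define SCFSR_TDFE	0x0020			/* TX FIFO has room */

	static void scif_sputc(char c)
	{
		while (!(ctrl_inw(SCFSR) & SCFSR_TDFE))
			cpu_relax();
		ctrl_outb(c, SCFTDR);
		ctrl_outw(ctrl_inw(SCFSR) & ~SCFSR_TDFE, SCFSR);
	}

	static void scif_sercon_write(struct console *con, const char *s,
				      unsigned int count)
	{
		while (count-- > 0)
			scif_sputc(*s++);
	}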
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index 26d62ff51a64..d10bba5e1074 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
@@ -13,10 +13,6 @@ | |||
13 | # for "archclean" and "archdep" for cleaning up and making dependencies for | 13 | # for "archclean" and "archdep" for cleaning up and making dependencies for |
14 | # this architecture | 14 | # this architecture |
15 | # | 15 | # |
16 | |||
17 | cflags-y := -mb | ||
18 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) := -ml | ||
19 | |||
20 | isa-y := any | 16 | isa-y := any |
21 | isa-$(CONFIG_SH_DSP) := sh | 17 | isa-$(CONFIG_SH_DSP) := sh |
22 | isa-$(CONFIG_CPU_SH2) := sh2 | 18 | isa-$(CONFIG_CPU_SH2) := sh2 |
@@ -38,13 +34,16 @@ isa-y := $(isa-y)-nofpu | |||
38 | endif | 34 | endif |
39 | endif | 35 | endif |
40 | 36 | ||
41 | cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),) | 37 | cflags-$(CONFIG_CPU_SH2) := -m2 |
42 | 38 | cflags-$(CONFIG_CPU_SH3) := -m3 | |
43 | cflags-$(CONFIG_CPU_SH2) += -m2 | 39 | cflags-$(CONFIG_CPU_SH4) := -m4 \ |
44 | cflags-$(CONFIG_CPU_SH3) += -m3 | ||
45 | cflags-$(CONFIG_CPU_SH4) += -m4 \ | ||
46 | $(call cc-option,-mno-implicit-fp,-m4-nofpu) | 40 | $(call cc-option,-mno-implicit-fp,-m4-nofpu) |
47 | cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a-nofpu,) | 41 | cflags-$(CONFIG_CPU_SH4A) := -m4a $(call cc-option,-m4a-nofpu,) |
42 | |||
43 | cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mb | ||
44 | cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -ml | ||
45 | |||
46 | cflags-y += $(call as-option,-Wa$(comma)-isa=$(isa-y),) -ffreestanding | ||
48 | 47 | ||
49 | cflags-$(CONFIG_SH_DSP) += -Wa,-dsp | 48 | cflags-$(CONFIG_SH_DSP) += -Wa,-dsp |
50 | cflags-$(CONFIG_SH_KGDB) += -g | 49 | cflags-$(CONFIG_SH_KGDB) += -g |
@@ -59,7 +58,9 @@ OBJCOPYFLAGS := -O binary -R .note -R .comment -R .stab -R .stabstr -S | |||
59 | # never be used by anyone. Use a board-specific defconfig that has a | 58 | # never be used by anyone. Use a board-specific defconfig that has a |
60 | # reasonable chance of being current instead. | 59 | # reasonable chance of being current instead. |
61 | # | 60 | # |
62 | KBUILD_DEFCONFIG := rts7751r2d_defconfig | 61 | KBUILD_DEFCONFIG := r7780rp_defconfig |
62 | |||
63 | KBUILD_IMAGE := arch/sh/boot/zImage | ||
63 | 64 | ||
64 | # | 65 | # |
65 | # Choosing incompatible machines during configuration will result in | 66 |
@@ -109,6 +110,8 @@ machdir-$(CONFIG_SH_SH4202_MICRODEV) := superh/microdev | |||
109 | machdir-$(CONFIG_SH_LANDISK) := landisk | 110 | machdir-$(CONFIG_SH_LANDISK) := landisk |
110 | machdir-$(CONFIG_SH_TITAN) := titan | 111 | machdir-$(CONFIG_SH_TITAN) := titan |
111 | machdir-$(CONFIG_SH_SHMIN) := shmin | 112 | machdir-$(CONFIG_SH_SHMIN) := shmin |
113 | machdir-$(CONFIG_SH_7206_SOLUTION_ENGINE) := se/7206 | ||
114 | machdir-$(CONFIG_SH_7619_SOLUTION_ENGINE) := se/7619 | ||
112 | machdir-$(CONFIG_SH_UNKNOWN) := unknown | 115 | machdir-$(CONFIG_SH_UNKNOWN) := unknown |
113 | 116 | ||
114 | incdir-y := $(notdir $(machdir-y)) | 117 | incdir-y := $(notdir $(machdir-y)) |
@@ -124,6 +127,7 @@ core-$(CONFIG_HD64465) += arch/sh/cchips/hd6446x/hd64465/ | |||
124 | core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/ | 127 | core-$(CONFIG_VOYAGERGX) += arch/sh/cchips/voyagergx/ |
125 | 128 | ||
126 | cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2 | 129 | cpuincdir-$(CONFIG_CPU_SH2) := cpu-sh2 |
130 | cpuincdir-$(CONFIG_CPU_SH2A) := cpu-sh2a | ||
127 | cpuincdir-$(CONFIG_CPU_SH3) := cpu-sh3 | 131 | cpuincdir-$(CONFIG_CPU_SH3) := cpu-sh3 |
128 | cpuincdir-$(CONFIG_CPU_SH4) := cpu-sh4 | 132 | cpuincdir-$(CONFIG_CPU_SH4) := cpu-sh4 |
129 | 133 | ||
diff --git a/arch/sh/boards/renesas/r7780rp/Makefile b/arch/sh/boards/renesas/r7780rp/Makefile index f1776d027978..574b0316ed56 100644 --- a/arch/sh/boards/renesas/r7780rp/Makefile +++ b/arch/sh/boards/renesas/r7780rp/Makefile | |||
@@ -3,4 +3,6 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := setup.o io.o irq.o | 5 | obj-y := setup.o io.o irq.o |
6 | obj-$(CONFIG_HEARTBEAT) += led.o | 6 | |
7 | obj-$(CONFIG_HEARTBEAT) += led.o | ||
8 | obj-$(CONFIG_PUSH_SWITCH) += psw.o | ||
diff --git a/arch/sh/boards/renesas/r7780rp/irq.c b/arch/sh/boards/renesas/r7780rp/irq.c index aa15ec5bc69e..cc381e197783 100644 --- a/arch/sh/boards/renesas/r7780rp/irq.c +++ b/arch/sh/boards/renesas/r7780rp/irq.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | 14 | #include <linux/io.h> |
14 | #include <asm/r7780rp.h> | 15 | #include <asm/r7780rp.h> |
15 | 16 | ||
diff --git a/arch/sh/boards/renesas/r7780rp/psw.c b/arch/sh/boards/renesas/r7780rp/psw.c new file mode 100644 index 000000000000..c844dfa5d58d --- /dev/null +++ b/arch/sh/boards/renesas/r7780rp/psw.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * arch/sh/boards/renesas/r7780rp/psw.c | ||
3 | * | ||
4 | * push switch support for RDBRP-1/RDBREVRP-1 debug boards. | ||
5 | * | ||
6 | * Copyright (C) 2006 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/io.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <asm/mach/r7780rp.h> | ||
17 | #include <asm/push-switch.h> | ||
18 | |||
19 | static irqreturn_t psw_irq_handler(int irq, void *arg) | ||
20 | { | ||
21 | struct platform_device *pdev = arg; | ||
22 | struct push_switch *psw = platform_get_drvdata(pdev); | ||
23 | struct push_switch_platform_info *psw_info = pdev->dev.platform_data; | ||
24 | unsigned int l, mask; | ||
25 | int ret = 0; | ||
26 | |||
27 | l = ctrl_inw(PA_DBSW); | ||
28 | |||
29 | /* Nothing to do if there's no state change */ | ||
30 | if (psw->state) { | ||
31 | ret = 1; | ||
32 | goto out; | ||
33 | } | ||
34 | |||
35 | mask = l & 0x70; | ||
36 | /* Figure out who raised it */ | ||
37 | if (mask & (1 << psw_info->bit)) { | ||
38 | psw->state = !!(mask & (1 << psw_info->bit)); | ||
39 | if (psw->state) /* debounce */ | ||
40 | mod_timer(&psw->debounce, jiffies + 50); | ||
41 | |||
42 | ret = 1; | ||
43 | } | ||
44 | |||
45 | out: | ||
46 | /* Clear the switch IRQs */ | ||
47 | l |= (0x7 << 12); | ||
48 | ctrl_outw(l, PA_DBSW); | ||
49 | |||
50 | return IRQ_RETVAL(ret); | ||
51 | } | ||
52 | |||
53 | static struct resource psw_resources[] = { | ||
54 | [0] = { | ||
55 | .start = IRQ_PSW, | ||
56 | .flags = IORESOURCE_IRQ, | ||
57 | }, | ||
58 | }; | ||
59 | |||
60 | static struct push_switch_platform_info s2_platform_data = { | ||
61 | .name = "s2", | ||
62 | .bit = 6, | ||
63 | .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | | ||
64 | IRQF_SHARED, | ||
65 | .irq_handler = psw_irq_handler, | ||
66 | }; | ||
67 | |||
68 | static struct platform_device s2_switch_device = { | ||
69 | .name = "push-switch", | ||
70 | .id = 0, | ||
71 | .num_resources = ARRAY_SIZE(psw_resources), | ||
72 | .resource = psw_resources, | ||
73 | .dev = { | ||
74 | .platform_data = &s2_platform_data, | ||
75 | }, | ||
76 | }; | ||
77 | |||
78 | static struct push_switch_platform_info s3_platform_data = { | ||
79 | .name = "s3", | ||
80 | .bit = 5, | ||
81 | .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | | ||
82 | IRQF_SHARED, | ||
83 | .irq_handler = psw_irq_handler, | ||
84 | }; | ||
85 | |||
86 | static struct platform_device s3_switch_device = { | ||
87 | .name = "push-switch", | ||
88 | .id = 1, | ||
89 | .num_resources = ARRAY_SIZE(psw_resources), | ||
90 | .resource = psw_resources, | ||
91 | .dev = { | ||
92 | .platform_data = &s3_platform_data, | ||
93 | }, | ||
94 | }; | ||
95 | |||
96 | static struct push_switch_platform_info s4_platform_data = { | ||
97 | .name = "s4", | ||
98 | .bit = 4, | ||
99 | .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | | ||
100 | IRQF_SHARED, | ||
101 | .irq_handler = psw_irq_handler, | ||
102 | }; | ||
103 | |||
104 | static struct platform_device s4_switch_device = { | ||
105 | .name = "push-switch", | ||
106 | .id = 2, | ||
107 | .num_resources = ARRAY_SIZE(psw_resources), | ||
108 | .resource = psw_resources, | ||
109 | .dev = { | ||
110 | .platform_data = &s4_platform_data, | ||
111 | }, | ||
112 | }; | ||
113 | |||
114 | static struct platform_device *psw_devices[] = { | ||
115 | &s2_switch_device, &s3_switch_device, &s4_switch_device, | ||
116 | }; | ||
117 | |||
118 | static int __init psw_init(void) | ||
119 | { | ||
120 | return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices)); | ||
121 | } | ||
122 | module_init(psw_init); | ||
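psw.c only describes the three debug-board switches; the actual handling lives in the generic push-switch driver enabled by CONFIG_PUSH_SWITCH (sourced from arch/sh/drivers/Kconfig earlier in this series). A sketch of what that driver's probe is expected to do with the platform data registered here; the details are an assumption, not a copy of the real driver:

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>
	#include <linux/slab.h>
	#include <asm/push-switch.h>

	static int switch_drv_probe(struct platform_device *pdev)
	{
		struct push_switch_platform_info *info = pdev->dev.platform_data;
		struct resource *res;
		struct push_switch *psw;
		int ret;

		res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
		if (!res || !info)
			return -EINVAL;

		psw = kzalloc(sizeof(*psw), GFP_KERNEL);
		if (!psw)
			return -ENOMEM;

		platform_set_drvdata(pdev, psw);

		/* Hand control to the board-supplied handler, e.g. psw_irq_handler(). */
		ret = request_irq(res->start, info->irq_handler, info->irq_flags,
				  info->name, pdev);
		if (ret)
			kfree(psw);

		return ret;
	}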
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c index c331caeb694b..9f89c8de9db9 100644 --- a/arch/sh/boards/renesas/r7780rp/setup.c +++ b/arch/sh/boards/renesas/r7780rp/setup.c | |||
@@ -44,8 +44,37 @@ static struct platform_device m66596_usb_host_device = { | |||
44 | .resource = m66596_usb_host_resources, | 44 | .resource = m66596_usb_host_resources, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static struct resource cf_ide_resources[] = { | ||
48 | [0] = { | ||
49 | .start = 0x1f0, | ||
50 | .end = 0x1f0 + 8, | ||
51 | .flags = IORESOURCE_IO, | ||
52 | }, | ||
53 | [1] = { | ||
54 | .start = 0x1f0 + 0x206, | ||
55 | .end = 0x1f0 + 8 + 0x206 + 8, | ||
56 | .flags = IORESOURCE_IO, | ||
57 | }, | ||
58 | [2] = { | ||
59 | #ifdef CONFIG_SH_R7780MP | ||
60 | .start = 1, | ||
61 | #else | ||
62 | .start = 4, | ||
63 | #endif | ||
64 | .flags = IORESOURCE_IRQ, | ||
65 | }, | ||
66 | }; | ||
67 | |||
68 | static struct platform_device cf_ide_device = { | ||
69 | .name = "pata_platform", | ||
70 | .id = -1, | ||
71 | .num_resources = ARRAY_SIZE(cf_ide_resources), | ||
72 | .resource = cf_ide_resources, | ||
73 | }; | ||
74 | |||
47 | static struct platform_device *r7780rp_devices[] __initdata = { | 75 | static struct platform_device *r7780rp_devices[] __initdata = { |
48 | &m66596_usb_host_device, | 76 | &m66596_usb_host_device, |
77 | &cf_ide_device, | ||
49 | }; | 78 | }; |
50 | 79 | ||
51 | static int __init r7780rp_devices_setup(void) | 80 | static int __init r7780rp_devices_setup(void) |
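The cf_ide_device above hands the CF slot to the generic pata_platform libata driver purely through resources. A minimal sketch of how such a driver is expected to fish them back out in its probe routine (the function name and error handling here are illustrative assumptions):

	#include <linux/platform_device.h>

	static int pata_probe_sketch(struct platform_device *pdev)
	{
		struct resource *cmd = platform_get_resource(pdev, IORESOURCE_IO, 0);
		struct resource *ctl = platform_get_resource(pdev, IORESOURCE_IO, 1);
		struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

		if (!cmd || !ctl || !irq)
			return -EINVAL;

		/* cmd->start = 0x1f0 task file, ctl->start = alternate status,
		 * irq->start = 1 on R7780MP or 4 on R7780RP, per the table above. */
		return 0;
	}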
diff --git a/arch/sh/boards/se/7206/Makefile b/arch/sh/boards/se/7206/Makefile new file mode 100644 index 000000000000..63950f4f2453 --- /dev/null +++ b/arch/sh/boards/se/7206/Makefile | |||
@@ -0,0 +1,7 @@ | |||
1 | # | ||
2 | # Makefile for the 7206 SolutionEngine specific parts of the kernel | ||
3 | # | ||
4 | |||
5 | obj-y := setup.o io.o irq.o | ||
6 | obj-$(CONFIG_HEARTBEAT) += led.o | ||
7 | |||
diff --git a/arch/sh/boards/se/7206/io.c b/arch/sh/boards/se/7206/io.c new file mode 100644 index 000000000000..b557273e0cbe --- /dev/null +++ b/arch/sh/boards/se/7206/io.c | |||
@@ -0,0 +1,123 @@ | |||
1 | /* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $ | ||
2 | * | ||
3 | * linux/arch/sh/boards/se/7206/io.c | ||
4 | * | ||
5 | * Copyright (C) 2006 Yoshinori Sato | ||
6 | * | ||
7 | * I/O routine for Hitachi 7206 SolutionEngine. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/se7206.h> | ||
15 | |||
16 | |||
17 | static inline void delay(void) | ||
18 | { | ||
19 | ctrl_inw(0x20000000); /* P2 ROM Area */ | ||
20 | } | ||
21 | |||
22 | /* MS7750 requires special versions of the in*, out* routines, since | ||
23 | PC-like I/O ports are located in the upper byte of a 16-bit word | ||
24 | which can only be accessed 16 bits at a time. */ | ||
25 | |||
26 | static inline volatile __u16 * | ||
27 | port2adr(unsigned int port) | ||
28 | { | ||
29 | if (port >= 0x2000) | ||
30 | return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000)); | ||
31 | else if (port >= 0x300 && port < 0x310) | ||
32 | return (volatile __u16 *) (PA_SMSC + (port - 0x300)); | ||
33 | } | ||
34 | |||
35 | unsigned char se7206_inb(unsigned long port) | ||
36 | { | ||
37 | return (*port2adr(port))&0xff; | ||
38 | } | ||
39 | |||
40 | unsigned char se7206_inb_p(unsigned long port) | ||
41 | { | ||
42 | unsigned long v; | ||
43 | |||
44 | v = (*port2adr(port))&0xff; | ||
45 | delay(); | ||
46 | return v; | ||
47 | } | ||
48 | |||
49 | unsigned short se7206_inw(unsigned long port) | ||
50 | { | ||
51 | return *port2adr(port); | ||
52 | } | ||
53 | |||
54 | unsigned int se7206_inl(unsigned long port) | ||
55 | { | ||
56 | maybebadio(port); | ||
57 | return 0; | ||
58 | } | ||
59 | |||
60 | void se7206_outb(unsigned char value, unsigned long port) | ||
61 | { | ||
62 | *(port2adr(port)) = value; | ||
63 | } | ||
64 | |||
65 | void se7206_outb_p(unsigned char value, unsigned long port) | ||
66 | { | ||
67 | *(port2adr(port)) = value; | ||
68 | delay(); | ||
69 | } | ||
70 | |||
71 | void se7206_outw(unsigned short value, unsigned long port) | ||
72 | { | ||
73 | *port2adr(port) = value; | ||
74 | } | ||
75 | |||
76 | void se7206_outl(unsigned int value, unsigned long port) | ||
77 | { | ||
78 | maybebadio(port); | ||
79 | } | ||
80 | |||
81 | void se7206_insb(unsigned long port, void *addr, unsigned long count) | ||
82 | { | ||
83 | volatile __u16 *p = port2adr(port); | ||
84 | __u8 *ap = addr; | ||
85 | |||
86 | while (count--) | ||
87 | *ap++ = *p; | ||
88 | } | ||
89 | |||
90 | void se7206_insw(unsigned long port, void *addr, unsigned long count) | ||
91 | { | ||
92 | volatile __u16 *p = port2adr(port); | ||
93 | __u16 *ap = addr; | ||
94 | while (count--) | ||
95 | *ap++ = *p; | ||
96 | } | ||
97 | |||
98 | void se7206_insl(unsigned long port, void *addr, unsigned long count) | ||
99 | { | ||
100 | maybebadio(port); | ||
101 | } | ||
102 | |||
103 | void se7206_outsb(unsigned long port, const void *addr, unsigned long count) | ||
104 | { | ||
105 | volatile __u16 *p = port2adr(port); | ||
106 | const __u8 *ap = addr; | ||
107 | |||
108 | while (count--) | ||
109 | *p = *ap++; | ||
110 | } | ||
111 | |||
112 | void se7206_outsw(unsigned long port, const void *addr, unsigned long count) | ||
113 | { | ||
114 | volatile __u16 *p = port2adr(port); | ||
115 | const __u16 *ap = addr; | ||
116 | while (count--) | ||
117 | *p = *ap++; | ||
118 | } | ||
119 | |||
120 | void se7206_outsl(unsigned long port, const void *addr, unsigned long count) | ||
121 | { | ||
122 | maybebadio(port); | ||
123 | } | ||
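With this file in place, an ordinary driver call such as inb(0x300) for the on-board SMC91C111 ends up as a 16-bit read from PA_SMSC, while ports at 0x2000 and above land in the MRSHPC window. The routing happens through the machine vector registered in setup.c below; conceptually it reduces to something like the following (a simplification for illustration, not the real asm-sh/io.h indirection):

	#include <asm/machvec.h>

	/* Simplified sketch of the machvec dispatch. */
	unsigned char inb(unsigned long port)
	{
		return sh_mv.mv_inb(port);	/* -> se7206_inb() on this board */
	}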
diff --git a/arch/sh/boards/se/7206/irq.c b/arch/sh/boards/se/7206/irq.c new file mode 100644 index 000000000000..3fb0c5f5b23a --- /dev/null +++ b/arch/sh/boards/se/7206/irq.c | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * linux/arch/sh/boards/se/7206/irq.c | ||
3 | * | ||
4 | * Copyright (C) 2005,2006 Yoshinori Sato | ||
5 | * | ||
6 | * Hitachi SolutionEngine Support. | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <asm/se7206.h> | ||
14 | |||
15 | #define INTSTS0 0x31800000 | ||
16 | #define INTSTS1 0x31800002 | ||
17 | #define INTMSK0 0x31800004 | ||
18 | #define INTMSK1 0x31800006 | ||
19 | #define INTSEL 0x31800008 | ||
20 | |||
21 | static void disable_se7206_irq(unsigned int irq) | ||
22 | { | ||
23 | unsigned short val; | ||
24 | unsigned short mask = 0xffff ^ (0x0f << 4 * (3 - (IRQ0_IRQ - irq))); | ||
25 | unsigned short msk0,msk1; | ||
26 | |||
27 | /* Set the priority in IPR to 0 */ | ||
28 | val = ctrl_inw(INTC_IPR01); | ||
29 | val &= mask; | ||
30 | ctrl_outw(val, INTC_IPR01); | ||
31 | /* FPGA mask set */ | ||
32 | msk0 = ctrl_inw(INTMSK0); | ||
33 | msk1 = ctrl_inw(INTMSK1); | ||
34 | |||
35 | switch (irq) { | ||
36 | case IRQ0_IRQ: | ||
37 | msk0 |= 0x0010; | ||
38 | break; | ||
39 | case IRQ1_IRQ: | ||
40 | msk0 |= 0x000f; | ||
41 | break; | ||
42 | case IRQ2_IRQ: | ||
43 | msk0 |= 0x0f00; | ||
44 | msk1 |= 0x00ff; | ||
45 | break; | ||
46 | } | ||
47 | ctrl_outw(msk0, INTMSK0); | ||
48 | ctrl_outw(msk1, INTMSK1); | ||
49 | } | ||
50 | |||
51 | static void enable_se7206_irq(unsigned int irq) | ||
52 | { | ||
53 | unsigned short val; | ||
54 | unsigned short value = (0x0001 << 4 * (3 - (IRQ0_IRQ - irq))); | ||
55 | unsigned short msk0,msk1; | ||
56 | |||
57 | /* Set priority in IPR back to original value */ | ||
58 | val = ctrl_inw(INTC_IPR01); | ||
59 | val |= value; | ||
60 | ctrl_outw(val, INTC_IPR01); | ||
61 | |||
62 | /* FPGA mask reset */ | ||
63 | msk0 = ctrl_inw(INTMSK0); | ||
64 | msk1 = ctrl_inw(INTMSK1); | ||
65 | |||
66 | switch (irq) { | ||
67 | case IRQ0_IRQ: | ||
68 | msk0 &= ~0x0010; | ||
69 | break; | ||
70 | case IRQ1_IRQ: | ||
71 | msk0 &= ~0x000f; | ||
72 | break; | ||
73 | case IRQ2_IRQ: | ||
74 | msk0 &= ~0x0f00; | ||
75 | msk1 &= ~0x00ff; | ||
76 | break; | ||
77 | } | ||
78 | ctrl_outw(msk0, INTMSK0); | ||
79 | ctrl_outw(msk1, INTMSK1); | ||
80 | } | ||
81 | |||
82 | static void eoi_se7206_irq(unsigned int irq) | ||
83 | { | ||
84 | unsigned short sts0,sts1; | ||
85 | |||
86 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
87 | enable_se7206_irq(irq); | ||
88 | /* FPGA isr clear */ | ||
89 | sts0 = ctrl_inw(INTSTS0); | ||
90 | sts1 = ctrl_inw(INTSTS1); | ||
91 | |||
92 | switch (irq) { | ||
93 | case IRQ0_IRQ: | ||
94 | sts0 &= ~0x0010; | ||
95 | break; | ||
96 | case IRQ1_IRQ: | ||
97 | sts0 &= ~0x000f; | ||
98 | break; | ||
99 | case IRQ2_IRQ: | ||
100 | sts0 &= ~0x0f00; | ||
101 | sts1 &= ~0x00ff; | ||
102 | break; | ||
103 | } | ||
104 | ctrl_outw(sts0, INTSTS0); | ||
105 | ctrl_outw(sts1, INTSTS1); | ||
106 | } | ||
107 | |||
108 | static struct irq_chip se7206_irq_chip __read_mostly = { | ||
109 | .name = "SE7206-FPGA-IRQ", | ||
110 | .mask = disable_se7206_irq, | ||
111 | .unmask = enable_se7206_irq, | ||
112 | .mask_ack = disable_se7206_irq, | ||
113 | .eoi = eoi_se7206_irq, | ||
114 | }; | ||
115 | |||
116 | static void make_se7206_irq(unsigned int irq) | ||
117 | { | ||
118 | disable_irq_nosync(irq); | ||
119 | set_irq_chip_and_handler_name(irq, &se7206_irq_chip, | ||
120 | handle_level_irq, "level"); | ||
121 | disable_se7206_irq(irq); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Initialize IRQ setting | ||
126 | */ | ||
127 | void __init init_se7206_IRQ(void) | ||
128 | { | ||
129 | make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */ | ||
130 | make_se7206_irq(IRQ1_IRQ); /* ATA */ | ||
131 | make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */ | ||
132 | ctrl_outw(ctrl_inw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */ | ||
133 | |||
134 | /* FPGA System register setup*/ | ||
135 | ctrl_outw(0x0000,INTSTS0); /* Clear INTSTS0 */ | ||
136 | ctrl_outw(0x0000,INTSTS1); /* Clear INTSTS1 */ | ||
137 | /* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */ | ||
138 | ctrl_outw(0x0001,INTSEL); | ||
139 | } | ||
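Once init_se7206_IRQ() has installed the FPGA irq_chip, the on-board peripherals attach to these lines like any other interrupt source. A hypothetical example for the SMC91C111 on IRQ0 (the handler and name are placeholders, not part of the patch):

	#include <linux/init.h>
	#include <linux/interrupt.h>
	#include <asm/se7206.h>

	static irqreturn_t smc_demo_isr(int irq, void *dev_id)
	{
		/* read and clear the device's own interrupt status here */
		return IRQ_HANDLED;
	}

	static int __init smc_demo_init(void)
	{
		return request_irq(IRQ0_IRQ, smc_demo_isr, 0, "smc91x-demo", NULL);
	}
	__initcall(smc_demo_init);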
diff --git a/arch/sh/boards/se/7206/led.c b/arch/sh/boards/se/7206/led.c new file mode 100644 index 000000000000..ef794601ab86 --- /dev/null +++ b/arch/sh/boards/se/7206/led.c | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * linux/arch/sh/boards/se/7206/led.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Stuart Menefy <stuart.menefy@st.com> | ||
5 | * | ||
6 | * May be copied or modified under the terms of the GNU General Public | ||
7 | * License. See linux/COPYING for more information. | ||
8 | * | ||
9 | * This file contains Solution Engine specific LED code. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <asm/se7206.h> | ||
14 | |||
15 | #ifdef CONFIG_HEARTBEAT | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | |||
19 | /* Cycle the LEDs in the classic Knight Rider/Sun pattern */ | ||
20 | void heartbeat_se(void) | ||
21 | { | ||
22 | static unsigned int cnt = 0, period = 0; | ||
23 | volatile unsigned short* p = (volatile unsigned short*)PA_LED; | ||
24 | static unsigned bit = 0, up = 1; | ||
25 | |||
26 | cnt += 1; | ||
27 | if (cnt < period) { | ||
28 | return; | ||
29 | } | ||
30 | |||
31 | cnt = 0; | ||
32 | |||
33 | /* Go through the points (roughly!): | ||
34 | * f(0)=10, f(1)=16, f(2)=20, f(5)=35, f(inf)->110 | ||
35 | */ | ||
36 | period = 110 - ( (300<<FSHIFT)/ | ||
37 | ((avenrun[0]/5) + (3<<FSHIFT)) ); | ||
38 | |||
39 | if (up) { | ||
40 | if (bit == 7) { | ||
41 | bit--; | ||
42 | up=0; | ||
43 | } else { | ||
44 | bit ++; | ||
45 | } | ||
46 | } else { | ||
47 | if (bit == 0) { | ||
48 | bit++; | ||
49 | up=1; | ||
50 | } else { | ||
51 | bit--; | ||
52 | } | ||
53 | } | ||
54 | *p = 1<<(bit+8); | ||
55 | |||
56 | } | ||
57 | #endif /* CONFIG_HEARTBEAT */ | ||
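Sanity check on the period formula, assuming the usual FSHIFT of 11 (a load average of 1.0 stored as 2048): on an idle system avenrun[0] is 0 and the period comes out as 110 - (300<<11)/(3<<11) = 110 - 100 = 10 ticks, matching f(0)=10 in the comment; as the load grows the subtracted term shrinks toward zero, so the period climbs toward 110 ticks and the LED sweep visibly slows down.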
diff --git a/arch/sh/boards/se/7206/setup.c b/arch/sh/boards/se/7206/setup.c new file mode 100644 index 000000000000..0f42e91a3238 --- /dev/null +++ b/arch/sh/boards/se/7206/setup.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * | ||
3 | * linux/arch/sh/boards/se/7206/setup.c | ||
4 | * | ||
5 | * Copyright (C) 2006 Yoshinori Sato | ||
6 | * | ||
7 | * Hitachi 7206 SolutionEngine Support. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/platform_device.h> | ||
13 | #include <asm/se7206.h> | ||
14 | #include <asm/io.h> | ||
15 | #include <asm/machvec.h> | ||
16 | |||
17 | static struct resource smc91x_resources[] = { | ||
18 | [0] = { | ||
19 | .start = 0x300, | ||
20 | .end = 0x300 + 0x020 - 1, | ||
21 | .flags = IORESOURCE_MEM, | ||
22 | }, | ||
23 | [1] = { | ||
24 | .start = 64, | ||
25 | .end = 64, | ||
26 | .flags = IORESOURCE_IRQ, | ||
27 | }, | ||
28 | }; | ||
29 | |||
30 | static struct platform_device smc91x_device = { | ||
31 | .name = "smc91x", | ||
32 | .id = -1, | ||
33 | .num_resources = ARRAY_SIZE(smc91x_resources), | ||
34 | .resource = smc91x_resources, | ||
35 | }; | ||
36 | |||
37 | static int __init se7206_devices_setup(void) | ||
38 | { | ||
39 | return platform_device_register(&smc91x_device); | ||
40 | } | ||
41 | |||
42 | __initcall(se7206_devices_setup); | ||
43 | |||
44 | void heartbeat_se(void); | ||
45 | |||
46 | /* | ||
47 | * The Machine Vector | ||
48 | */ | ||
49 | |||
50 | struct sh_machine_vector mv_se __initmv = { | ||
51 | .mv_name = "SolutionEngine", | ||
52 | .mv_nr_irqs = 256, | ||
53 | .mv_inb = se7206_inb, | ||
54 | .mv_inw = se7206_inw, | ||
55 | .mv_inl = se7206_inl, | ||
56 | .mv_outb = se7206_outb, | ||
57 | .mv_outw = se7206_outw, | ||
58 | .mv_outl = se7206_outl, | ||
59 | |||
60 | .mv_inb_p = se7206_inb_p, | ||
61 | .mv_inw_p = se7206_inw, | ||
62 | .mv_inl_p = se7206_inl, | ||
63 | .mv_outb_p = se7206_outb_p, | ||
64 | .mv_outw_p = se7206_outw, | ||
65 | .mv_outl_p = se7206_outl, | ||
66 | |||
67 | .mv_insb = se7206_insb, | ||
68 | .mv_insw = se7206_insw, | ||
69 | .mv_insl = se7206_insl, | ||
70 | .mv_outsb = se7206_outsb, | ||
71 | .mv_outsw = se7206_outsw, | ||
72 | .mv_outsl = se7206_outsl, | ||
73 | |||
74 | .mv_init_irq = init_se7206_IRQ, | ||
75 | #ifdef CONFIG_HEARTBEAT | ||
76 | .mv_heartbeat = heartbeat_se, | ||
77 | #endif | ||
78 | }; | ||
79 | ALIAS_MV(se) | ||
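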
diff --git a/arch/sh/boards/se/7619/Makefile b/arch/sh/boards/se/7619/Makefile new file mode 100644 index 000000000000..3666eca8a658 --- /dev/null +++ b/arch/sh/boards/se/7619/Makefile | |||
@@ -0,0 +1,5 @@ | |||
1 | # | ||
2 | # Makefile for the 7619 SolutionEngine specific parts of the kernel | ||
3 | # | ||
4 | |||
5 | obj-y := setup.o io.o | ||
diff --git a/arch/sh/boards/se/7619/io.c b/arch/sh/boards/se/7619/io.c new file mode 100644 index 000000000000..176f1f39cd9d --- /dev/null +++ b/arch/sh/boards/se/7619/io.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * | ||
3 | * linux/arch/sh/boards/se/7619/io.c | ||
4 | * | ||
5 | * Copyright (C) 2006 Yoshinori Sato | ||
6 | * | ||
7 | * I/O routine for Hitachi 7619 SolutionEngine. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/se7619.h> | ||
15 | #include <asm/irq.h> | ||
16 | |||
17 | /* FIXME: M3A-ZAB7 Compact Flash Slot support */ | ||
18 | |||
19 | static inline void delay(void) | ||
20 | { | ||
21 | ctrl_inw(0xa0000000); /* Uncached ROM area (P2) */ | ||
22 | } | ||
23 | |||
24 | #define badio(name,port) \ | ||
25 | printk("bad I/O operation (%s) for port 0x%lx at 0x%08x\n", \ | ||
26 | #name, (port), (__u32) __builtin_return_address(0)) | ||
27 | |||
28 | unsigned char se7619___inb(unsigned long port) | ||
29 | { | ||
30 | badio(inb, port); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | unsigned char se7619___inb_p(unsigned long port) | ||
35 | { | ||
36 | badio(inb_p, port); | ||
37 | delay(); | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | unsigned short se7619___inw(unsigned long port) | ||
42 | { | ||
43 | badio(inw, port); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | unsigned int se7619___inl(unsigned long port) | ||
48 | { | ||
49 | badio(inl, port); | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | void se7619___outb(unsigned char value, unsigned long port) | ||
54 | { | ||
55 | badio(outb, port); | ||
56 | } | ||
57 | |||
58 | void se7619___outb_p(unsigned char value, unsigned long port) | ||
59 | { | ||
60 | badio(outb_p, port); | ||
61 | delay(); | ||
62 | } | ||
63 | |||
64 | void se7619___outw(unsigned short value, unsigned long port) | ||
65 | { | ||
66 | badio(outw, port); | ||
67 | } | ||
68 | |||
69 | void se7619___outl(unsigned int value, unsigned long port) | ||
70 | { | ||
71 | badio(outl, port); | ||
72 | } | ||
73 | |||
74 | void se7619___insb(unsigned long port, void *addr, unsigned long count) | ||
75 | { | ||
76 | badio(insb, port); | ||
77 | } | ||
78 | |||
79 | void se7619___insw(unsigned long port, void *addr, unsigned long count) | ||
80 | { | ||
81 | badio(insw, port); | ||
82 | } | ||
83 | |||
84 | void se7619___insl(unsigned long port, void *addr, unsigned long count) | ||
85 | { | ||
86 | badio(insl, port); | ||
87 | } | ||
88 | |||
89 | void se7619___outsb(unsigned long port, const void *addr, unsigned long count) | ||
90 | { | ||
91 | badio(outsb, port); | ||
92 | } | ||
93 | |||
94 | void se7619___outsw(unsigned long port, const void *addr, unsigned long count) | ||
95 | { | ||
96 | badio(outsw, port); | ||
97 | } | ||
98 | |||
99 | void se7619___outsl(unsigned long port, const void *addr, unsigned long count) | ||
100 | { | ||
101 | badio(outsl, port); | ||
102 | } | ||
diff --git a/arch/sh/boards/se/7619/setup.c b/arch/sh/boards/se/7619/setup.c new file mode 100644 index 000000000000..e627b26de0d0 --- /dev/null +++ b/arch/sh/boards/se/7619/setup.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * arch/sh/boards/se/7619/setup.c | ||
3 | * | ||
4 | * Copyright (C) 2006 Yoshinori Sato | ||
5 | * | ||
6 | * Hitachi SH7619 SolutionEngine Support. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <asm/io.h> | ||
12 | #include <asm/se7619.h> | ||
13 | #include <asm/machvec.h> | ||
14 | |||
15 | /* | ||
16 | * The Machine Vector | ||
17 | */ | ||
18 | |||
19 | struct sh_machine_vector mv_se __initmv = { | ||
20 | .mv_name = "SolutionEngine", | ||
21 | .mv_nr_irqs = 108, | ||
22 | .mv_inb = se7619___inb, | ||
23 | .mv_inw = se7619___inw, | ||
24 | .mv_inl = se7619___inl, | ||
25 | .mv_outb = se7619___outb, | ||
26 | .mv_outw = se7619___outw, | ||
27 | .mv_outl = se7619___outl, | ||
28 | |||
29 | .mv_inb_p = se7619___inb_p, | ||
30 | .mv_inw_p = se7619___inw, | ||
31 | .mv_inl_p = se7619___inl, | ||
32 | .mv_outb_p = se7619___outb_p, | ||
33 | .mv_outw_p = se7619___outw, | ||
34 | .mv_outl_p = se7619___outl, | ||
35 | |||
36 | .mv_insb = se7619___insb, | ||
37 | .mv_insw = se7619___insw, | ||
38 | .mv_insl = se7619___insl, | ||
39 | .mv_outsb = se7619___outsb, | ||
40 | .mv_outsw = se7619___outsw, | ||
41 | .mv_outsl = se7619___outsl, | ||
42 | }; | ||
43 | ALIAS_MV(se) | ||
diff --git a/arch/sh/boards/titan/setup.c b/arch/sh/boards/titan/setup.c index a6046d93758b..6bcd939bfaed 100644 --- a/arch/sh/boards/titan/setup.c +++ b/arch/sh/boards/titan/setup.c | |||
@@ -1,26 +1,30 @@ | |||
1 | /* | 1 | /* |
2 | * Setup for Titan | 2 | * arch/sh/boards/titan/setup.c - Setup for Titan |
3 | * | ||
4 | * Copyright (C) 2006 Jamie Lenehan | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
3 | */ | 9 | */ |
4 | |||
5 | #include <linux/init.h> | 10 | #include <linux/init.h> |
6 | #include <asm/irq.h> | 11 | #include <linux/irq.h> |
7 | #include <asm/titan.h> | 12 | #include <asm/titan.h> |
8 | #include <asm/io.h> | 13 | #include <asm/io.h> |
9 | 14 | ||
10 | extern void __init pcibios_init_platform(void); | ||
11 | |||
12 | static struct ipr_data titan_ipr_map[] = { | 15 | static struct ipr_data titan_ipr_map[] = { |
13 | { TITAN_IRQ_WAN, IRL0_IPR_ADDR, IRL0_IPR_POS, IRL0_PRIORITY }, | 16 | /* IRQ, IPR idx, shift, prio */ |
14 | { TITAN_IRQ_LAN, IRL1_IPR_ADDR, IRL1_IPR_POS, IRL1_PRIORITY }, | 17 | { TITAN_IRQ_WAN, 3, 12, 8 }, /* eth0 (WAN) */ |
15 | { TITAN_IRQ_MPCIA, IRL2_IPR_ADDR, IRL2_IPR_POS, IRL2_PRIORITY }, | 18 | { TITAN_IRQ_LAN, 3, 8, 8 }, /* eth1 (LAN) */ |
16 | { TITAN_IRQ_USB, IRL3_IPR_ADDR, IRL3_IPR_POS, IRL3_PRIORITY }, | 19 | { TITAN_IRQ_MPCIA, 3, 4, 8 }, /* mPCI A (top) */ |
20 | { TITAN_IRQ_USB, 3, 0, 8 }, /* mPCI B (bottom), USB */ | ||
17 | }; | 21 | }; |
18 | 22 | ||
19 | static void __init init_titan_irq(void) | 23 | static void __init init_titan_irq(void) |
20 | { | 24 | { |
21 | /* enable individual interrupt mode for externals */ | 25 | /* enable individual interrupt mode for externals */ |
22 | ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); | 26 | ipr_irq_enable_irlm(); |
23 | 27 | /* register ipr irqs */ | |
24 | make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map)); | 28 | make_ipr_irq(titan_ipr_map, ARRAY_SIZE(titan_ipr_map)); |
25 | } | 29 | } |
26 | 30 | ||
@@ -47,6 +51,5 @@ struct sh_machine_vector mv_titan __initmv = { | |||
47 | .mv_ioport_map = titan_ioport_map, | 51 | .mv_ioport_map = titan_ioport_map, |
48 | 52 | ||
49 | .mv_init_irq = init_titan_irq, | 53 | .mv_init_irq = init_titan_irq, |
50 | .mv_init_pci = pcibios_init_platform, | ||
51 | }; | 54 | }; |
52 | ALIAS_MV(titan) | 55 | ALIAS_MV(titan) |
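The Titan table now feeds make_ipr_irq() raw coordinates instead of the board-local IRL*_IPR_ADDR/POS/PRIORITY macros. Judging from the column comment, the reworked struct ipr_data presumably looks something like the following (an assumption; the real definition lives with the IPR interrupt code, which also supplies ipr_irq_enable_irlm() as a replacement for the open-coded INTC_ICR write):

	struct ipr_data {
		unsigned int irq;	/* vector to hook up */
		unsigned int ipr_idx;	/* which IPR register */
		unsigned int shift;	/* bit position of the 4-bit priority field */
		unsigned int priority;	/* priority value to program */
	};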
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c index f2fed5ce5cc3..35452d85b7f7 100644 --- a/arch/sh/boot/compressed/misc.c +++ b/arch/sh/boot/compressed/misc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/addrspace.h> | ||
15 | #ifdef CONFIG_SH_STANDARD_BIOS | 16 | #ifdef CONFIG_SH_STANDARD_BIOS |
16 | #include <asm/sh_bios.h> | 17 | #include <asm/sh_bios.h> |
17 | #endif | 18 | #endif |
@@ -228,7 +229,7 @@ long* stack_start = &user_stack[STACK_SIZE]; | |||
228 | void decompress_kernel(void) | 229 | void decompress_kernel(void) |
229 | { | 230 | { |
230 | output_data = 0; | 231 | output_data = 0; |
231 | output_ptr = (unsigned long)&_text+0x20001000; | 232 | output_ptr = P2SEGADDR((unsigned long)&_text+0x1000); |
232 | free_mem_ptr = (unsigned long)&_end; | 233 | free_mem_ptr = (unsigned long)&_end; |
233 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | 234 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
234 | 235 | ||
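The old +0x20001000 constant relied on the kernel text sitting at a P1 (cached, unmapped) address and being bumped into the P2 (uncached) mirror by adding 0x20000000. P2SEGADDR() from asm/addrspace.h expresses the same translation symbolically; on sh it amounts to roughly the following (shown for illustration, not copied from the header):

	#define P1SEG		0x80000000	/* cached, unmapped */
	#define P2SEG		0xa0000000	/* uncached, unmapped */
	#define P2SEGADDR(a)	(((unsigned long)(a) & 0x1fffffff) | P2SEG)

so the decompressor still writes the kernel image through the uncached mirror, but without hard-coding a particular link address.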
diff --git a/arch/sh/configs/r7780rp_defconfig b/arch/sh/configs/r7780rp_defconfig index 34e2046c3213..2b75b4896ba5 100644 --- a/arch/sh/configs/r7780rp_defconfig +++ b/arch/sh/configs/r7780rp_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.19-rc3 | 3 | # Linux kernel version: 2.6.19 |
4 | # Tue Oct 31 12:32:06 2006 | 4 | # Wed Dec 6 11:59:38 2006 |
5 | # | 5 | # |
6 | CONFIG_SUPERH=y | 6 | CONFIG_SUPERH=y |
7 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | 7 | CONFIG_RWSEM_GENERIC_SPINLOCK=y |
@@ -11,6 +11,8 @@ CONFIG_GENERIC_HARDIRQS=y | |||
11 | CONFIG_GENERIC_IRQ_PROBE=y | 11 | CONFIG_GENERIC_IRQ_PROBE=y |
12 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 12 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
13 | # CONFIG_GENERIC_TIME is not set | 13 | # CONFIG_GENERIC_TIME is not set |
14 | CONFIG_STACKTRACE_SUPPORT=y | ||
15 | CONFIG_LOCKDEP_SUPPORT=y | ||
14 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | 16 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" |
15 | 17 | ||
16 | # | 18 | # |
@@ -37,6 +39,7 @@ CONFIG_BSD_PROCESS_ACCT=y | |||
37 | # CONFIG_AUDIT is not set | 39 | # CONFIG_AUDIT is not set |
38 | CONFIG_IKCONFIG=y | 40 | CONFIG_IKCONFIG=y |
39 | CONFIG_IKCONFIG_PROC=y | 41 | CONFIG_IKCONFIG_PROC=y |
42 | # CONFIG_SYSFS_DEPRECATED is not set | ||
40 | # CONFIG_RELAY is not set | 43 | # CONFIG_RELAY is not set |
41 | CONFIG_INITRAMFS_SOURCE="" | 44 | CONFIG_INITRAMFS_SOURCE="" |
42 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 45 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
@@ -118,6 +121,8 @@ CONFIG_SH_R7780RP=y | |||
118 | # CONFIG_SH_LANDISK is not set | 121 | # CONFIG_SH_LANDISK is not set |
119 | # CONFIG_SH_TITAN is not set | 122 | # CONFIG_SH_TITAN is not set |
120 | # CONFIG_SH_SHMIN is not set | 123 | # CONFIG_SH_SHMIN is not set |
124 | # CONFIG_SH_7206_SOLUTION_ENGINE is not set | ||
125 | # CONFIG_SH_7619_SOLUTION_ENGINE is not set | ||
121 | # CONFIG_SH_UNKNOWN is not set | 126 | # CONFIG_SH_UNKNOWN is not set |
122 | 127 | ||
123 | # | 128 | # |
@@ -130,6 +135,12 @@ CONFIG_CPU_SH4A=y | |||
130 | # SH-2 Processor Support | 135 | # SH-2 Processor Support |
131 | # | 136 | # |
132 | # CONFIG_CPU_SUBTYPE_SH7604 is not set | 137 | # CONFIG_CPU_SUBTYPE_SH7604 is not set |
138 | # CONFIG_CPU_SUBTYPE_SH7619 is not set | ||
139 | |||
140 | # | ||
141 | # SH-2A Processor Support | ||
142 | # | ||
143 | # CONFIG_CPU_SUBTYPE_SH7206 is not set | ||
133 | 144 | ||
134 | # | 145 | # |
135 | # SH-3 Processor Support | 146 | # SH-3 Processor Support |
@@ -165,6 +176,7 @@ CONFIG_CPU_SH4A=y | |||
165 | # | 176 | # |
166 | # CONFIG_CPU_SUBTYPE_SH7770 is not set | 177 | # CONFIG_CPU_SUBTYPE_SH7770 is not set |
167 | CONFIG_CPU_SUBTYPE_SH7780=y | 178 | CONFIG_CPU_SUBTYPE_SH7780=y |
179 | # CONFIG_CPU_SUBTYPE_SH7785 is not set | ||
168 | 180 | ||
169 | # | 181 | # |
170 | # SH4AL-DSP Processor Support | 182 | # SH4AL-DSP Processor Support |
@@ -181,8 +193,14 @@ CONFIG_MEMORY_START=0x08000000 | |||
181 | CONFIG_MEMORY_SIZE=0x08000000 | 193 | CONFIG_MEMORY_SIZE=0x08000000 |
182 | # CONFIG_32BIT is not set | 194 | # CONFIG_32BIT is not set |
183 | CONFIG_VSYSCALL=y | 195 | CONFIG_VSYSCALL=y |
196 | CONFIG_PAGE_SIZE_4KB=y | ||
197 | # CONFIG_PAGE_SIZE_8KB is not set | ||
198 | # CONFIG_PAGE_SIZE_64KB is not set | ||
184 | CONFIG_HUGETLB_PAGE_SIZE_64K=y | 199 | CONFIG_HUGETLB_PAGE_SIZE_64K=y |
200 | # CONFIG_HUGETLB_PAGE_SIZE_256K is not set | ||
185 | # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set | 201 | # CONFIG_HUGETLB_PAGE_SIZE_1MB is not set |
202 | # CONFIG_HUGETLB_PAGE_SIZE_4MB is not set | ||
203 | # CONFIG_HUGETLB_PAGE_SIZE_64MB is not set | ||
186 | CONFIG_SELECT_MEMORY_MODEL=y | 204 | CONFIG_SELECT_MEMORY_MODEL=y |
187 | CONFIG_FLATMEM_MANUAL=y | 205 | CONFIG_FLATMEM_MANUAL=y |
188 | # CONFIG_DISCONTIGMEM_MANUAL is not set | 206 | # CONFIG_DISCONTIGMEM_MANUAL is not set |
@@ -204,12 +222,14 @@ CONFIG_SPLIT_PTLOCK_CPUS=4 | |||
204 | # Processor features | 222 | # Processor features |
205 | # | 223 | # |
206 | CONFIG_CPU_LITTLE_ENDIAN=y | 224 | CONFIG_CPU_LITTLE_ENDIAN=y |
225 | # CONFIG_CPU_BIG_ENDIAN is not set | ||
207 | CONFIG_SH_FPU=y | 226 | CONFIG_SH_FPU=y |
208 | # CONFIG_SH_DSP is not set | 227 | # CONFIG_SH_DSP is not set |
209 | CONFIG_SH_STORE_QUEUES=y | 228 | CONFIG_SH_STORE_QUEUES=y |
210 | CONFIG_CPU_HAS_INTEVT=y | 229 | CONFIG_CPU_HAS_INTEVT=y |
211 | CONFIG_CPU_HAS_INTC2_IRQ=y | 230 | CONFIG_CPU_HAS_INTC2_IRQ=y |
212 | CONFIG_CPU_HAS_SR_RB=y | 231 | CONFIG_CPU_HAS_SR_RB=y |
232 | CONFIG_CPU_HAS_PTEA=y | ||
213 | 233 | ||
214 | # | 234 | # |
215 | # Timer support | 235 | # Timer support |
@@ -220,6 +240,8 @@ CONFIG_SH_TMU=y | |||
220 | # R7780RP options | 240 | # R7780RP options |
221 | # | 241 | # |
222 | CONFIG_SH_R7780MP=y | 242 | CONFIG_SH_R7780MP=y |
243 | CONFIG_SH_TIMER_IRQ=28 | ||
244 | CONFIG_NO_IDLE_HZ=y | ||
223 | CONFIG_SH_PCLK_FREQ=32000000 | 245 | CONFIG_SH_PCLK_FREQ=32000000 |
224 | 246 | ||
225 | # | 247 | # |
@@ -238,13 +260,18 @@ CONFIG_SH_PCLK_FREQ=32000000 | |||
238 | # CONFIG_HD6446X_SERIES is not set | 260 | # CONFIG_HD6446X_SERIES is not set |
239 | 261 | ||
240 | # | 262 | # |
263 | # Additional SuperH Device Drivers | ||
264 | # | ||
265 | CONFIG_PUSH_SWITCH=y | ||
266 | |||
267 | # | ||
241 | # Kernel features | 268 | # Kernel features |
242 | # | 269 | # |
243 | # CONFIG_HZ_100 is not set | 270 | # CONFIG_HZ_100 is not set |
244 | CONFIG_HZ_250=y | 271 | CONFIG_HZ_250=y |
245 | # CONFIG_HZ_1000 is not set | 272 | # CONFIG_HZ_1000 is not set |
246 | CONFIG_HZ=250 | 273 | CONFIG_HZ=250 |
247 | # CONFIG_KEXEC is not set | 274 | CONFIG_KEXEC=y |
248 | # CONFIG_SMP is not set | 275 | # CONFIG_SMP is not set |
249 | # CONFIG_PREEMPT_NONE is not set | 276 | # CONFIG_PREEMPT_NONE is not set |
250 | # CONFIG_PREEMPT_VOLUNTARY is not set | 277 | # CONFIG_PREEMPT_VOLUNTARY is not set |
@@ -278,10 +305,7 @@ CONFIG_PCI_AUTO_UPDATE_RESOURCES=y | |||
278 | # | 305 | # |
279 | # PCI Hotplug Support | 306 | # PCI Hotplug Support |
280 | # | 307 | # |
281 | CONFIG_HOTPLUG_PCI=y | 308 | # CONFIG_HOTPLUG_PCI is not set |
282 | # CONFIG_HOTPLUG_PCI_FAKE is not set | ||
283 | # CONFIG_HOTPLUG_PCI_CPCI is not set | ||
284 | # CONFIG_HOTPLUG_PCI_SHPC is not set | ||
285 | 309 | ||
286 | # | 310 | # |
287 | # Executable file formats | 311 | # Executable file formats |
@@ -341,6 +365,7 @@ CONFIG_INET_TCP_DIAG=y | |||
341 | # CONFIG_TCP_CONG_ADVANCED is not set | 365 | # CONFIG_TCP_CONG_ADVANCED is not set |
342 | CONFIG_TCP_CONG_CUBIC=y | 366 | CONFIG_TCP_CONG_CUBIC=y |
343 | CONFIG_DEFAULT_TCP_CONG="cubic" | 367 | CONFIG_DEFAULT_TCP_CONG="cubic" |
368 | # CONFIG_TCP_MD5SIG is not set | ||
344 | # CONFIG_IPV6 is not set | 369 | # CONFIG_IPV6 is not set |
345 | # CONFIG_INET6_XFRM_TUNNEL is not set | 370 | # CONFIG_INET6_XFRM_TUNNEL is not set |
346 | # CONFIG_INET6_TUNNEL is not set | 371 | # CONFIG_INET6_TUNNEL is not set |
@@ -556,6 +581,7 @@ CONFIG_SATA_SIL=y | |||
556 | # CONFIG_PATA_IT821X is not set | 581 | # CONFIG_PATA_IT821X is not set |
557 | # CONFIG_PATA_JMICRON is not set | 582 | # CONFIG_PATA_JMICRON is not set |
558 | # CONFIG_PATA_TRIFLEX is not set | 583 | # CONFIG_PATA_TRIFLEX is not set |
584 | # CONFIG_PATA_MARVELL is not set | ||
559 | # CONFIG_PATA_MPIIX is not set | 585 | # CONFIG_PATA_MPIIX is not set |
560 | # CONFIG_PATA_OLDPIIX is not set | 586 | # CONFIG_PATA_OLDPIIX is not set |
561 | # CONFIG_PATA_NETCELL is not set | 587 | # CONFIG_PATA_NETCELL is not set |
@@ -572,6 +598,7 @@ CONFIG_SATA_SIL=y | |||
572 | # CONFIG_PATA_SIS is not set | 598 | # CONFIG_PATA_SIS is not set |
573 | # CONFIG_PATA_VIA is not set | 599 | # CONFIG_PATA_VIA is not set |
574 | # CONFIG_PATA_WINBOND is not set | 600 | # CONFIG_PATA_WINBOND is not set |
601 | CONFIG_PATA_PLATFORM=y | ||
575 | 602 | ||
576 | # | 603 | # |
577 | # Multi-device support (RAID and LVM) | 604 | # Multi-device support (RAID and LVM) |
@@ -688,6 +715,7 @@ CONFIG_R8169=y | |||
688 | # CONFIG_IXGB is not set | 715 | # CONFIG_IXGB is not set |
689 | # CONFIG_S2IO is not set | 716 | # CONFIG_S2IO is not set |
690 | # CONFIG_MYRI10GE is not set | 717 | # CONFIG_MYRI10GE is not set |
718 | # CONFIG_NETXEN_NIC is not set | ||
691 | 719 | ||
692 | # | 720 | # |
693 | # Token Ring devices | 721 | # Token Ring devices |
@@ -830,10 +858,6 @@ CONFIG_HW_RANDOM=y | |||
830 | # CONFIG_DTLK is not set | 858 | # CONFIG_DTLK is not set |
831 | # CONFIG_R3964 is not set | 859 | # CONFIG_R3964 is not set |
832 | # CONFIG_APPLICOM is not set | 860 | # CONFIG_APPLICOM is not set |
833 | |||
834 | # | ||
835 | # Ftape, the floppy tape device driver | ||
836 | # | ||
837 | # CONFIG_DRM is not set | 861 | # CONFIG_DRM is not set |
838 | # CONFIG_RAW_DRIVER is not set | 862 | # CONFIG_RAW_DRIVER is not set |
839 | 863 | ||
@@ -1020,7 +1044,7 @@ CONFIG_INOTIFY_USER=y | |||
1020 | CONFIG_DNOTIFY=y | 1044 | CONFIG_DNOTIFY=y |
1021 | # CONFIG_AUTOFS_FS is not set | 1045 | # CONFIG_AUTOFS_FS is not set |
1022 | # CONFIG_AUTOFS4_FS is not set | 1046 | # CONFIG_AUTOFS4_FS is not set |
1023 | # CONFIG_FUSE_FS is not set | 1047 | CONFIG_FUSE_FS=m |
1024 | 1048 | ||
1025 | # | 1049 | # |
1026 | # CD-ROM/DVD Filesystems | 1050 | # CD-ROM/DVD Filesystems |
@@ -1052,7 +1076,7 @@ CONFIG_TMPFS=y | |||
1052 | CONFIG_HUGETLBFS=y | 1076 | CONFIG_HUGETLBFS=y |
1053 | CONFIG_HUGETLB_PAGE=y | 1077 | CONFIG_HUGETLB_PAGE=y |
1054 | CONFIG_RAMFS=y | 1078 | CONFIG_RAMFS=y |
1055 | # CONFIG_CONFIGFS_FS is not set | 1079 | CONFIG_CONFIGFS_FS=m |
1056 | 1080 | ||
1057 | # | 1081 | # |
1058 | # Miscellaneous filesystems | 1082 | # Miscellaneous filesystems |
@@ -1153,28 +1177,33 @@ CONFIG_NLS_ISO8859_1=y | |||
1153 | # | 1177 | # |
1154 | # Profiling support | 1178 | # Profiling support |
1155 | # | 1179 | # |
1156 | # CONFIG_PROFILING is not set | 1180 | CONFIG_PROFILING=y |
1181 | CONFIG_OPROFILE=m | ||
1157 | 1182 | ||
1158 | # | 1183 | # |
1159 | # Kernel hacking | 1184 | # Kernel hacking |
1160 | # | 1185 | # |
1161 | # CONFIG_PRINTK_TIME is not set | 1186 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y |
1187 | CONFIG_PRINTK_TIME=y | ||
1162 | CONFIG_ENABLE_MUST_CHECK=y | 1188 | CONFIG_ENABLE_MUST_CHECK=y |
1163 | # CONFIG_MAGIC_SYSRQ is not set | 1189 | CONFIG_MAGIC_SYSRQ=y |
1164 | # CONFIG_UNUSED_SYMBOLS is not set | 1190 | # CONFIG_UNUSED_SYMBOLS is not set |
1165 | CONFIG_DEBUG_KERNEL=y | 1191 | CONFIG_DEBUG_KERNEL=y |
1166 | CONFIG_LOG_BUF_SHIFT=14 | 1192 | CONFIG_LOG_BUF_SHIFT=14 |
1167 | CONFIG_DETECT_SOFTLOCKUP=y | 1193 | CONFIG_DETECT_SOFTLOCKUP=y |
1168 | # CONFIG_SCHEDSTATS is not set | 1194 | # CONFIG_SCHEDSTATS is not set |
1169 | # CONFIG_DEBUG_SLAB is not set | 1195 | # CONFIG_DEBUG_SLAB is not set |
1170 | CONFIG_DEBUG_SPINLOCK=y | 1196 | # CONFIG_DEBUG_PREEMPT is not set |
1197 | # CONFIG_DEBUG_SPINLOCK is not set | ||
1171 | # CONFIG_DEBUG_MUTEXES is not set | 1198 | # CONFIG_DEBUG_MUTEXES is not set |
1172 | # CONFIG_DEBUG_RWSEMS is not set | 1199 | # CONFIG_DEBUG_RWSEMS is not set |
1200 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
1201 | # CONFIG_PROVE_LOCKING is not set | ||
1173 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1202 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
1174 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | 1203 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set |
1175 | # CONFIG_DEBUG_KOBJECT is not set | 1204 | # CONFIG_DEBUG_KOBJECT is not set |
1176 | # CONFIG_DEBUG_BUGVERBOSE is not set | 1205 | CONFIG_DEBUG_BUGVERBOSE=y |
1177 | # CONFIG_DEBUG_INFO is not set | 1206 | CONFIG_DEBUG_INFO=y |
1178 | CONFIG_DEBUG_FS=y | 1207 | CONFIG_DEBUG_FS=y |
1179 | # CONFIG_DEBUG_VM is not set | 1208 | # CONFIG_DEBUG_VM is not set |
1180 | # CONFIG_DEBUG_LIST is not set | 1209 | # CONFIG_DEBUG_LIST is not set |
@@ -1184,7 +1213,7 @@ CONFIG_FORCED_INLINING=y | |||
1184 | # CONFIG_RCU_TORTURE_TEST is not set | 1213 | # CONFIG_RCU_TORTURE_TEST is not set |
1185 | # CONFIG_SH_STANDARD_BIOS is not set | 1214 | # CONFIG_SH_STANDARD_BIOS is not set |
1186 | # CONFIG_EARLY_SCIF_CONSOLE is not set | 1215 | # CONFIG_EARLY_SCIF_CONSOLE is not set |
1187 | # CONFIG_DEBUG_STACKOVERFLOW is not set | 1216 | CONFIG_DEBUG_STACKOVERFLOW=y |
1188 | # CONFIG_DEBUG_STACK_USAGE is not set | 1217 | # CONFIG_DEBUG_STACK_USAGE is not set |
1189 | # CONFIG_4KSTACKS is not set | 1218 | # CONFIG_4KSTACKS is not set |
1190 | # CONFIG_KGDB is not set | 1219 | # CONFIG_KGDB is not set |
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig new file mode 100644 index 000000000000..36cec0b6e7c1 --- /dev/null +++ b/arch/sh/configs/se7206_defconfig | |||
@@ -0,0 +1,826 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.19-rc4 | ||
4 | # Sun Nov 5 16:20:10 2006 | ||
5 | # | ||
6 | CONFIG_SUPERH=y | ||
7 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
8 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
9 | CONFIG_GENERIC_HWEIGHT=y | ||
10 | CONFIG_GENERIC_HARDIRQS=y | ||
11 | CONFIG_GENERIC_IRQ_PROBE=y | ||
12 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
13 | # CONFIG_GENERIC_TIME is not set | ||
14 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
15 | |||
16 | # | ||
17 | # Code maturity level options | ||
18 | # | ||
19 | CONFIG_EXPERIMENTAL=y | ||
20 | CONFIG_BROKEN_ON_SMP=y | ||
21 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
22 | |||
23 | # | ||
24 | # General setup | ||
25 | # | ||
26 | CONFIG_LOCALVERSION="" | ||
27 | # CONFIG_LOCALVERSION_AUTO is not set | ||
28 | # CONFIG_SYSVIPC is not set | ||
29 | # CONFIG_POSIX_MQUEUE is not set | ||
30 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
31 | # CONFIG_TASKSTATS is not set | ||
32 | # CONFIG_UTS_NS is not set | ||
33 | # CONFIG_AUDIT is not set | ||
34 | # CONFIG_IKCONFIG is not set | ||
35 | # CONFIG_RELAY is not set | ||
36 | CONFIG_INITRAMFS_SOURCE="" | ||
37 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
38 | CONFIG_SYSCTL=y | ||
39 | CONFIG_EMBEDDED=y | ||
40 | CONFIG_UID16=y | ||
41 | # CONFIG_SYSCTL_SYSCALL is not set | ||
42 | CONFIG_KALLSYMS=y | ||
43 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
44 | # CONFIG_HOTPLUG is not set | ||
45 | CONFIG_PRINTK=y | ||
46 | CONFIG_BUG=y | ||
47 | CONFIG_ELF_CORE=y | ||
48 | CONFIG_BASE_FULL=y | ||
49 | # CONFIG_FUTEX is not set | ||
50 | # CONFIG_EPOLL is not set | ||
51 | CONFIG_SLAB=y | ||
52 | CONFIG_VM_EVENT_COUNTERS=y | ||
53 | CONFIG_TINY_SHMEM=y | ||
54 | CONFIG_BASE_SMALL=0 | ||
55 | # CONFIG_SLOB is not set | ||
56 | |||
57 | # | ||
58 | # Loadable module support | ||
59 | # | ||
60 | # CONFIG_MODULES is not set | ||
61 | |||
62 | # | ||
63 | # Block layer | ||
64 | # | ||
65 | CONFIG_BLOCK=y | ||
66 | # CONFIG_LBD is not set | ||
67 | # CONFIG_LSF is not set | ||
68 | |||
69 | # | ||
70 | # IO Schedulers | ||
71 | # | ||
72 | CONFIG_IOSCHED_NOOP=y | ||
73 | # CONFIG_IOSCHED_AS is not set | ||
74 | # CONFIG_IOSCHED_DEADLINE is not set | ||
75 | # CONFIG_IOSCHED_CFQ is not set | ||
76 | # CONFIG_DEFAULT_AS is not set | ||
77 | # CONFIG_DEFAULT_DEADLINE is not set | ||
78 | # CONFIG_DEFAULT_CFQ is not set | ||
79 | CONFIG_DEFAULT_NOOP=y | ||
80 | CONFIG_DEFAULT_IOSCHED="noop" | ||
81 | |||
82 | # | ||
83 | # System type | ||
84 | # | ||
85 | # CONFIG_SH_SOLUTION_ENGINE is not set | ||
86 | # CONFIG_SH_7751_SOLUTION_ENGINE is not set | ||
87 | # CONFIG_SH_7300_SOLUTION_ENGINE is not set | ||
88 | # CONFIG_SH_7343_SOLUTION_ENGINE is not set | ||
89 | # CONFIG_SH_73180_SOLUTION_ENGINE is not set | ||
90 | # CONFIG_SH_7751_SYSTEMH is not set | ||
91 | # CONFIG_SH_HP6XX is not set | ||
92 | # CONFIG_SH_EC3104 is not set | ||
93 | # CONFIG_SH_SATURN is not set | ||
94 | # CONFIG_SH_DREAMCAST is not set | ||
95 | # CONFIG_SH_BIGSUR is not set | ||
96 | # CONFIG_SH_MPC1211 is not set | ||
97 | # CONFIG_SH_SH03 is not set | ||
98 | # CONFIG_SH_SECUREEDGE5410 is not set | ||
99 | # CONFIG_SH_HS7751RVOIP is not set | ||
100 | # CONFIG_SH_7710VOIPGW is not set | ||
101 | # CONFIG_SH_RTS7751R2D is not set | ||
102 | # CONFIG_SH_R7780RP is not set | ||
103 | # CONFIG_SH_EDOSK7705 is not set | ||
104 | # CONFIG_SH_SH4202_MICRODEV is not set | ||
105 | # CONFIG_SH_LANDISK is not set | ||
106 | # CONFIG_SH_TITAN is not set | ||
107 | # CONFIG_SH_SHMIN is not set | ||
108 | CONFIG_SH_7206_SOLUTION_ENGINE=y | ||
109 | # CONFIG_SH_7619_SOLUTION_ENGINE is not set | ||
110 | # CONFIG_SH_UNKNOWN is not set | ||
111 | |||
112 | # | ||
113 | # Processor selection | ||
114 | # | ||
115 | CONFIG_CPU_SH2=y | ||
116 | CONFIG_CPU_SH2A=y | ||
117 | |||
118 | # | ||
119 | # SH-2 Processor Support | ||
120 | # | ||
121 | # CONFIG_CPU_SUBTYPE_SH7604 is not set | ||
122 | # CONFIG_CPU_SUBTYPE_SH7619 is not set | ||
123 | |||
124 | # | ||
125 | # SH-2A Processor Support | ||
126 | # | ||
127 | CONFIG_CPU_SUBTYPE_SH7206=y | ||
128 | |||
129 | # | ||
130 | # SH-3 Processor Support | ||
131 | # | ||
132 | # CONFIG_CPU_SUBTYPE_SH7300 is not set | ||
133 | # CONFIG_CPU_SUBTYPE_SH7705 is not set | ||
134 | # CONFIG_CPU_SUBTYPE_SH7706 is not set | ||
135 | # CONFIG_CPU_SUBTYPE_SH7707 is not set | ||
136 | # CONFIG_CPU_SUBTYPE_SH7708 is not set | ||
137 | # CONFIG_CPU_SUBTYPE_SH7709 is not set | ||
138 | # CONFIG_CPU_SUBTYPE_SH7710 is not set | ||
139 | |||
140 | # | ||
141 | # SH-4 Processor Support | ||
142 | # | ||
143 | # CONFIG_CPU_SUBTYPE_SH7750 is not set | ||
144 | # CONFIG_CPU_SUBTYPE_SH7091 is not set | ||
145 | # CONFIG_CPU_SUBTYPE_SH7750R is not set | ||
146 | # CONFIG_CPU_SUBTYPE_SH7750S is not set | ||
147 | # CONFIG_CPU_SUBTYPE_SH7751 is not set | ||
148 | # CONFIG_CPU_SUBTYPE_SH7751R is not set | ||
149 | # CONFIG_CPU_SUBTYPE_SH7760 is not set | ||
150 | # CONFIG_CPU_SUBTYPE_SH4_202 is not set | ||
151 | |||
152 | # | ||
153 | # ST40 Processor Support | ||
154 | # | ||
155 | # CONFIG_CPU_SUBTYPE_ST40STB1 is not set | ||
156 | # CONFIG_CPU_SUBTYPE_ST40GX1 is not set | ||
157 | |||
158 | # | ||
159 | # SH-4A Processor Support | ||
160 | # | ||
161 | # CONFIG_CPU_SUBTYPE_SH7770 is not set | ||
162 | # CONFIG_CPU_SUBTYPE_SH7780 is not set | ||
163 | |||
164 | # | ||
165 | # SH4AL-DSP Processor Support | ||
166 | # | ||
167 | # CONFIG_CPU_SUBTYPE_SH73180 is not set | ||
168 | # CONFIG_CPU_SUBTYPE_SH7343 is not set | ||
169 | |||
170 | # | ||
171 | # Memory management options | ||
172 | # | ||
173 | CONFIG_PAGE_OFFSET=0x00000000 | ||
174 | CONFIG_MEMORY_START=0x0c000000 | ||
175 | CONFIG_MEMORY_SIZE=0x02000000 | ||
176 | CONFIG_SELECT_MEMORY_MODEL=y | ||
177 | CONFIG_FLATMEM_MANUAL=y | ||
178 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
179 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
180 | CONFIG_FLATMEM=y | ||
181 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
182 | # CONFIG_SPARSEMEM_STATIC is not set | ||
183 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
184 | # CONFIG_RESOURCES_64BIT is not set | ||
185 | |||
186 | # | ||
187 | # Cache configuration | ||
188 | # | ||
189 | # CONFIG_SH_DIRECT_MAPPED is not set | ||
190 | # CONFIG_SH_WRITETHROUGH is not set | ||
191 | # CONFIG_SH_OCRAM is not set | ||
192 | |||
193 | # | ||
194 | # Processor features | ||
195 | # | ||
196 | # CONFIG_CPU_LITTLE_ENDIAN is not set | ||
197 | # CONFIG_SH_FPU is not set | ||
198 | # CONFIG_SH_FPU_EMU is not set | ||
199 | # CONFIG_SH_DSP is not set | ||
200 | |||
201 | # | ||
202 | # Timer support | ||
203 | # | ||
204 | CONFIG_SH_CMT=y | ||
205 | # CONFIG_SH_MTU2 is not set | ||
206 | CONFIG_SH_PCLK_FREQ=33333333 | ||
207 | CONFIG_SH_CLK_MD=6 | ||
208 | |||
209 | # | ||
210 | # CPU Frequency scaling | ||
211 | # | ||
212 | # CONFIG_CPU_FREQ is not set | ||
213 | |||
214 | # | ||
215 | # DMA support | ||
216 | # | ||
217 | # CONFIG_SH_DMA is not set | ||
218 | |||
219 | # | ||
220 | # Companion Chips | ||
221 | # | ||
222 | # CONFIG_HD6446X_SERIES is not set | ||
223 | |||
224 | # | ||
225 | # Kernel features | ||
226 | # | ||
227 | CONFIG_HZ_100=y | ||
228 | # CONFIG_HZ_250 is not set | ||
229 | # CONFIG_HZ_1000 is not set | ||
230 | CONFIG_HZ=100 | ||
231 | # CONFIG_KEXEC is not set | ||
232 | # CONFIG_SMP is not set | ||
233 | CONFIG_PREEMPT_NONE=y | ||
234 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
235 | # CONFIG_PREEMPT is not set | ||
236 | |||
237 | # | ||
238 | # Boot options | ||
239 | # | ||
240 | CONFIG_ZERO_PAGE_OFFSET=0x00001000 | ||
241 | CONFIG_BOOT_LINK_OFFSET=0x00800000 | ||
242 | # CONFIG_UBC_WAKEUP is not set | ||
243 | # CONFIG_CMDLINE_BOOL is not set | ||
244 | |||
245 | # | ||
246 | # Bus options | ||
247 | # | ||
248 | # CONFIG_PCI is not set | ||
249 | |||
250 | # | ||
251 | # PCCARD (PCMCIA/CardBus) support | ||
252 | # | ||
253 | |||
254 | # | ||
255 | # PCI Hotplug Support | ||
256 | # | ||
257 | |||
258 | # | ||
259 | # Executable file formats | ||
260 | # | ||
261 | CONFIG_BINFMT_FLAT=y | ||
262 | CONFIG_BINFMT_ZFLAT=y | ||
263 | # CONFIG_BINFMT_SHARED_FLAT is not set | ||
264 | # CONFIG_BINFMT_MISC is not set | ||
265 | |||
266 | # | ||
267 | # Power management options (EXPERIMENTAL) | ||
268 | # | ||
269 | # CONFIG_PM is not set | ||
270 | |||
271 | # | ||
272 | # Networking | ||
273 | # | ||
274 | CONFIG_NET=y | ||
275 | |||
276 | # | ||
277 | # Networking options | ||
278 | # | ||
279 | # CONFIG_NETDEBUG is not set | ||
280 | # CONFIG_PACKET is not set | ||
281 | # CONFIG_UNIX is not set | ||
282 | CONFIG_XFRM=y | ||
283 | # CONFIG_XFRM_USER is not set | ||
284 | # CONFIG_XFRM_SUB_POLICY is not set | ||
285 | # CONFIG_NET_KEY is not set | ||
286 | CONFIG_INET=y | ||
287 | # CONFIG_IP_MULTICAST is not set | ||
288 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
289 | CONFIG_IP_FIB_HASH=y | ||
290 | # CONFIG_IP_PNP is not set | ||
291 | # CONFIG_NET_IPIP is not set | ||
292 | # CONFIG_NET_IPGRE is not set | ||
293 | # CONFIG_ARPD is not set | ||
294 | # CONFIG_SYN_COOKIES is not set | ||
295 | # CONFIG_INET_AH is not set | ||
296 | # CONFIG_INET_ESP is not set | ||
297 | # CONFIG_INET_IPCOMP is not set | ||
298 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
299 | # CONFIG_INET_TUNNEL is not set | ||
300 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
301 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
302 | CONFIG_INET_XFRM_MODE_BEET=y | ||
303 | # CONFIG_INET_DIAG is not set | ||
304 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
305 | CONFIG_TCP_CONG_CUBIC=y | ||
306 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
307 | # CONFIG_IPV6 is not set | ||
308 | # CONFIG_INET6_XFRM_TUNNEL is not set | ||
309 | # CONFIG_INET6_TUNNEL is not set | ||
310 | # CONFIG_NETWORK_SECMARK is not set | ||
311 | # CONFIG_NETFILTER is not set | ||
312 | |||
313 | # | ||
314 | # DCCP Configuration (EXPERIMENTAL) | ||
315 | # | ||
316 | # CONFIG_IP_DCCP is not set | ||
317 | |||
318 | # | ||
319 | # SCTP Configuration (EXPERIMENTAL) | ||
320 | # | ||
321 | # CONFIG_IP_SCTP is not set | ||
322 | |||
323 | # | ||
324 | # TIPC Configuration (EXPERIMENTAL) | ||
325 | # | ||
326 | # CONFIG_TIPC is not set | ||
327 | # CONFIG_ATM is not set | ||
328 | # CONFIG_BRIDGE is not set | ||
329 | # CONFIG_VLAN_8021Q is not set | ||
330 | # CONFIG_DECNET is not set | ||
331 | # CONFIG_LLC2 is not set | ||
332 | # CONFIG_IPX is not set | ||
333 | # CONFIG_ATALK is not set | ||
334 | # CONFIG_X25 is not set | ||
335 | # CONFIG_LAPB is not set | ||
336 | # CONFIG_ECONET is not set | ||
337 | # CONFIG_WAN_ROUTER is not set | ||
338 | |||
339 | # | ||
340 | # QoS and/or fair queueing | ||
341 | # | ||
342 | # CONFIG_NET_SCHED is not set | ||
343 | |||
344 | # | ||
345 | # Network testing | ||
346 | # | ||
347 | # CONFIG_NET_PKTGEN is not set | ||
348 | # CONFIG_HAMRADIO is not set | ||
349 | # CONFIG_IRDA is not set | ||
350 | # CONFIG_BT is not set | ||
351 | # CONFIG_IEEE80211 is not set | ||
352 | |||
353 | # | ||
354 | # Device Drivers | ||
355 | # | ||
356 | |||
357 | # | ||
358 | # Generic Driver Options | ||
359 | # | ||
360 | # CONFIG_STANDALONE is not set | ||
361 | # CONFIG_PREVENT_FIRMWARE_BUILD is not set | ||
362 | # CONFIG_SYS_HYPERVISOR is not set | ||
363 | |||
364 | # | ||
365 | # Connector - unified userspace <-> kernelspace linker | ||
366 | # | ||
367 | # CONFIG_CONNECTOR is not set | ||
368 | |||
369 | # | ||
370 | # Memory Technology Devices (MTD) | ||
371 | # | ||
372 | CONFIG_MTD=y | ||
373 | # CONFIG_MTD_DEBUG is not set | ||
374 | # CONFIG_MTD_CONCAT is not set | ||
375 | CONFIG_MTD_PARTITIONS=y | ||
376 | CONFIG_MTD_REDBOOT_PARTS=y | ||
377 | CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 | ||
378 | # CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED is not set | ||
379 | # CONFIG_MTD_REDBOOT_PARTS_READONLY is not set | ||
380 | # CONFIG_MTD_CMDLINE_PARTS is not set | ||
381 | |||
382 | # | ||
383 | # User Modules And Translation Layers | ||
384 | # | ||
385 | CONFIG_MTD_CHAR=y | ||
386 | CONFIG_MTD_BLOCK=y | ||
387 | # CONFIG_FTL is not set | ||
388 | # CONFIG_NFTL is not set | ||
389 | # CONFIG_INFTL is not set | ||
390 | # CONFIG_RFD_FTL is not set | ||
391 | # CONFIG_SSFDC is not set | ||
392 | |||
393 | # | ||
394 | # RAM/ROM/Flash chip drivers | ||
395 | # | ||
396 | CONFIG_MTD_CFI=y | ||
397 | # CONFIG_MTD_JEDECPROBE is not set | ||
398 | CONFIG_MTD_GEN_PROBE=y | ||
399 | # CONFIG_MTD_CFI_ADV_OPTIONS is not set | ||
400 | CONFIG_MTD_MAP_BANK_WIDTH_1=y | ||
401 | CONFIG_MTD_MAP_BANK_WIDTH_2=y | ||
402 | CONFIG_MTD_MAP_BANK_WIDTH_4=y | ||
403 | # CONFIG_MTD_MAP_BANK_WIDTH_8 is not set | ||
404 | # CONFIG_MTD_MAP_BANK_WIDTH_16 is not set | ||
405 | # CONFIG_MTD_MAP_BANK_WIDTH_32 is not set | ||
406 | CONFIG_MTD_CFI_I1=y | ||
407 | CONFIG_MTD_CFI_I2=y | ||
408 | # CONFIG_MTD_CFI_I4 is not set | ||
409 | # CONFIG_MTD_CFI_I8 is not set | ||
410 | # CONFIG_MTD_CFI_INTELEXT is not set | ||
411 | CONFIG_MTD_CFI_AMDSTD=y | ||
412 | # CONFIG_MTD_CFI_STAA is not set | ||
413 | CONFIG_MTD_CFI_UTIL=y | ||
414 | # CONFIG_MTD_RAM is not set | ||
415 | # CONFIG_MTD_ROM is not set | ||
416 | # CONFIG_MTD_ABSENT is not set | ||
417 | # CONFIG_MTD_OBSOLETE_CHIPS is not set | ||
418 | |||
419 | # | ||
420 | # Mapping drivers for chip access | ||
421 | # | ||
422 | # CONFIG_MTD_COMPLEX_MAPPINGS is not set | ||
423 | CONFIG_MTD_PHYSMAP=y | ||
424 | CONFIG_MTD_PHYSMAP_START=0x20000000 | ||
425 | CONFIG_MTD_PHYSMAP_LEN=0x1000000 | ||
426 | CONFIG_MTD_PHYSMAP_BANKWIDTH=4 | ||
427 | # CONFIG_MTD_SOLUTIONENGINE is not set | ||
428 | # CONFIG_MTD_UCLINUX is not set | ||
429 | # CONFIG_MTD_PLATRAM is not set | ||
430 | |||
431 | # | ||
432 | # Self-contained MTD device drivers | ||
433 | # | ||
434 | # CONFIG_MTD_SLRAM is not set | ||
435 | # CONFIG_MTD_PHRAM is not set | ||
436 | # CONFIG_MTD_MTDRAM is not set | ||
437 | # CONFIG_MTD_BLOCK2MTD is not set | ||
438 | |||
439 | # | ||
440 | # Disk-On-Chip Device Drivers | ||
441 | # | ||
442 | # CONFIG_MTD_DOC2000 is not set | ||
443 | # CONFIG_MTD_DOC2001 is not set | ||
444 | # CONFIG_MTD_DOC2001PLUS is not set | ||
445 | |||
446 | # | ||
447 | # NAND Flash Device Drivers | ||
448 | # | ||
449 | # CONFIG_MTD_NAND is not set | ||
450 | |||
451 | # | ||
452 | # OneNAND Flash Device Drivers | ||
453 | # | ||
454 | # CONFIG_MTD_ONENAND is not set | ||
455 | |||
456 | # | ||
457 | # Parallel port support | ||
458 | # | ||
459 | # CONFIG_PARPORT is not set | ||
460 | |||
461 | # | ||
462 | # Plug and Play support | ||
463 | # | ||
464 | |||
465 | # | ||
466 | # Block devices | ||
467 | # | ||
468 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
469 | # CONFIG_BLK_DEV_LOOP is not set | ||
470 | # CONFIG_BLK_DEV_NBD is not set | ||
471 | CONFIG_BLK_DEV_RAM=y | ||
472 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
473 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
474 | CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 | ||
475 | # CONFIG_BLK_DEV_INITRD is not set | ||
476 | # CONFIG_CDROM_PKTCDVD is not set | ||
477 | # CONFIG_ATA_OVER_ETH is not set | ||
478 | |||
479 | # | ||
480 | # Misc devices | ||
481 | # | ||
482 | # CONFIG_TIFM_CORE is not set | ||
483 | |||
484 | # | ||
485 | # ATA/ATAPI/MFM/RLL support | ||
486 | # | ||
487 | # CONFIG_IDE is not set | ||
488 | |||
489 | # | ||
490 | # SCSI device support | ||
491 | # | ||
492 | # CONFIG_RAID_ATTRS is not set | ||
493 | # CONFIG_SCSI is not set | ||
494 | # CONFIG_SCSI_NETLINK is not set | ||
495 | |||
496 | # | ||
497 | # Serial ATA (prod) and Parallel ATA (experimental) drivers | ||
498 | # | ||
499 | # CONFIG_ATA is not set | ||
500 | |||
501 | # | ||
502 | # Multi-device support (RAID and LVM) | ||
503 | # | ||
504 | # CONFIG_MD is not set | ||
505 | |||
506 | # | ||
507 | # Fusion MPT device support | ||
508 | # | ||
509 | # CONFIG_FUSION is not set | ||
510 | |||
511 | # | ||
512 | # IEEE 1394 (FireWire) support | ||
513 | # | ||
514 | |||
515 | # | ||
516 | # I2O device support | ||
517 | # | ||
518 | |||
519 | # | ||
520 | # Network device support | ||
521 | # | ||
522 | # CONFIG_NETDEVICES is not set | ||
523 | # CONFIG_NETPOLL is not set | ||
524 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
525 | |||
526 | # | ||
527 | # ISDN subsystem | ||
528 | # | ||
529 | # CONFIG_ISDN is not set | ||
530 | |||
531 | # | ||
532 | # Telephony Support | ||
533 | # | ||
534 | # CONFIG_PHONE is not set | ||
535 | |||
536 | # | ||
537 | # Input device support | ||
538 | # | ||
539 | # CONFIG_INPUT is not set | ||
540 | |||
541 | # | ||
542 | # Hardware I/O ports | ||
543 | # | ||
544 | # CONFIG_SERIO is not set | ||
545 | # CONFIG_GAMEPORT is not set | ||
546 | |||
547 | # | ||
548 | # Character devices | ||
549 | # | ||
550 | # CONFIG_VT is not set | ||
551 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
552 | |||
553 | # | ||
554 | # Serial drivers | ||
555 | # | ||
556 | # CONFIG_SERIAL_8250 is not set | ||
557 | |||
558 | # | ||
559 | # Non-8250 serial port support | ||
560 | # | ||
561 | CONFIG_SERIAL_SH_SCI=y | ||
562 | CONFIG_SERIAL_SH_SCI_NR_UARTS=4 | ||
563 | CONFIG_SERIAL_SH_SCI_CONSOLE=y | ||
564 | CONFIG_SERIAL_CORE=y | ||
565 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
566 | # CONFIG_UNIX98_PTYS is not set | ||
567 | CONFIG_LEGACY_PTYS=y | ||
568 | CONFIG_LEGACY_PTY_COUNT=256 | ||
569 | |||
570 | # | ||
571 | # IPMI | ||
572 | # | ||
573 | # CONFIG_IPMI_HANDLER is not set | ||
574 | |||
575 | # | ||
576 | # Watchdog Cards | ||
577 | # | ||
578 | # CONFIG_WATCHDOG is not set | ||
579 | CONFIG_HW_RANDOM=y | ||
580 | # CONFIG_GEN_RTC is not set | ||
581 | # CONFIG_DTLK is not set | ||
582 | # CONFIG_R3964 is not set | ||
583 | |||
584 | # | ||
585 | # Ftape, the floppy tape device driver | ||
586 | # | ||
587 | # CONFIG_RAW_DRIVER is not set | ||
588 | |||
589 | # | ||
590 | # TPM devices | ||
591 | # | ||
592 | # CONFIG_TCG_TPM is not set | ||
593 | |||
594 | # | ||
595 | # I2C support | ||
596 | # | ||
597 | # CONFIG_I2C is not set | ||
598 | |||
599 | # | ||
600 | # SPI support | ||
601 | # | ||
602 | # CONFIG_SPI is not set | ||
603 | # CONFIG_SPI_MASTER is not set | ||
604 | |||
605 | # | ||
606 | # Dallas's 1-wire bus | ||
607 | # | ||
608 | # CONFIG_W1 is not set | ||
609 | |||
610 | # | ||
611 | # Hardware Monitoring support | ||
612 | # | ||
613 | CONFIG_HWMON=y | ||
614 | # CONFIG_HWMON_VID is not set | ||
615 | # CONFIG_SENSORS_ABITUGURU is not set | ||
616 | # CONFIG_SENSORS_F71805F is not set | ||
617 | # CONFIG_SENSORS_VT1211 is not set | ||
618 | # CONFIG_HWMON_DEBUG_CHIP is not set | ||
619 | |||
620 | # | ||
621 | # Multimedia devices | ||
622 | # | ||
623 | # CONFIG_VIDEO_DEV is not set | ||
624 | |||
625 | # | ||
626 | # Digital Video Broadcasting Devices | ||
627 | # | ||
628 | # CONFIG_DVB is not set | ||
629 | |||
630 | # | ||
631 | # Graphics support | ||
632 | # | ||
633 | CONFIG_FIRMWARE_EDID=y | ||
634 | # CONFIG_FB is not set | ||
635 | |||
636 | # | ||
637 | # Sound | ||
638 | # | ||
639 | # CONFIG_SOUND is not set | ||
640 | |||
641 | # | ||
642 | # USB support | ||
643 | # | ||
644 | # CONFIG_USB_ARCH_HAS_HCD is not set | ||
645 | # CONFIG_USB_ARCH_HAS_OHCI is not set | ||
646 | # CONFIG_USB_ARCH_HAS_EHCI is not set | ||
647 | |||
648 | # | ||
649 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | ||
650 | # | ||
651 | |||
652 | # | ||
653 | # USB Gadget Support | ||
654 | # | ||
655 | # CONFIG_USB_GADGET is not set | ||
656 | |||
657 | # | ||
658 | # MMC/SD Card support | ||
659 | # | ||
660 | # CONFIG_MMC is not set | ||
661 | |||
662 | # | ||
663 | # LED devices | ||
664 | # | ||
665 | # CONFIG_NEW_LEDS is not set | ||
666 | |||
667 | # | ||
668 | # LED drivers | ||
669 | # | ||
670 | |||
671 | # | ||
672 | # LED Triggers | ||
673 | # | ||
674 | |||
675 | # | ||
676 | # InfiniBand support | ||
677 | # | ||
678 | |||
679 | # | ||
680 | # EDAC - error detection and reporting (RAS) (EXPERIMENTAL) | ||
681 | # | ||
682 | |||
683 | # | ||
684 | # Real Time Clock | ||
685 | # | ||
686 | # CONFIG_RTC_CLASS is not set | ||
687 | |||
688 | # | ||
689 | # DMA Engine support | ||
690 | # | ||
691 | # CONFIG_DMA_ENGINE is not set | ||
692 | |||
693 | # | ||
694 | # DMA Clients | ||
695 | # | ||
696 | |||
697 | # | ||
698 | # DMA Devices | ||
699 | # | ||
700 | |||
701 | # | ||
702 | # File systems | ||
703 | # | ||
704 | CONFIG_EXT2_FS=y | ||
705 | # CONFIG_EXT2_FS_XATTR is not set | ||
706 | # CONFIG_EXT3_FS is not set | ||
707 | # CONFIG_EXT4DEV_FS is not set | ||
708 | # CONFIG_REISERFS_FS is not set | ||
709 | # CONFIG_JFS_FS is not set | ||
710 | # CONFIG_FS_POSIX_ACL is not set | ||
711 | # CONFIG_XFS_FS is not set | ||
712 | # CONFIG_GFS2_FS is not set | ||
713 | # CONFIG_MINIX_FS is not set | ||
714 | CONFIG_ROMFS_FS=y | ||
715 | # CONFIG_INOTIFY is not set | ||
716 | # CONFIG_QUOTA is not set | ||
717 | # CONFIG_DNOTIFY is not set | ||
718 | # CONFIG_AUTOFS_FS is not set | ||
719 | # CONFIG_AUTOFS4_FS is not set | ||
720 | # CONFIG_FUSE_FS is not set | ||
721 | |||
722 | # | ||
723 | # CD-ROM/DVD Filesystems | ||
724 | # | ||
725 | # CONFIG_ISO9660_FS is not set | ||
726 | # CONFIG_UDF_FS is not set | ||
727 | |||
728 | # | ||
729 | # DOS/FAT/NT Filesystems | ||
730 | # | ||
731 | # CONFIG_MSDOS_FS is not set | ||
732 | # CONFIG_VFAT_FS is not set | ||
733 | # CONFIG_NTFS_FS is not set | ||
734 | |||
735 | # | ||
736 | # Pseudo filesystems | ||
737 | # | ||
738 | CONFIG_PROC_FS=y | ||
739 | CONFIG_PROC_SYSCTL=y | ||
740 | # CONFIG_SYSFS is not set | ||
741 | # CONFIG_TMPFS is not set | ||
742 | # CONFIG_HUGETLBFS is not set | ||
743 | # CONFIG_HUGETLB_PAGE is not set | ||
744 | CONFIG_RAMFS=y | ||
745 | |||
746 | # | ||
747 | # Miscellaneous filesystems | ||
748 | # | ||
749 | # CONFIG_ADFS_FS is not set | ||
750 | # CONFIG_AFFS_FS is not set | ||
751 | # CONFIG_HFS_FS is not set | ||
752 | # CONFIG_HFSPLUS_FS is not set | ||
753 | # CONFIG_BEFS_FS is not set | ||
754 | # CONFIG_BFS_FS is not set | ||
755 | # CONFIG_EFS_FS is not set | ||
756 | # CONFIG_JFFS_FS is not set | ||
757 | # CONFIG_JFFS2_FS is not set | ||
758 | CONFIG_CRAMFS=y | ||
759 | # CONFIG_VXFS_FS is not set | ||
760 | # CONFIG_HPFS_FS is not set | ||
761 | # CONFIG_QNX4FS_FS is not set | ||
762 | # CONFIG_SYSV_FS is not set | ||
763 | # CONFIG_UFS_FS is not set | ||
764 | |||
765 | # | ||
766 | # Network File Systems | ||
767 | # | ||
768 | # CONFIG_NFS_FS is not set | ||
769 | # CONFIG_NFSD is not set | ||
770 | # CONFIG_SMB_FS is not set | ||
771 | # CONFIG_CIFS is not set | ||
772 | # CONFIG_NCP_FS is not set | ||
773 | # CONFIG_CODA_FS is not set | ||
774 | # CONFIG_AFS_FS is not set | ||
775 | # CONFIG_9P_FS is not set | ||
776 | |||
777 | # | ||
778 | # Partition Types | ||
779 | # | ||
780 | # CONFIG_PARTITION_ADVANCED is not set | ||
781 | CONFIG_MSDOS_PARTITION=y | ||
782 | |||
783 | # | ||
784 | # Native Language Support | ||
785 | # | ||
786 | # CONFIG_NLS is not set | ||
787 | |||
788 | # | ||
789 | # Profiling support | ||
790 | # | ||
791 | # CONFIG_PROFILING is not set | ||
792 | |||
793 | # | ||
794 | # Kernel hacking | ||
795 | # | ||
796 | # CONFIG_PRINTK_TIME is not set | ||
797 | CONFIG_ENABLE_MUST_CHECK=y | ||
798 | # CONFIG_MAGIC_SYSRQ is not set | ||
799 | # CONFIG_UNUSED_SYMBOLS is not set | ||
800 | # CONFIG_DEBUG_KERNEL is not set | ||
801 | CONFIG_LOG_BUF_SHIFT=14 | ||
802 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
803 | # CONFIG_UNWIND_INFO is not set | ||
804 | # CONFIG_HEADERS_CHECK is not set | ||
805 | # CONFIG_SH_STANDARD_BIOS is not set | ||
806 | # CONFIG_EARLY_SCIF_CONSOLE is not set | ||
807 | # CONFIG_KGDB is not set | ||
808 | |||
809 | # | ||
810 | # Security options | ||
811 | # | ||
812 | # CONFIG_KEYS is not set | ||
813 | |||
814 | # | ||
815 | # Cryptographic options | ||
816 | # | ||
817 | # CONFIG_CRYPTO is not set | ||
818 | |||
819 | # | ||
820 | # Library routines | ||
821 | # | ||
822 | CONFIG_CRC_CCITT=y | ||
823 | # CONFIG_CRC16 is not set | ||
824 | CONFIG_CRC32=y | ||
825 | # CONFIG_LIBCRC32C is not set | ||
826 | CONFIG_ZLIB_INFLATE=y | ||
diff --git a/arch/sh/drivers/Kconfig b/arch/sh/drivers/Kconfig new file mode 100644 index 000000000000..c54c758e6243 --- /dev/null +++ b/arch/sh/drivers/Kconfig | |||
@@ -0,0 +1,9 @@ | |||
1 | menu "Additional SuperH Device Drivers" | ||
2 | |||
3 | config PUSH_SWITCH | ||
4 | tristate "Push switch support" | ||
5 | help | ||
6 | This enables support for the push switch framework, a simple | ||
7 | framework that allows for sysfs-driven switch status reporting. | ||
8 | |||
9 | endmenu | ||
diff --git a/arch/sh/drivers/Makefile b/arch/sh/drivers/Makefile index 338c3729d270..bf18dbfb6787 100644 --- a/arch/sh/drivers/Makefile +++ b/arch/sh/drivers/Makefile | |||
@@ -5,4 +5,4 @@ | |||
5 | obj-$(CONFIG_PCI) += pci/ | 5 | obj-$(CONFIG_PCI) += pci/ |
6 | obj-$(CONFIG_SH_DMA) += dma/ | 6 | obj-$(CONFIG_SH_DMA) += dma/ |
7 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ | 7 | obj-$(CONFIG_SUPERHYWAY) += superhyway/ |
8 | 8 | obj-$(CONFIG_PUSH_SWITCH) += push-switch.o | |
diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index 065d4c90970e..db1295d32268 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile | |||
@@ -2,8 +2,8 @@ | |||
2 | # Makefile for the SuperH DMA specific kernel interface routines under Linux. | 2 | # Makefile for the SuperH DMA specific kernel interface routines under Linux. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += dma-api.o dma-isa.o | 5 | obj-y += dma-api.o |
6 | obj-$(CONFIG_ISA_DMA_API) += dma-isa.o | ||
6 | obj-$(CONFIG_SYSFS) += dma-sysfs.o | 7 | obj-$(CONFIG_SYSFS) += dma-sysfs.o |
7 | obj-$(CONFIG_SH_DMA) += dma-sh.o | 8 | obj-$(CONFIG_SH_DMA) += dma-sh.o |
8 | obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o | 9 | obj-$(CONFIG_SH_DREAMCAST) += dma-pvr2.o dma-g2.o |
9 | |||
diff --git a/arch/sh/drivers/dma/dma-api.c b/arch/sh/drivers/dma/dma-api.c index 47c3e837599b..e062067edd24 100644 --- a/arch/sh/drivers/dma/dma-api.c +++ b/arch/sh/drivers/dma/dma-api.c | |||
@@ -11,61 +11,27 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
16 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
17 | #include <linux/list.h> | 16 | #include <linux/list.h> |
18 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/mm.h> | ||
19 | #include <asm/dma.h> | 19 | #include <asm/dma.h> |
20 | 20 | ||
21 | DEFINE_SPINLOCK(dma_spin_lock); | 21 | DEFINE_SPINLOCK(dma_spin_lock); |
22 | static LIST_HEAD(registered_dmac_list); | 22 | static LIST_HEAD(registered_dmac_list); |
23 | 23 | ||
24 | /* | ||
25 | * A brief note about the reasons for this API as it stands. | ||
26 | * | ||
27 | * For starters, the old ISA DMA API didn't work for us for a number of | ||
28 | * reasons, for one, the vast majority of channels on the SH DMAC are | ||
29 | * dual-address mode only, and both the new and the old DMA APIs are after the | ||
30 | * concept of managing a DMA buffer, which doesn't overly fit this model very | ||
31 | * well. In addition to which, the new API is largely geared at IOMMUs and | ||
32 | * GARTs, and doesn't even support the channel notion very well. | ||
33 | * | ||
34 | * The other thing that's a marginal issue, is the sheer number of random DMA | ||
35 | * engines that are present (ie, in boards like the Dreamcast), some of which | ||
36 | * cascade off of the SH DMAC, and others do not. As such, there was a real | ||
37 | * need for a scalable subsystem that could deal with both single and | ||
38 | * dual-address mode usage, in addition to interoperating with cascaded DMACs. | ||
39 | * | ||
40 | * There really isn't any reason why this needs to be SH specific, though I'm | ||
41 | * not aware of too many other processors (with the exception of some MIPS) | ||
42 | * that have the same concept of a dual address mode, or any real desire to | ||
43 | * actually make use of the DMAC even if such a subsystem were exposed | ||
44 | * elsewhere. | ||
45 | * | ||
46 | * The idea for this was derived from the ARM port, which acted as an excellent | ||
47 | * reference when trying to address these issues. | ||
48 | * | ||
49 | * It should also be noted that the decision to add Yet Another DMA API(tm) to | ||
50 | * the kernel wasn't made easily, and was only decided upon after conferring | ||
51 | * with jejb with regards to the state of the old and new APIs as they applied | ||
52 | * to these circumstances. Philip Blundell was also a great help in figuring | ||
53 | * out some single-address mode DMA semantics that were otherwise rather | ||
54 | * confusing. | ||
55 | */ | ||
56 | |||
57 | struct dma_info *get_dma_info(unsigned int chan) | 24 | struct dma_info *get_dma_info(unsigned int chan) |
58 | { | 25 | { |
59 | struct dma_info *info; | 26 | struct dma_info *info; |
60 | unsigned int total = 0; | ||
61 | 27 | ||
62 | /* | 28 | /* |
63 | * Look for each DMAC's range to determine who the owner of | 29 | * Look for each DMAC's range to determine who the owner of |
64 | * the channel is. | 30 | * the channel is. |
65 | */ | 31 | */ |
66 | list_for_each_entry(info, ®istered_dmac_list, list) { | 32 | list_for_each_entry(info, ®istered_dmac_list, list) { |
67 | total += info->nr_channels; | 33 | if ((chan < info->first_channel_nr) || |
68 | if (chan > total) | 34 | (chan >= info->first_channel_nr + info->nr_channels)) |
69 | continue; | 35 | continue; |
70 | 36 | ||
71 | return info; | 37 | return info; |
@@ -73,6 +39,22 @@ struct dma_info *get_dma_info(unsigned int chan) | |||
73 | 39 | ||
74 | return NULL; | 40 | return NULL; |
75 | } | 41 | } |
42 | EXPORT_SYMBOL(get_dma_info); | ||
43 | |||
44 | struct dma_info *get_dma_info_by_name(const char *dmac_name) | ||
45 | { | ||
46 | struct dma_info *info; | ||
47 | |||
48 | list_for_each_entry(info, ®istered_dmac_list, list) { | ||
49 | if (dmac_name && (strcmp(dmac_name, info->name) != 0)) | ||
50 | continue; | ||
51 | else | ||
52 | return info; | ||
53 | } | ||
54 | |||
55 | return NULL; | ||
56 | } | ||
57 | EXPORT_SYMBOL(get_dma_info_by_name); | ||
76 | 58 | ||
77 | static unsigned int get_nr_channels(void) | 59 | static unsigned int get_nr_channels(void) |
78 | { | 60 | { |
@@ -91,63 +73,161 @@ static unsigned int get_nr_channels(void) | |||
91 | struct dma_channel *get_dma_channel(unsigned int chan) | 73 | struct dma_channel *get_dma_channel(unsigned int chan) |
92 | { | 74 | { |
93 | struct dma_info *info = get_dma_info(chan); | 75 | struct dma_info *info = get_dma_info(chan); |
76 | struct dma_channel *channel; | ||
77 | int i; | ||
94 | 78 | ||
95 | if (!info) | 79 | if (unlikely(!info)) |
96 | return ERR_PTR(-EINVAL); | 80 | return ERR_PTR(-EINVAL); |
97 | 81 | ||
98 | return info->channels + chan; | 82 | for (i = 0; i < info->nr_channels; i++) { |
83 | channel = &info->channels[i]; | ||
84 | if (channel->chan == chan) | ||
85 | return channel; | ||
86 | } | ||
87 | |||
88 | return NULL; | ||
99 | } | 89 | } |
90 | EXPORT_SYMBOL(get_dma_channel); | ||
100 | 91 | ||
101 | int get_dma_residue(unsigned int chan) | 92 | int get_dma_residue(unsigned int chan) |
102 | { | 93 | { |
103 | struct dma_info *info = get_dma_info(chan); | 94 | struct dma_info *info = get_dma_info(chan); |
104 | struct dma_channel *channel = &info->channels[chan]; | 95 | struct dma_channel *channel = get_dma_channel(chan); |
105 | 96 | ||
106 | if (info->ops->get_residue) | 97 | if (info->ops->get_residue) |
107 | return info->ops->get_residue(channel); | 98 | return info->ops->get_residue(channel); |
108 | 99 | ||
109 | return 0; | 100 | return 0; |
110 | } | 101 | } |
102 | EXPORT_SYMBOL(get_dma_residue); | ||
111 | 103 | ||
112 | int request_dma(unsigned int chan, const char *dev_id) | 104 | static int search_cap(const char **haystack, const char *needle) |
113 | { | 105 | { |
114 | struct dma_info *info = get_dma_info(chan); | 106 | const char **p; |
115 | struct dma_channel *channel = &info->channels[chan]; | 107 | |
108 | for (p = haystack; *p; p++) | ||
109 | if (strcmp(*p, needle) == 0) | ||
110 | return 1; | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * request_dma_bycap - Allocate a DMA channel based on its capabilities | ||
117 | * @dmac: List of DMA controllers to search | ||
118 | * @caps: List of capabilities | ||
119 | * | ||
120 | * Search all channels of all DMA controllers to find a channel which | ||
121 | * matches the requested capabilities. The result is the channel | ||
122 | * number if a match is found, or %-ENODEV if no match is found. | ||
123 | * | ||
124 | * Note that not all DMA controllers export capabilities, in which | ||
125 | * case they can never be allocated using this API, and so | ||
126 | * request_dma() must be used specifying the channel number. | ||
127 | */ | ||
128 | int request_dma_bycap(const char **dmac, const char **caps, const char *dev_id) | ||
129 | { | ||
130 | unsigned int found = 0; | ||
131 | struct dma_info *info; | ||
132 | const char **p; | ||
133 | int i; | ||
134 | |||
135 | BUG_ON(!dmac || !caps); | ||
136 | |||
137 | list_for_each_entry(info, ®istered_dmac_list, list) | ||
138 | if (strcmp(*dmac, info->name) == 0) { | ||
139 | found = 1; | ||
140 | break; | ||
141 | } | ||
142 | |||
143 | if (!found) | ||
144 | return -ENODEV; | ||
145 | |||
146 | for (i = 0; i < info->nr_channels; i++) { | ||
147 | struct dma_channel *channel = &info->channels[i]; | ||
148 | |||
149 | if (unlikely(!channel->caps)) | ||
150 | continue; | ||
151 | |||
152 | for (p = caps; *p; p++) { | ||
153 | if (!search_cap(channel->caps, *p)) | ||
154 | break; | ||
155 | if (request_dma(channel->chan, dev_id) == 0) | ||
156 | return channel->chan; | ||
157 | } | ||
158 | } | ||
159 | |||
160 | return -EINVAL; | ||
161 | } | ||
162 | EXPORT_SYMBOL(request_dma_bycap); | ||
163 | |||
164 | int dmac_search_free_channel(const char *dev_id) | ||
165 | { | ||
166 | struct dma_channel *channel = NULL; | ||
167 | struct dma_info *info = get_dma_info(0); | ||
168 | int i; | ||
169 | |||
170 | for (i = 0; i < info->nr_channels; i++) { | ||
171 | channel = &info->channels[i]; | ||
172 | if (unlikely(!channel)) | ||
173 | return -ENODEV; | ||
174 | |||
175 | if (atomic_read(&channel->busy) == 0) | ||
176 | break; | ||
177 | } | ||
116 | 178 | ||
117 | down(&channel->sem); | 179 | if (info->ops->request) { |
180 | int result = info->ops->request(channel); | ||
181 | if (result) | ||
182 | return result; | ||
118 | 183 | ||
119 | if (!info->ops || chan >= MAX_DMA_CHANNELS) { | 184 | atomic_set(&channel->busy, 1); |
120 | up(&channel->sem); | 185 | return channel->chan; |
121 | return -EINVAL; | ||
122 | } | 186 | } |
123 | 187 | ||
124 | atomic_set(&channel->busy, 1); | 188 | return -ENOSYS; |
189 | } | ||
190 | |||
191 | int request_dma(unsigned int chan, const char *dev_id) | ||
192 | { | ||
193 | struct dma_channel *channel = NULL; | ||
194 | struct dma_info *info = get_dma_info(chan); | ||
195 | int result; | ||
196 | |||
197 | channel = get_dma_channel(chan); | ||
198 | if (atomic_xchg(&channel->busy, 1)) | ||
199 | return -EBUSY; | ||
125 | 200 | ||
126 | strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id)); | 201 | strlcpy(channel->dev_id, dev_id, sizeof(channel->dev_id)); |
127 | 202 | ||
128 | up(&channel->sem); | 203 | if (info->ops->request) { |
204 | result = info->ops->request(channel); | ||
205 | if (result) | ||
206 | atomic_set(&channel->busy, 0); | ||
129 | 207 | ||
130 | if (info->ops->request) | 208 | return result; |
131 | return info->ops->request(channel); | 209 | } |
132 | 210 | ||
133 | return 0; | 211 | return 0; |
134 | } | 212 | } |
213 | EXPORT_SYMBOL(request_dma); | ||
135 | 214 | ||
136 | void free_dma(unsigned int chan) | 215 | void free_dma(unsigned int chan) |
137 | { | 216 | { |
138 | struct dma_info *info = get_dma_info(chan); | 217 | struct dma_info *info = get_dma_info(chan); |
139 | struct dma_channel *channel = &info->channels[chan]; | 218 | struct dma_channel *channel = get_dma_channel(chan); |
140 | 219 | ||
141 | if (info->ops->free) | 220 | if (info->ops->free) |
142 | info->ops->free(channel); | 221 | info->ops->free(channel); |
143 | 222 | ||
144 | atomic_set(&channel->busy, 0); | 223 | atomic_set(&channel->busy, 0); |
145 | } | 224 | } |
225 | EXPORT_SYMBOL(free_dma); | ||
146 | 226 | ||
147 | void dma_wait_for_completion(unsigned int chan) | 227 | void dma_wait_for_completion(unsigned int chan) |
148 | { | 228 | { |
149 | struct dma_info *info = get_dma_info(chan); | 229 | struct dma_info *info = get_dma_info(chan); |
150 | struct dma_channel *channel = &info->channels[chan]; | 230 | struct dma_channel *channel = get_dma_channel(chan); |
151 | 231 | ||
152 | if (channel->flags & DMA_TEI_CAPABLE) { | 232 | if (channel->flags & DMA_TEI_CAPABLE) { |
153 | wait_event(channel->wait_queue, | 233 | wait_event(channel->wait_queue, |
@@ -158,21 +238,52 @@ void dma_wait_for_completion(unsigned int chan) | |||
158 | while (info->ops->get_residue(channel)) | 238 | while (info->ops->get_residue(channel)) |
159 | cpu_relax(); | 239 | cpu_relax(); |
160 | } | 240 | } |
241 | EXPORT_SYMBOL(dma_wait_for_completion); | ||
242 | |||
243 | int register_chan_caps(const char *dmac, struct dma_chan_caps *caps) | ||
244 | { | ||
245 | struct dma_info *info; | ||
246 | unsigned int found = 0; | ||
247 | int i; | ||
248 | |||
249 | list_for_each_entry(info, ®istered_dmac_list, list) | ||
250 | if (strcmp(dmac, info->name) == 0) { | ||
251 | found = 1; | ||
252 | break; | ||
253 | } | ||
254 | |||
255 | if (unlikely(!found)) | ||
256 | return -ENODEV; | ||
257 | |||
258 | for (i = 0; i < info->nr_channels; i++, caps++) { | ||
259 | struct dma_channel *channel; | ||
260 | |||
261 | if ((info->first_channel_nr + i) != caps->ch_num) | ||
262 | return -EINVAL; | ||
263 | |||
264 | channel = &info->channels[i]; | ||
265 | channel->caps = caps->caplist; | ||
266 | } | ||
267 | |||
268 | return 0; | ||
269 | } | ||
270 | EXPORT_SYMBOL(register_chan_caps); | ||
161 | 271 | ||
162 | void dma_configure_channel(unsigned int chan, unsigned long flags) | 272 | void dma_configure_channel(unsigned int chan, unsigned long flags) |
163 | { | 273 | { |
164 | struct dma_info *info = get_dma_info(chan); | 274 | struct dma_info *info = get_dma_info(chan); |
165 | struct dma_channel *channel = &info->channels[chan]; | 275 | struct dma_channel *channel = get_dma_channel(chan); |
166 | 276 | ||
167 | if (info->ops->configure) | 277 | if (info->ops->configure) |
168 | info->ops->configure(channel, flags); | 278 | info->ops->configure(channel, flags); |
169 | } | 279 | } |
280 | EXPORT_SYMBOL(dma_configure_channel); | ||
170 | 281 | ||
171 | int dma_xfer(unsigned int chan, unsigned long from, | 282 | int dma_xfer(unsigned int chan, unsigned long from, |
172 | unsigned long to, size_t size, unsigned int mode) | 283 | unsigned long to, size_t size, unsigned int mode) |
173 | { | 284 | { |
174 | struct dma_info *info = get_dma_info(chan); | 285 | struct dma_info *info = get_dma_info(chan); |
175 | struct dma_channel *channel = &info->channels[chan]; | 286 | struct dma_channel *channel = get_dma_channel(chan); |
176 | 287 | ||
177 | channel->sar = from; | 288 | channel->sar = from; |
178 | channel->dar = to; | 289 | channel->dar = to; |
@@ -181,8 +292,20 @@ int dma_xfer(unsigned int chan, unsigned long from, | |||
181 | 292 | ||
182 | return info->ops->xfer(channel); | 293 | return info->ops->xfer(channel); |
183 | } | 294 | } |
295 | EXPORT_SYMBOL(dma_xfer); | ||
296 | |||
297 | int dma_extend(unsigned int chan, unsigned long op, void *param) | ||
298 | { | ||
299 | struct dma_info *info = get_dma_info(chan); | ||
300 | struct dma_channel *channel = get_dma_channel(chan); | ||
301 | |||
302 | if (info->ops->extend) | ||
303 | return info->ops->extend(channel, op, param); | ||
304 | |||
305 | return -ENOSYS; | ||
306 | } | ||
307 | EXPORT_SYMBOL(dma_extend); | ||
184 | 308 | ||
185 | #ifdef CONFIG_PROC_FS | ||
186 | static int dma_read_proc(char *buf, char **start, off_t off, | 309 | static int dma_read_proc(char *buf, char **start, off_t off, |
187 | int len, int *eof, void *data) | 310 | int len, int *eof, void *data) |
188 | { | 311 | { |
@@ -214,8 +337,6 @@ static int dma_read_proc(char *buf, char **start, off_t off, | |||
214 | 337 | ||
215 | return p - buf; | 338 | return p - buf; |
216 | } | 339 | } |
217 | #endif | ||
218 | |||
219 | 340 | ||
220 | int register_dmac(struct dma_info *info) | 341 | int register_dmac(struct dma_info *info) |
221 | { | 342 | { |
@@ -224,8 +345,7 @@ int register_dmac(struct dma_info *info) | |||
224 | INIT_LIST_HEAD(&info->list); | 345 | INIT_LIST_HEAD(&info->list); |
225 | 346 | ||
226 | printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n", | 347 | printk(KERN_INFO "DMA: Registering %s handler (%d channel%s).\n", |
227 | info->name, info->nr_channels, | 348 | info->name, info->nr_channels, info->nr_channels > 1 ? "s" : ""); |
228 | info->nr_channels > 1 ? "s" : ""); | ||
229 | 349 | ||
230 | BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels); | 350 | BUG_ON((info->flags & DMAC_CHANNELS_CONFIGURED) && !info->channels); |
231 | 351 | ||
@@ -242,28 +362,26 @@ int register_dmac(struct dma_info *info) | |||
242 | 362 | ||
243 | size = sizeof(struct dma_channel) * info->nr_channels; | 363 | size = sizeof(struct dma_channel) * info->nr_channels; |
244 | 364 | ||
245 | info->channels = kmalloc(size, GFP_KERNEL); | 365 | info->channels = kzalloc(size, GFP_KERNEL); |
246 | if (!info->channels) | 366 | if (!info->channels) |
247 | return -ENOMEM; | 367 | return -ENOMEM; |
248 | |||
249 | memset(info->channels, 0, size); | ||
250 | } | 368 | } |
251 | 369 | ||
252 | total_channels = get_nr_channels(); | 370 | total_channels = get_nr_channels(); |
253 | for (i = 0; i < info->nr_channels; i++) { | 371 | for (i = 0; i < info->nr_channels; i++) { |
254 | struct dma_channel *chan = info->channels + i; | 372 | struct dma_channel *chan = &info->channels[i]; |
373 | |||
374 | atomic_set(&chan->busy, 0); | ||
255 | 375 | ||
256 | chan->chan = i; | 376 | chan->chan = info->first_channel_nr + i; |
257 | chan->vchan = i + total_channels; | 377 | chan->vchan = info->first_channel_nr + i + total_channels; |
258 | 378 | ||
259 | memcpy(chan->dev_id, "Unused", 7); | 379 | memcpy(chan->dev_id, "Unused", 7); |
260 | 380 | ||
261 | if (info->flags & DMAC_CHANNELS_TEI_CAPABLE) | 381 | if (info->flags & DMAC_CHANNELS_TEI_CAPABLE) |
262 | chan->flags |= DMA_TEI_CAPABLE; | 382 | chan->flags |= DMA_TEI_CAPABLE; |
263 | 383 | ||
264 | init_MUTEX(&chan->sem); | ||
265 | init_waitqueue_head(&chan->wait_queue); | 384 | init_waitqueue_head(&chan->wait_queue); |
266 | |||
267 | dma_create_sysfs_files(chan, info); | 385 | dma_create_sysfs_files(chan, info); |
268 | } | 386 | } |
269 | 387 | ||
@@ -271,6 +389,7 @@ int register_dmac(struct dma_info *info) | |||
271 | 389 | ||
272 | return 0; | 390 | return 0; |
273 | } | 391 | } |
392 | EXPORT_SYMBOL(register_dmac); | ||
274 | 393 | ||
275 | void unregister_dmac(struct dma_info *info) | 394 | void unregister_dmac(struct dma_info *info) |
276 | { | 395 | { |
@@ -285,31 +404,16 @@ void unregister_dmac(struct dma_info *info) | |||
285 | list_del(&info->list); | 404 | list_del(&info->list); |
286 | platform_device_unregister(info->pdev); | 405 | platform_device_unregister(info->pdev); |
287 | } | 406 | } |
407 | EXPORT_SYMBOL(unregister_dmac); | ||
288 | 408 | ||
289 | static int __init dma_api_init(void) | 409 | static int __init dma_api_init(void) |
290 | { | 410 | { |
291 | printk("DMA: Registering DMA API.\n"); | 411 | printk(KERN_NOTICE "DMA: Registering DMA API.\n"); |
292 | |||
293 | #ifdef CONFIG_PROC_FS | ||
294 | create_proc_read_entry("dma", 0, 0, dma_read_proc, 0); | 412 | create_proc_read_entry("dma", 0, 0, dma_read_proc, 0); |
295 | #endif | ||
296 | |||
297 | return 0; | 413 | return 0; |
298 | } | 414 | } |
299 | |||
300 | subsys_initcall(dma_api_init); | 415 | subsys_initcall(dma_api_init); |
301 | 416 | ||
302 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); | 417 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>"); |
303 | MODULE_DESCRIPTION("DMA API for SuperH"); | 418 | MODULE_DESCRIPTION("DMA API for SuperH"); |
304 | MODULE_LICENSE("GPL"); | 419 | MODULE_LICENSE("GPL"); |
305 | |||
306 | EXPORT_SYMBOL(request_dma); | ||
307 | EXPORT_SYMBOL(free_dma); | ||
308 | EXPORT_SYMBOL(register_dmac); | ||
309 | EXPORT_SYMBOL(get_dma_residue); | ||
310 | EXPORT_SYMBOL(get_dma_info); | ||
311 | EXPORT_SYMBOL(get_dma_channel); | ||
312 | EXPORT_SYMBOL(dma_xfer); | ||
313 | EXPORT_SYMBOL(dma_wait_for_completion); | ||
314 | EXPORT_SYMBOL(dma_configure_channel); | ||
315 | |||
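
The request_dma_bycap() kernel-doc above explains the capability lookup in prose; the sketch below shows how a client might combine it with the other entry points this patch exports (dma_xfer(), dma_wait_for_completion(), free_dma()). It is illustrative only: the controller name "DMAC 0", the "cipher" capability string, the device id "example-client" and the zero mode argument are assumptions, not values defined by this patch.

#include <linux/types.h>
#include <asm/dma.h>

/* Hypothetical controller/capability names -- not defined by this patch. */
static const char *example_dmac_list[] = { "DMAC 0", NULL };
static const char *example_cap_list[]  = { "cipher", NULL };

static int example_dma_copy(unsigned long src, unsigned long dst, size_t len)
{
	/* Take any free channel whose controller advertises the capability. */
	int chan = request_dma_bycap(example_dmac_list, example_cap_list,
				     "example-client");
	if (chan < 0)
		return chan;	/* -ENODEV/-EINVAL: no matching channel */

	/* Mode 0 is a placeholder; real callers pass a DMAC-specific mode. */
	if (dma_xfer(chan, src, dst, len, 0) == 0)
		dma_wait_for_completion(chan);

	free_dma(chan);
	return 0;
}
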
diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 660786013350..f63721ed86c2 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c | |||
@@ -94,20 +94,13 @@ static int sh_dmac_request_dma(struct dma_channel *chan) | |||
94 | if (unlikely(!(chan->flags & DMA_TEI_CAPABLE))) | 94 | if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
95 | return 0; | 95 | return 0; |
96 | 96 | ||
97 | chan->name = kzalloc(32, GFP_KERNEL); | ||
98 | if (unlikely(chan->name == NULL)) | ||
99 | return -ENOMEM; | ||
100 | snprintf(chan->name, 32, "DMAC Transfer End (Channel %d)", | ||
101 | chan->chan); | ||
102 | |||
103 | return request_irq(get_dmte_irq(chan->chan), dma_tei, | 97 | return request_irq(get_dmte_irq(chan->chan), dma_tei, |
104 | IRQF_DISABLED, chan->name, chan); | 98 | IRQF_DISABLED, chan->dev_id, chan); |
105 | } | 99 | } |
106 | 100 | ||
107 | static void sh_dmac_free_dma(struct dma_channel *chan) | 101 | static void sh_dmac_free_dma(struct dma_channel *chan) |
108 | { | 102 | { |
109 | free_irq(get_dmte_irq(chan->chan), chan); | 103 | free_irq(get_dmte_irq(chan->chan), chan); |
110 | kfree(chan->name); | ||
111 | } | 104 | } |
112 | 105 | ||
113 | static void | 106 | static void |
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c index 29b8ef9873d1..eebcd4768bbf 100644 --- a/arch/sh/drivers/dma/dma-sysfs.c +++ b/arch/sh/drivers/dma/dma-sysfs.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * sysfs interface for SH DMA API | 4 | * sysfs interface for SH DMA API |
5 | * | 5 | * |
6 | * Copyright (C) 2004, 2005 Paul Mundt | 6 | * Copyright (C) 2004 - 2006 Paul Mundt |
7 | * | 7 | * |
8 | * This file is subject to the terms and conditions of the GNU General Public | 8 | * This file is subject to the terms and conditions of the GNU General Public |
9 | * License. See the file "COPYING" in the main directory of this archive | 9 | * License. See the file "COPYING" in the main directory of this archive |
@@ -21,7 +21,6 @@ | |||
21 | static struct sysdev_class dma_sysclass = { | 21 | static struct sysdev_class dma_sysclass = { |
22 | set_kset_name("dma"), | 22 | set_kset_name("dma"), |
23 | }; | 23 | }; |
24 | |||
25 | EXPORT_SYMBOL(dma_sysclass); | 24 | EXPORT_SYMBOL(dma_sysclass); |
26 | 25 | ||
27 | static ssize_t dma_show_devices(struct sys_device *dev, char *buf) | 26 | static ssize_t dma_show_devices(struct sys_device *dev, char *buf) |
@@ -31,7 +30,10 @@ static ssize_t dma_show_devices(struct sys_device *dev, char *buf) | |||
31 | 30 | ||
32 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 31 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
33 | struct dma_info *info = get_dma_info(i); | 32 | struct dma_info *info = get_dma_info(i); |
34 | struct dma_channel *channel = &info->channels[i]; | 33 | struct dma_channel *channel = get_dma_channel(i); |
34 | |||
35 | if (unlikely(!info) || !channel) | ||
36 | continue; | ||
35 | 37 | ||
36 | len += sprintf(buf + len, "%2d: %14s %s\n", | 38 | len += sprintf(buf + len, "%2d: %14s %s\n", |
37 | channel->chan, info->name, | 39 | channel->chan, info->name, |
@@ -125,11 +127,16 @@ int dma_create_sysfs_files(struct dma_channel *chan, struct dma_info *info) | |||
125 | if (ret) | 127 | if (ret) |
126 | return ret; | 128 | return ret; |
127 | 129 | ||
128 | sysdev_create_file(dev, &attr_dev_id); | 130 | ret |= sysdev_create_file(dev, &attr_dev_id); |
129 | sysdev_create_file(dev, &attr_count); | 131 | ret |= sysdev_create_file(dev, &attr_count); |
130 | sysdev_create_file(dev, &attr_mode); | 132 | ret |= sysdev_create_file(dev, &attr_mode); |
131 | sysdev_create_file(dev, &attr_flags); | 133 | ret |= sysdev_create_file(dev, &attr_flags); |
132 | sysdev_create_file(dev, &attr_config); | 134 | ret |= sysdev_create_file(dev, &attr_config); |
135 | |||
136 | if (unlikely(ret)) { | ||
137 | dev_err(&info->pdev->dev, "Failed creating attrs\n"); | ||
138 | return ret; | ||
139 | } | ||
133 | 140 | ||
134 | snprintf(name, sizeof(name), "dma%d", chan->chan); | 141 | snprintf(name, sizeof(name), "dma%d", chan->chan); |
135 | return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name); | 142 | return sysfs_create_link(&info->pdev->dev.kobj, &dev->kobj, name); |
diff --git a/arch/sh/drivers/pci/ops-titan.c b/arch/sh/drivers/pci/ops-titan.c index cd56d53375e7..ac8ee2312cd8 100644 --- a/arch/sh/drivers/pci/ops-titan.c +++ b/arch/sh/drivers/pci/ops-titan.c | |||
@@ -15,25 +15,21 @@ | |||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <asm/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/titan.h> | 19 | #include <asm/titan.h> |
20 | #include "pci-sh4.h" | 20 | #include "pci-sh4.h" |
21 | 21 | ||
22 | static char titan_irq_tab[] __initdata = { | ||
23 | TITAN_IRQ_WAN, | ||
24 | TITAN_IRQ_LAN, | ||
25 | TITAN_IRQ_MPCIA, | ||
26 | TITAN_IRQ_MPCIB, | ||
27 | TITAN_IRQ_USB, | ||
28 | }; | ||
29 | |||
22 | int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) | 30 | int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin) |
23 | { | 31 | { |
24 | int irq = -1; | 32 | int irq = titan_irq_tab[slot]; |
25 | |||
26 | switch (slot) { | ||
27 | case 0: irq = TITAN_IRQ_WAN; break; /* eth0 (WAN) */ | ||
28 | case 1: irq = TITAN_IRQ_LAN; break; /* eth1 (LAN) */ | ||
29 | case 2: irq = TITAN_IRQ_MPCIA; break; /* mPCI A */ | ||
30 | case 3: irq = TITAN_IRQ_MPCIB; break; /* mPCI B */ | ||
31 | case 4: irq = TITAN_IRQ_USB; break; /* USB */ | ||
32 | default: | ||
33 | printk(KERN_INFO "PCI: Bad IRQ mapping " | ||
34 | "request for slot %d\n", slot); | ||
35 | return -1; | ||
36 | } | ||
37 | 33 | ||
38 | printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n", | 34 | printk("PCI: Mapping TITAN IRQ for slot %d, pin %c to irq %d\n", |
39 | slot, pin - 1 + 'A', irq); | 35 | slot, pin - 1 + 'A', irq); |
diff --git a/arch/sh/drivers/pci/pci-sh7780.c b/arch/sh/drivers/pci/pci-sh7780.c index d6e635296534..602b644c35ad 100644 --- a/arch/sh/drivers/pci/pci-sh7780.c +++ b/arch/sh/drivers/pci/pci-sh7780.c | |||
@@ -22,6 +22,20 @@ | |||
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include "pci-sh4.h" | 23 | #include "pci-sh4.h" |
24 | 24 | ||
25 | #define INTC_BASE 0xffd00000 | ||
26 | #define INTC_ICR0 (INTC_BASE+0x0) | ||
27 | #define INTC_ICR1 (INTC_BASE+0x1c) | ||
28 | #define INTC_INTPRI (INTC_BASE+0x10) | ||
29 | #define INTC_INTREQ (INTC_BASE+0x24) | ||
30 | #define INTC_INTMSK0 (INTC_BASE+0x44) | ||
31 | #define INTC_INTMSK1 (INTC_BASE+0x48) | ||
32 | #define INTC_INTMSK2 (INTC_BASE+0x40080) | ||
33 | #define INTC_INTMSKCLR0 (INTC_BASE+0x64) | ||
34 | #define INTC_INTMSKCLR1 (INTC_BASE+0x68) | ||
35 | #define INTC_INTMSKCLR2 (INTC_BASE+0x40084) | ||
36 | #define INTC_INT2MSKR (INTC_BASE+0x40038) | ||
37 | #define INTC_INT2MSKCR (INTC_BASE+0x4003c) | ||
38 | |||
25 | /* | 39 | /* |
26 | * Initialization. Try all known PCI access methods. Note that we support | 40 | * Initialization. Try all known PCI access methods. Note that we support |
27 | * using both PCI BIOS and direct access: in such cases, we use I/O ports | 41 | * using both PCI BIOS and direct access: in such cases, we use I/O ports |
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c new file mode 100644 index 000000000000..f2b9157c314f --- /dev/null +++ b/arch/sh/drivers/push-switch.c | |||
@@ -0,0 +1,138 @@ | |||
1 | /* | ||
2 | * Generic push-switch framework | ||
3 | * | ||
4 | * Copyright (C) 2006 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <asm/push-switch.h> | ||
15 | |||
16 | #define DRV_NAME "push-switch" | ||
17 | #define DRV_VERSION "0.1.0" | ||
18 | |||
19 | static ssize_t switch_show(struct device *dev, | ||
20 | struct device_attribute *attr, | ||
21 | char *buf) | ||
22 | { | ||
23 | struct push_switch_platform_info *psw_info = dev->platform_data; | ||
24 | return sprintf(buf, "%s\n", psw_info->name); | ||
25 | } | ||
26 | static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL); | ||
27 | |||
28 | static void switch_timer(unsigned long data) | ||
29 | { | ||
30 | struct push_switch *psw = (struct push_switch *)data; | ||
31 | |||
32 | schedule_work(&psw->work); | ||
33 | } | ||
34 | |||
35 | static void switch_work_handler(void *data) | ||
36 | { | ||
37 | struct platform_device *pdev = data; | ||
38 | struct push_switch *psw = platform_get_drvdata(pdev); | ||
39 | |||
40 | psw->state = 0; | ||
41 | |||
42 | kobject_uevent(&pdev->dev.kobj, KOBJ_CHANGE); | ||
43 | } | ||
44 | |||
45 | static int switch_drv_probe(struct platform_device *pdev) | ||
46 | { | ||
47 | struct push_switch_platform_info *psw_info; | ||
48 | struct push_switch *psw; | ||
49 | int ret, irq; | ||
50 | |||
51 | psw = kzalloc(sizeof(struct push_switch), GFP_KERNEL); | ||
52 | if (unlikely(!psw)) | ||
53 | return -ENOMEM; | ||
54 | |||
55 | irq = platform_get_irq(pdev, 0); | ||
56 | if (unlikely(irq < 0)) { | ||
57 | ret = -ENODEV; | ||
58 | goto err; | ||
59 | } | ||
60 | |||
61 | psw_info = pdev->dev.platform_data; | ||
62 | BUG_ON(!psw_info); | ||
63 | |||
64 | ret = request_irq(irq, psw_info->irq_handler, | ||
65 | IRQF_DISABLED | psw_info->irq_flags, | ||
66 | psw_info->name ? psw_info->name : DRV_NAME, pdev); | ||
67 | if (unlikely(ret < 0)) | ||
68 | goto err; | ||
69 | |||
70 | if (psw_info->name) { | ||
71 | ret = device_create_file(&pdev->dev, &dev_attr_switch); | ||
72 | if (unlikely(ret)) { | ||
73 | dev_err(&pdev->dev, "Failed creating device attrs\n"); | ||
74 | ret = -EINVAL; | ||
75 | goto err_irq; | ||
76 | } | ||
77 | } | ||
78 | |||
79 | INIT_WORK(&psw->work, switch_work_handler, pdev); | ||
80 | init_timer(&psw->debounce); | ||
81 | |||
82 | psw->debounce.function = switch_timer; | ||
83 | psw->debounce.data = (unsigned long)psw; | ||
84 | |||
85 | platform_set_drvdata(pdev, psw); | ||
86 | |||
87 | return 0; | ||
88 | |||
89 | err_irq: | ||
90 | free_irq(irq, pdev); | ||
91 | err: | ||
92 | kfree(psw); | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | static int switch_drv_remove(struct platform_device *pdev) | ||
97 | { | ||
98 | struct push_switch *psw = platform_get_drvdata(pdev); | ||
99 | struct push_switch_platform_info *psw_info = pdev->dev.platform_data; | ||
100 | int irq = platform_get_irq(pdev, 0); | ||
101 | |||
102 | if (psw_info->name) | ||
103 | device_remove_file(&pdev->dev, &dev_attr_switch); | ||
104 | |||
105 | platform_set_drvdata(pdev, NULL); | ||
106 | flush_scheduled_work(); | ||
107 | del_timer_sync(&psw->debounce); | ||
108 | free_irq(irq, pdev); | ||
109 | |||
110 | kfree(psw); | ||
111 | |||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static struct platform_driver switch_driver = { | ||
116 | .probe = switch_drv_probe, | ||
117 | .remove = switch_drv_remove, | ||
118 | .driver = { | ||
119 | .name = DRV_NAME, | ||
120 | }, | ||
121 | }; | ||
122 | |||
123 | static int __init switch_init(void) | ||
124 | { | ||
125 | printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION); | ||
126 | return platform_driver_register(&switch_driver); | ||
127 | } | ||
128 | |||
129 | static void __exit switch_exit(void) | ||
130 | { | ||
131 | platform_driver_unregister(&switch_driver); | ||
132 | } | ||
133 | module_init(switch_init); | ||
134 | module_exit(switch_exit); | ||
135 | |||
136 | MODULE_VERSION(DRV_VERSION); | ||
137 | MODULE_AUTHOR("Paul Mundt"); | ||
138 | MODULE_LICENSE("GPL v2"); | ||
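
For context, this driver expects a board to register a "push-switch" platform device whose platform_data supplies the members the probe routine dereferences (name, irq_handler, irq_flags). The sketch below is a minimal, assumption-laden illustration: the IRQ number, the debounce interval, and the convention that the board handler sets psw->state and re-arms psw->debounce before the work handler clears it are inferred from the timer/work plumbing above, not spelled out by this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <asm/push-switch.h>

/* Board-side handler: record the press and let the debounce timer fire
 * switch_work_handler() via the plumbing set up in switch_drv_probe(). */
static irqreturn_t example_psw_irq(int irq, void *data)
{
	struct platform_device *pdev = data;
	struct push_switch *psw = platform_get_drvdata(pdev);

	if (!psw)	/* probe sets drvdata after request_irq() */
		return IRQ_HANDLED;

	psw->state = 1;
	mod_timer(&psw->debounce, jiffies + msecs_to_jiffies(50));

	return IRQ_HANDLED;
}

static struct resource example_psw_resources[] = {
	[0] = {
		.start	= 99,			/* hypothetical IRQ number */
		.end	= 99,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct push_switch_platform_info example_psw_info = {
	.name		= "example_sw",	/* shown by the sysfs "switch" attribute */
	.irq_handler	= example_psw_irq,
	.irq_flags	= 0,
};

static struct platform_device example_psw_device = {
	.name		= "push-switch",	/* matches DRV_NAME in the driver */
	.id		= 0,
	.num_resources	= ARRAY_SIZE(example_psw_resources),
	.resource	= example_psw_resources,
	.dev		= {
		.platform_data	= &example_psw_info,
	},
};

static int __init example_board_psw_init(void)
{
	return platform_device_register(&example_psw_device);
}
__initcall(example_board_psw_init);
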
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index 5da88a43d350..99c7e5249f7a 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y := process.o signal.o entry.o traps.o irq.o \ | 7 | obj-y := process.o signal.o traps.o irq.o \ |
8 | ptrace.o setup.o time.o sys_sh.o semaphore.o \ | 8 | ptrace.o setup.o time.o sys_sh.o semaphore.o \ |
9 | io.o io_generic.o sh_ksyms.o syscalls.o | 9 | io.o io_generic.o sh_ksyms.o syscalls.o |
10 | 10 | ||
@@ -21,3 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | |||
21 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 21 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
22 | obj-$(CONFIG_APM) += apm.o | 22 | obj-$(CONFIG_APM) += apm.o |
23 | obj-$(CONFIG_PM) += pm.o | 23 | obj-$(CONFIG_PM) += pm.o |
24 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile index fb5dac069382..0582e6712b79 100644 --- a/arch/sh/kernel/cpu/Makefile +++ b/arch/sh/kernel/cpu/Makefile | |||
@@ -2,11 +2,12 @@ | |||
2 | # Makefile for the Linux/SuperH CPU-specific backends. | 2 | # Makefile for the Linux/SuperH CPU-specific backends.
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += irq/ init.o clock.o | 5 | obj-$(CONFIG_CPU_SH2) = sh2/ |
6 | 6 | obj-$(CONFIG_CPU_SH2A) = sh2a/ | |
7 | obj-$(CONFIG_CPU_SH2) += sh2/ | 7 | obj-$(CONFIG_CPU_SH3) = sh3/ |
8 | obj-$(CONFIG_CPU_SH3) += sh3/ | 8 | obj-$(CONFIG_CPU_SH4) = sh4/ |
9 | obj-$(CONFIG_CPU_SH4) += sh4/ | ||
10 | 9 | ||
11 | obj-$(CONFIG_UBC_WAKEUP) += ubc.o | 10 | obj-$(CONFIG_UBC_WAKEUP) += ubc.o |
12 | obj-$(CONFIG_SH_ADC) += adc.o | 11 | obj-$(CONFIG_SH_ADC) += adc.o |
12 | |||
13 | obj-y += irq/ init.o clock.o | ||
diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c index 51ec64cdf348..abb586b12565 100644 --- a/arch/sh/kernel/cpu/clock.c +++ b/arch/sh/kernel/cpu/clock.c | |||
@@ -5,9 +5,11 @@ | |||
5 | * | 5 | * |
6 | * This clock framework is derived from the OMAP version by: | 6 | * This clock framework is derived from the OMAP version by: |
7 | * | 7 | * |
8 | * Copyright (C) 2004 Nokia Corporation | 8 | * Copyright (C) 2004 - 2005 Nokia Corporation |
9 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> | 9 | * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> |
10 | * | 10 | * |
11 | * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> | ||
12 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | 13 | * This file is subject to the terms and conditions of the GNU General Public |
12 | * License. See the file "COPYING" in the main directory of this archive | 14 | * License. See the file "COPYING" in the main directory of this archive |
13 | * for more details. | 15 | * for more details. |
@@ -20,6 +22,7 @@ | |||
20 | #include <linux/kref.h> | 22 | #include <linux/kref.h> |
21 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
22 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/platform_device.h> | ||
23 | #include <asm/clock.h> | 26 | #include <asm/clock.h> |
24 | #include <asm/timer.h> | 27 | #include <asm/timer.h> |
25 | 28 | ||
@@ -195,17 +198,37 @@ void clk_recalc_rate(struct clk *clk) | |||
195 | propagate_rate(clk); | 198 | propagate_rate(clk); |
196 | } | 199 | } |
197 | 200 | ||
198 | struct clk *clk_get(const char *id) | 201 | /* |
202 | * Returns a clock. Note that we first try to use device id on the bus | ||
203 | * and clock name. If this fails, we try to use clock name only. | ||
204 | */ | ||
205 | struct clk *clk_get(struct device *dev, const char *id) | ||
199 | { | 206 | { |
200 | struct clk *p, *clk = ERR_PTR(-ENOENT); | 207 | struct clk *p, *clk = ERR_PTR(-ENOENT); |
208 | int idno; | ||
209 | |||
210 | if (dev == NULL || dev->bus != &platform_bus_type) | ||
211 | idno = -1; | ||
212 | else | ||
213 | idno = to_platform_device(dev)->id; | ||
201 | 214 | ||
202 | mutex_lock(&clock_list_sem); | 215 | mutex_lock(&clock_list_sem); |
203 | list_for_each_entry(p, &clock_list, node) { | 216 | list_for_each_entry(p, &clock_list, node) { |
217 | if (p->id == idno && | ||
218 | strcmp(id, p->name) == 0 && try_module_get(p->owner)) { | ||
219 | clk = p; | ||
220 | goto found; | ||
221 | } | ||
222 | } | ||
223 | |||
224 | list_for_each_entry(p, &clock_list, node) { | ||
204 | if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { | 225 | if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { |
205 | clk = p; | 226 | clk = p; |
206 | break; | 227 | break; |
207 | } | 228 | } |
208 | } | 229 | } |
230 | |||
231 | found: | ||
209 | mutex_unlock(&clock_list_sem); | 232 | mutex_unlock(&clock_list_sem); |
210 | 233 | ||
211 | return clk; | 234 | return clk; |
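
The comment above states the new lookup order (device id plus clock name first, then clock name alone); the fragment below is a minimal sketch of a caller using the two-argument clk_get(). The clock name "module_clk" and the use of clk_get_rate()/clk_put() from the rest of the clock API are assumptions for illustration and are not part of this hunk.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static unsigned long example_module_clk_rate(struct platform_device *pdev)
{
	/* Matched first against (pdev->id, name), then by name alone. */
	struct clk *clk = clk_get(&pdev->dev, "module_clk");
	unsigned long rate;

	if (IS_ERR(clk))
		return 0;

	rate = clk_get_rate(clk);
	clk_put(clk);	/* drops the module reference taken in clk_get() */

	return rate;
}
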
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index bfb90eb0b7a6..48121766e8d2 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c | |||
@@ -68,12 +68,14 @@ static void __init cache_init(void) | |||
68 | 68 | ||
69 | waysize = cpu_data->dcache.sets; | 69 | waysize = cpu_data->dcache.sets; |
70 | 70 | ||
71 | #ifdef CCR_CACHE_ORA | ||
71 | /* | 72 | /* |
72 | * If the OC is already in RAM mode, we only have | 73 | * If the OC is already in RAM mode, we only have |
73 | * half of the entries to flush.. | 74 | * half of the entries to flush.. |
74 | */ | 75 | */ |
75 | if (ccr & CCR_CACHE_ORA) | 76 | if (ccr & CCR_CACHE_ORA) |
76 | waysize >>= 1; | 77 | waysize >>= 1; |
78 | #endif | ||
77 | 79 | ||
78 | waysize <<= cpu_data->dcache.entry_shift; | 80 | waysize <<= cpu_data->dcache.entry_shift; |
79 | 81 | ||
diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile index 1c034c283f59..0049d217561a 100644 --- a/arch/sh/kernel/cpu/irq/Makefile +++ b/arch/sh/kernel/cpu/irq/Makefile | |||
@@ -1,8 +1,9 @@ | |||
1 | # | 1 | # |
2 | # Makefile for the Linux/SuperH CPU-specific IRQ handlers. | 2 | # Makefile for the Linux/SuperH CPU-specific IRQ handlers.
3 | # | 3 | # |
4 | obj-y += ipr.o imask.o | 4 | obj-y += imask.o |
5 | 5 | ||
6 | obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o | ||
6 | obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o | 7 | obj-$(CONFIG_CPU_HAS_PINT_IRQ) += pint.o |
7 | obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o | 8 | obj-$(CONFIG_CPU_HAS_MASKREG_IRQ) += maskreg.o |
8 | obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o | 9 | obj-$(CONFIG_CPU_HAS_INTC2_IRQ) += intc2.o |
diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c index a33ae3e0a5a5..301b505c4278 100644 --- a/arch/sh/kernel/cpu/irq/imask.c +++ b/arch/sh/kernel/cpu/irq/imask.c | |||
@@ -53,7 +53,10 @@ void static inline set_interrupt_registers(int ip) | |||
53 | { | 53 | { |
54 | unsigned long __dummy; | 54 | unsigned long __dummy; |
55 | 55 | ||
56 | asm volatile("ldc %2, r6_bank\n\t" | 56 | asm volatile( |
57 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
58 | "ldc %2, r6_bank\n\t" | ||
59 | #endif | ||
57 | "stc sr, %0\n\t" | 60 | "stc sr, %0\n\t" |
58 | "and #0xf0, %0\n\t" | 61 | "and #0xf0, %0\n\t" |
59 | "shlr2 %0\n\t" | 62 | "shlr2 %0\n\t" |
diff --git a/arch/sh/kernel/cpu/irq/intc2.c b/arch/sh/kernel/cpu/irq/intc2.c index 74ca576a7ce5..74defe76a058 100644 --- a/arch/sh/kernel/cpu/irq/intc2.c +++ b/arch/sh/kernel/cpu/irq/intc2.c | |||
@@ -11,22 +11,29 @@ | |||
11 | * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780. | 11 | * Hitachi 7751, the STM ST40 STB1, SH7760, and SH7780. |
12 | */ | 12 | */ |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/irq.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <asm/system.h> | 16 | |
17 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) | ||
18 | #define INTC2_BASE 0xfe080000 | ||
19 | #define INTC2_INTMSK (INTC2_BASE + 0x40) | ||
20 | #define INTC2_INTMSKCLR (INTC2_BASE + 0x60) | ||
21 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
22 | #define INTC2_BASE 0xffd40000 | ||
23 | #define INTC2_INTMSK (INTC2_BASE + 0x38) | ||
24 | #define INTC2_INTMSKCLR (INTC2_BASE + 0x3c) | ||
25 | #endif | ||
17 | 26 | ||
18 | static void disable_intc2_irq(unsigned int irq) | 27 | static void disable_intc2_irq(unsigned int irq) |
19 | { | 28 | { |
20 | struct intc2_data *p = get_irq_chip_data(irq); | 29 | struct intc2_data *p = get_irq_chip_data(irq); |
21 | ctrl_outl(1 << p->msk_shift, | 30 | ctrl_outl(1 << p->msk_shift, INTC2_INTMSK + p->msk_offset); |
22 | INTC2_BASE + INTC2_INTMSK_OFFSET + p->msk_offset); | ||
23 | } | 31 | } |
24 | 32 | ||
25 | static void enable_intc2_irq(unsigned int irq) | 33 | static void enable_intc2_irq(unsigned int irq) |
26 | { | 34 | { |
27 | struct intc2_data *p = get_irq_chip_data(irq); | 35 | struct intc2_data *p = get_irq_chip_data(irq); |
28 | ctrl_outl(1 << p->msk_shift, | 36 | ctrl_outl(1 << p->msk_shift, INTC2_INTMSKCLR + p->msk_offset); |
29 | INTC2_BASE + INTC2_INTMSKCLR_OFFSET + p->msk_offset); | ||
30 | } | 37 | } |
31 | 38 | ||
32 | static struct irq_chip intc2_irq_chip = { | 39 | static struct irq_chip intc2_irq_chip = { |
@@ -61,12 +68,10 @@ void make_intc2_irq(struct intc2_data *table, unsigned int nr_irqs) | |||
61 | /* Set the priority level */ | 68 | /* Set the priority level */ |
62 | local_irq_save(flags); | 69 | local_irq_save(flags); |
63 | 70 | ||
64 | ipr = ctrl_inl(INTC2_BASE + INTC2_INTPRI_OFFSET + | 71 | ipr = ctrl_inl(INTC2_BASE + p->ipr_offset); |
65 | p->ipr_offset); | ||
66 | ipr &= ~(0xf << p->ipr_shift); | 72 | ipr &= ~(0xf << p->ipr_shift); |
67 | ipr |= p->priority << p->ipr_shift; | 73 | ipr |= p->priority << p->ipr_shift; |
68 | ctrl_outl(ipr, INTC2_BASE + INTC2_INTPRI_OFFSET + | 74 | ctrl_outl(ipr, INTC2_BASE + p->ipr_offset); |
69 | p->ipr_offset); | ||
70 | 75 | ||
71 | local_irq_restore(flags); | 76 | local_irq_restore(flags); |
72 | 77 | ||
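With the per-subtype constants introduced above, the mask and unmask paths write to fixed addresses instead of computing them from a shared offset macro. For SH7780 the values in the hunk work out to:

	INTC2_INTMSK    = 0xffd40000 + 0x38 = 0xffd40038   /* write (1 << msk_shift) to disable */
	INTC2_INTMSKCLR = 0xffd40000 + 0x3c = 0xffd4003c   /* write (1 << msk_shift) to enable  */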
diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c index a0089563cbfc..35eb5751a3aa 100644 --- a/arch/sh/kernel/cpu/irq/ipr.c +++ b/arch/sh/kernel/cpu/irq/ipr.c | |||
@@ -19,25 +19,21 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <asm/system.h> | 22 | #include <linux/io.h> |
23 | #include <asm/io.h> | 23 | #include <linux/interrupt.h> |
24 | #include <asm/machvec.h> | ||
25 | |||
26 | 24 | ||
27 | static void disable_ipr_irq(unsigned int irq) | 25 | static void disable_ipr_irq(unsigned int irq) |
28 | { | 26 | { |
29 | struct ipr_data *p = get_irq_chip_data(irq); | 27 | struct ipr_data *p = get_irq_chip_data(irq); |
30 | int shift = p->shift*4; | ||
31 | /* Set the priority in IPR to 0 */ | 28 | /* Set the priority in IPR to 0 */ |
32 | ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << shift)), p->addr); | 29 | ctrl_outw(ctrl_inw(p->addr) & (0xffff ^ (0xf << p->shift)), p->addr); |
33 | } | 30 | } |
34 | 31 | ||
35 | static void enable_ipr_irq(unsigned int irq) | 32 | static void enable_ipr_irq(unsigned int irq) |
36 | { | 33 | { |
37 | struct ipr_data *p = get_irq_chip_data(irq); | 34 | struct ipr_data *p = get_irq_chip_data(irq); |
38 | int shift = p->shift*4; | ||
39 | /* Set priority in IPR back to original value */ | 35 | /* Set priority in IPR back to original value */ |
40 | ctrl_outw(ctrl_inw(p->addr) | (p->priority << shift), p->addr); | 36 | ctrl_outw(ctrl_inw(p->addr) | (p->priority << p->shift), p->addr); |
41 | } | 37 | } |
42 | 38 | ||
43 | static struct irq_chip ipr_irq_chip = { | 39 | static struct irq_chip ipr_irq_chip = { |
@@ -53,6 +49,10 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) | |||
53 | 49 | ||
54 | for (i = 0; i < nr_irqs; i++) { | 50 | for (i = 0; i < nr_irqs; i++) { |
55 | unsigned int irq = table[i].irq; | 51 | unsigned int irq = table[i].irq; |
52 | table[i].addr = map_ipridx_to_addr(table[i].ipr_idx); | ||
53 | /* if the IPR index could not be mapped, skip this entry */ | ||
54 | if (table[i].addr == 0) | ||
55 | continue; | ||
56 | disable_irq_nosync(irq); | 56 | disable_irq_nosync(irq); |
57 | set_irq_chip_and_handler_name(irq, &ipr_irq_chip, | 57 | set_irq_chip_and_handler_name(irq, &ipr_irq_chip, |
58 | handle_level_irq, "level"); | 58 | handle_level_irq, "level"); |
@@ -62,83 +62,6 @@ void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs) | |||
62 | } | 62 | } |
63 | EXPORT_SYMBOL(make_ipr_irq); | 63 | EXPORT_SYMBOL(make_ipr_irq); |
64 | 64 | ||
65 | static struct ipr_data sys_ipr_map[] = { | ||
66 | #ifndef CONFIG_CPU_SUBTYPE_SH7780 | ||
67 | { TIMER_IRQ, TIMER_IPR_ADDR, TIMER_IPR_POS, TIMER_PRIORITY }, | ||
68 | { TIMER1_IRQ, TIMER1_IPR_ADDR, TIMER1_IPR_POS, TIMER1_PRIORITY }, | ||
69 | #ifdef RTC_IRQ | ||
70 | { RTC_IRQ, RTC_IPR_ADDR, RTC_IPR_POS, RTC_PRIORITY }, | ||
71 | #endif | ||
72 | #ifdef SCI_ERI_IRQ | ||
73 | { SCI_ERI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, | ||
74 | { SCI_RXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, | ||
75 | { SCI_TXI_IRQ, SCI_IPR_ADDR, SCI_IPR_POS, SCI_PRIORITY }, | ||
76 | #endif | ||
77 | #ifdef SCIF1_ERI_IRQ | ||
78 | { SCIF1_ERI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, | ||
79 | { SCIF1_RXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, | ||
80 | { SCIF1_BRI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, | ||
81 | { SCIF1_TXI_IRQ, SCIF1_IPR_ADDR, SCIF1_IPR_POS, SCIF1_PRIORITY }, | ||
82 | #endif | ||
83 | #if defined(CONFIG_CPU_SUBTYPE_SH7300) | ||
84 | { SCIF0_IRQ, SCIF0_IPR_ADDR, SCIF0_IPR_POS, SCIF0_PRIORITY }, | ||
85 | { DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, | ||
86 | { DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY }, | ||
87 | { VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY }, | ||
88 | #endif | ||
89 | #ifdef SCIF_ERI_IRQ | ||
90 | { SCIF_ERI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, | ||
91 | { SCIF_RXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, | ||
92 | { SCIF_BRI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, | ||
93 | { SCIF_TXI_IRQ, SCIF_IPR_ADDR, SCIF_IPR_POS, SCIF_PRIORITY }, | ||
94 | #endif | ||
95 | #ifdef IRDA_ERI_IRQ | ||
96 | { IRDA_ERI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, | ||
97 | { IRDA_RXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, | ||
98 | { IRDA_BRI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, | ||
99 | { IRDA_TXI_IRQ, IRDA_IPR_ADDR, IRDA_IPR_POS, IRDA_PRIORITY }, | ||
100 | #endif | ||
101 | #if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \ | ||
102 | defined(CONFIG_CPU_SUBTYPE_SH7706) || \ | ||
103 | defined(CONFIG_CPU_SUBTYPE_SH7300) || defined(CONFIG_CPU_SUBTYPE_SH7705) | ||
104 | /* | ||
105 | * Initialize the Interrupt Controller (INTC) | ||
106 | * registers to their power on values | ||
107 | */ | ||
108 | |||
109 | /* | ||
110 | * Enable external irq (INTC IRQ mode). | ||
111 | * You should set corresponding bits of PFC to "00" | ||
112 | * to enable these interrupts. | ||
113 | */ | ||
114 | { IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, IRQ0_PRIORITY }, | ||
115 | { IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, IRQ1_PRIORITY }, | ||
116 | { IRQ2_IRQ, IRQ2_IPR_ADDR, IRQ2_IPR_POS, IRQ2_PRIORITY }, | ||
117 | { IRQ3_IRQ, IRQ3_IPR_ADDR, IRQ3_IPR_POS, IRQ3_PRIORITY }, | ||
118 | { IRQ4_IRQ, IRQ4_IPR_ADDR, IRQ4_IPR_POS, IRQ4_PRIORITY }, | ||
119 | { IRQ5_IRQ, IRQ5_IPR_ADDR, IRQ5_IPR_POS, IRQ5_PRIORITY }, | ||
120 | #endif | ||
121 | #endif | ||
122 | }; | ||
123 | |||
124 | void __init init_IRQ(void) | ||
125 | { | ||
126 | make_ipr_irq(sys_ipr_map, ARRAY_SIZE(sys_ipr_map)); | ||
127 | |||
128 | #ifdef CONFIG_CPU_HAS_PINT_IRQ | ||
129 | init_IRQ_pint(); | ||
130 | #endif | ||
131 | |||
132 | #ifdef CONFIG_CPU_HAS_INTC2_IRQ | ||
133 | init_IRQ_intc2(); | ||
134 | #endif | ||
135 | /* Perform the machine specific initialisation */ | ||
136 | if (sh_mv.mv_init_irq != NULL) | ||
137 | sh_mv.mv_init_irq(); | ||
138 | |||
139 | irq_ctx_init(smp_processor_id()); | ||
140 | } | ||
141 | |||
142 | #if !defined(CONFIG_CPU_HAS_PINT_IRQ) | 65 | #if !defined(CONFIG_CPU_HAS_PINT_IRQ) |
143 | int ipr_irq_demux(int irq) | 66 | int ipr_irq_demux(int irq) |
144 | { | 67 | { |
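make_ipr_irq() now resolves each entry's IPR register address from an index via map_ipridx_to_addr() at registration time, and entries that cannot be mapped are skipped rather than registered with a bogus address. A hypothetical board-side table using the field names visible in this hunk (the numeric values are illustrative only; note that .shift is now a bit position, since the former shift*4 scaling was dropped):

	static struct ipr_data board_ipr_map[] = {
		/* .irq, .ipr_idx, .shift (in bits), .priority -- example values */
		{ .irq = 16, .ipr_idx = 0, .shift = 12, .priority = 2 },
		{ .irq = 17, .ipr_idx = 0, .shift =  8, .priority = 2 },
	};

	static void __init board_init_irq(void)
	{
		make_ipr_irq(board_ipr_map, ARRAY_SIZE(board_ipr_map));
	}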
diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile index 389353fba608..f0f059acfcfb 100644 --- a/arch/sh/kernel/cpu/sh2/Makefile +++ b/arch/sh/kernel/cpu/sh2/Makefile | |||
@@ -2,5 +2,6 @@ | |||
2 | # Makefile for the Linux/SuperH SH-2 backends. | 2 | # Makefile for the Linux/SuperH SH-2 backends. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := probe.o | 5 | obj-y := ex.o probe.o entry.o |
6 | 6 | ||
7 | obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o | ||
diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c new file mode 100644 index 000000000000..d0440b269702 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh2/clock-sh7619.c | ||
3 | * | ||
4 | * SH7619 support for the clock framework | ||
5 | * | ||
6 | * Copyright (C) 2006 Yoshinori Sato | ||
7 | * | ||
8 | * Based on clock-sh4.c | ||
9 | * Copyright (C) 2005 Paul Mundt | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <asm/clock.h> | ||
18 | #include <asm/freq.h> | ||
19 | #include <asm/io.h> | ||
20 | |||
21 | const static int pll1rate[]={1,2}; | ||
22 | const static int pfc_divisors[]={1,2,0,4}; | ||
23 | |||
24 | #if (CONFIG_SH_CLK_MD == 1) || (CONFIG_SH_CLK_MD == 2) | ||
25 | #define PLL2 (4) | ||
26 | #elif (CONFIG_SH_CLK_MD == 5) || (CONFIG_SH_CLK_MD == 6) | ||
27 | #define PLL2 (2) | ||
28 | #else | ||
29 | #error "Illigal Clock Mode!" | ||
30 | #endif | ||
31 | |||
32 | static void master_clk_init(struct clk *clk) | ||
33 | { | ||
34 | clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; | ||
35 | } | ||
36 | |||
37 | static struct clk_ops sh7619_master_clk_ops = { | ||
38 | .init = master_clk_init, | ||
39 | }; | ||
40 | |||
41 | static void module_clk_recalc(struct clk *clk) | ||
42 | { | ||
43 | int idx = (ctrl_inw(FREQCR) & 0x0007); | ||
44 | clk->rate = clk->parent->rate / pfc_divisors[idx]; | ||
45 | } | ||
46 | |||
47 | static struct clk_ops sh7619_module_clk_ops = { | ||
48 | .recalc = module_clk_recalc, | ||
49 | }; | ||
50 | |||
51 | static void bus_clk_recalc(struct clk *clk) | ||
52 | { | ||
53 | clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 7]; | ||
54 | } | ||
55 | |||
56 | static struct clk_ops sh7619_bus_clk_ops = { | ||
57 | .recalc = bus_clk_recalc, | ||
58 | }; | ||
59 | |||
60 | static void cpu_clk_recalc(struct clk *clk) | ||
61 | { | ||
62 | clk->rate = clk->parent->rate; | ||
63 | } | ||
64 | |||
65 | static struct clk_ops sh7619_cpu_clk_ops = { | ||
66 | .recalc = cpu_clk_recalc, | ||
67 | }; | ||
68 | |||
69 | static struct clk_ops *sh7619_clk_ops[] = { | ||
70 | &sh7619_master_clk_ops, | ||
71 | &sh7619_module_clk_ops, | ||
72 | &sh7619_bus_clk_ops, | ||
73 | &sh7619_cpu_clk_ops, | ||
74 | }; | ||
75 | |||
76 | void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | ||
77 | { | ||
78 | if (idx < ARRAY_SIZE(sh7619_clk_ops)) | ||
79 | *ops = sh7619_clk_ops[idx]; | ||
80 | } | ||
81 | |||
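As a worked example of the rate derivation in clock-sh7619.c, assume (hypothetically) a 10 MHz input clock, clock mode 1 (so PLL2 = 4), and FREQCR selecting pll1rate[1] and pfc_divisors[1]:

	master = 10 MHz * 4 (PLL2) * 2 (pll1rate[1]) = 80 MHz
	bus    = master / 2 (pll1rate[1])            = 40 MHz
	module = master / 2 (pfc_divisors[1])        = 40 MHz
	cpu    = master                              = 80 MHz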
diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S new file mode 100644 index 000000000000..34d51b3745ea --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/entry.S | |||
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh2/entry.S | ||
3 | * | ||
4 | * The SH-2 exception entry | ||
5 | * | ||
6 | * Copyright (C) 2005,2006 Yoshinori Sato | ||
7 | * Copyright (C) 2005 AXE,Inc. | ||
8 | * | ||
9 | * This file is subject to the terms and conditions of the GNU General Public | ||
10 | * License. See the file "COPYING" in the main directory of this archive | ||
11 | * for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/linkage.h> | ||
15 | #include <asm/asm-offsets.h> | ||
16 | #include <asm/thread_info.h> | ||
17 | #include <asm/cpu/mmu_context.h> | ||
18 | #include <asm/unistd.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | /* Offsets to the stack */ | ||
23 | OFF_R0 = 0 /* Return value. New ABI also arg4 */ | ||
24 | OFF_R1 = 4 /* New ABI: arg5 */ | ||
25 | OFF_R2 = 8 /* New ABI: arg6 */ | ||
26 | OFF_R3 = 12 /* New ABI: syscall_nr */ | ||
27 | OFF_R4 = 16 /* New ABI: arg0 */ | ||
28 | OFF_R5 = 20 /* New ABI: arg1 */ | ||
29 | OFF_R6 = 24 /* New ABI: arg2 */ | ||
30 | OFF_R7 = 28 /* New ABI: arg3 */ | ||
31 | OFF_SP = (15*4) | ||
32 | OFF_PC = (16*4) | ||
33 | OFF_SR = (16*4+2*4) | ||
34 | OFF_TRA = (16*4+6*4) | ||
35 | |||
36 | #include <asm/entry-macros.S> | ||
37 | |||
38 | ENTRY(exception_handler) | ||
39 | ! already saved r0/r1 | ||
40 | mov.l r2,@-sp | ||
41 | mov.l r3,@-sp | ||
42 | mov r0,r1 | ||
43 | cli | ||
44 | mov.l $cpu_mode,r2 | ||
45 | mov.l @r2,r0 | ||
46 | mov.l @(5*4,r15),r3 ! previous SR | ||
47 | shll2 r3 ! set "S" flag | ||
48 | rotl r0 ! T <- "S" flag | ||
49 | rotl r0 ! "S" flag is LSB | ||
50 | rotcr r3 ! T -> r3:b30 | ||
51 | shlr r3 | ||
52 | shlr r0 | ||
53 | bt/s 1f | ||
54 | mov.l r3,@(5*4,r15) ! copy cpu mode to SR | ||
55 | ! switch to kernel mode | ||
56 | mov #1,r0 | ||
57 | rotr r0 | ||
58 | rotr r0 | ||
59 | mov.l r0,@r2 ! enter kernel mode | ||
60 | mov.l $current_thread_info,r2 | ||
61 | mov.l @r2,r2 | ||
62 | mov #0x20,r0 | ||
63 | shll8 r0 | ||
64 | add r2,r0 | ||
65 | mov r15,r2 ! r2 = user stack top | ||
66 | mov r0,r15 ! switch kernel stack | ||
67 | add #-4,r15 ! dummy | ||
68 | mov.l r1,@-r15 ! TRA | ||
69 | sts.l macl, @-r15 | ||
70 | sts.l mach, @-r15 | ||
71 | stc.l gbr, @-r15 | ||
72 | mov.l @(4*4,r2),r0 | ||
73 | mov.l @(5*4,r2),r1 | ||
74 | mov.l r1,@-r15 ! original SR | ||
75 | sts.l pr,@-r15 | ||
76 | mov.l r0,@-r15 ! original PC | ||
77 | mov r2,r3 | ||
78 | add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame | ||
79 | mov.l r3,@-r15 ! original SP | ||
80 | mov.l r14,@-r15 | ||
81 | mov.l r13,@-r15 | ||
82 | mov.l r12,@-r15 | ||
83 | mov.l r11,@-r15 | ||
84 | mov.l r10,@-r15 | ||
85 | mov.l r9,@-r15 | ||
86 | mov.l r8,@-r15 | ||
87 | mov.l r7,@-r15 | ||
88 | mov.l r6,@-r15 | ||
89 | mov.l r5,@-r15 | ||
90 | mov.l r4,@-r15 | ||
91 | mov r2,r8 ! copy user -> kernel stack | ||
92 | mov.l @r8+,r3 | ||
93 | mov.l r3,@-r15 | ||
94 | mov.l @r8+,r2 | ||
95 | mov.l r2,@-r15 | ||
96 | mov.l @r8+,r1 | ||
97 | mov.l r1,@-r15 | ||
98 | mov.l @r8+,r0 | ||
99 | bra 2f | ||
100 | mov.l r0,@-r15 | ||
101 | 1: | ||
102 | ! in kernel exception | ||
103 | mov #(22-4-4-1)*4+4,r0 | ||
104 | mov r15,r2 | ||
105 | sub r0,r15 | ||
106 | mov.l @r2+,r0 ! old R3 | ||
107 | mov.l r0,@-r15 | ||
108 | mov.l @r2+,r0 ! old R2 | ||
109 | mov.l r0,@-r15 | ||
110 | mov.l @r2+,r0 ! old R1 | ||
111 | mov.l r0,@-r15 | ||
112 | mov.l @r2+,r0 ! old R0 | ||
113 | mov.l r0,@-r15 | ||
114 | mov.l @r2+,r3 ! old PC | ||
115 | mov.l @r2+,r0 ! old SR | ||
116 | add #-4,r2 ! exception frame stub (sr) | ||
117 | mov.l r1,@-r2 ! TRA | ||
118 | sts.l macl, @-r2 | ||
119 | sts.l mach, @-r2 | ||
120 | stc.l gbr, @-r2 | ||
121 | mov.l r0,@-r2 ! save old SR | ||
122 | sts.l pr,@-r2 | ||
123 | mov.l r3,@-r2 ! save old PC | ||
124 | mov r2,r0 | ||
125 | add #8*4,r0 | ||
126 | mov.l r0,@-r2 ! save old SP | ||
127 | mov.l r14,@-r2 | ||
128 | mov.l r13,@-r2 | ||
129 | mov.l r12,@-r2 | ||
130 | mov.l r11,@-r2 | ||
131 | mov.l r10,@-r2 | ||
132 | mov.l r9,@-r2 | ||
133 | mov.l r8,@-r2 | ||
134 | mov.l r7,@-r2 | ||
135 | mov.l r6,@-r2 | ||
136 | mov.l r5,@-r2 | ||
137 | mov.l r4,@-r2 | ||
138 | mov.l @(OFF_R0,r15),r0 | ||
139 | mov.l @(OFF_R1,r15),r1 | ||
140 | mov.l @(OFF_R2,r15),r2 | ||
141 | mov.l @(OFF_R3,r15),r3 | ||
142 | 2: | ||
143 | mov #OFF_TRA,r8 | ||
144 | add r15,r8 | ||
145 | mov.l @r8,r9 | ||
146 | mov #64,r8 | ||
147 | cmp/hs r8,r9 | ||
148 | bt interrupt_entry ! vec >= 64 is interrupt | ||
149 | mov #32,r8 | ||
150 | cmp/hs r8,r9 | ||
151 | bt trap_entry ! 64 > vec >= 32 is trap | ||
152 | mov.l 4f,r8 | ||
153 | mov r9,r4 | ||
154 | shll2 r9 | ||
155 | add r9,r8 | ||
156 | mov.l @r8,r8 | ||
157 | mov #0,r9 | ||
158 | cmp/eq r9,r8 | ||
159 | bf 3f | ||
160 | mov.l 8f,r8 ! unhandled exception | ||
161 | 3: | ||
162 | mov.l 5f,r10 | ||
163 | jmp @r8 | ||
164 | lds r10,pr | ||
165 | |||
166 | interrupt_entry: | ||
167 | mov r9,r4 | ||
168 | mov.l 6f,r9 | ||
169 | mov.l 7f,r8 | ||
170 | jmp @r8 | ||
171 | lds r9,pr | ||
172 | |||
173 | .align 2 | ||
174 | 4: .long exception_handling_table | ||
175 | 5: .long ret_from_exception | ||
176 | 6: .long ret_from_irq | ||
177 | 7: .long do_IRQ | ||
178 | 8: .long do_exception_error | ||
179 | |||
180 | trap_entry: | ||
181 | add #-0x10,r9 | ||
182 | shll2 r9 ! TRA | ||
183 | mov #OFF_TRA,r8 | ||
184 | add r15,r8 | ||
185 | mov.l r9,@r8 | ||
186 | mov r9,r8 | ||
187 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
188 | mov.l 5f, r9 | ||
189 | jsr @r9 | ||
190 | nop | ||
191 | #endif | ||
192 | sti | ||
193 | bra system_call | ||
194 | nop | ||
195 | |||
196 | .align 2 | ||
197 | 1: .long syscall_exit | ||
198 | 2: .long break_point_trap_software | ||
199 | 3: .long NR_syscalls | ||
200 | 4: .long sys_call_table | ||
201 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
202 | 5: .long trace_hardirqs_on | ||
203 | #endif | ||
204 | |||
205 | #if defined(CONFIG_SH_STANDARD_BIOS) | ||
206 | /* Unwind the stack and jmp to the debug entry */ | ||
207 | debug_kernel_fw: | ||
208 | mov r15,r0 | ||
209 | add #(22-4)*4-4,r0 | ||
210 | ldc.l @r0+,gbr | ||
211 | lds.l @r0+,mach | ||
212 | lds.l @r0+,macl | ||
213 | mov r15,r0 | ||
214 | mov.l @(OFF_SP,r0),r1 | ||
215 | mov #OFF_SR,r2 | ||
216 | mov.l @(r0,r2),r3 | ||
217 | mov.l r3,@-r1 | ||
218 | mov #OFF_SP,r2 | ||
219 | mov.l @(r0,r2),r3 | ||
220 | mov.l r3,@-r1 | ||
221 | mov r15,r0 | ||
222 | add #(22-4)*4-8,r0 | ||
223 | mov.l 1f,r2 | ||
224 | mov.l @r2,r2 | ||
225 | stc sr,r3 | ||
226 | mov.l r2,@r0 | ||
227 | mov.l r3,@r0 | ||
228 | mov.l r1,@(8,r0) | ||
229 | mov.l @r15+, r0 | ||
230 | mov.l @r15+, r1 | ||
231 | mov.l @r15+, r2 | ||
232 | mov.l @r15+, r3 | ||
233 | mov.l @r15+, r4 | ||
234 | mov.l @r15+, r5 | ||
235 | mov.l @r15+, r6 | ||
236 | mov.l @r15+, r7 | ||
237 | mov.l @r15+, r8 | ||
238 | mov.l @r15+, r9 | ||
239 | mov.l @r15+, r10 | ||
240 | mov.l @r15+, r11 | ||
241 | mov.l @r15+, r12 | ||
242 | mov.l @r15+, r13 | ||
243 | mov.l @r15+, r14 | ||
244 | add #8,r15 | ||
245 | lds.l @r15+, pr | ||
246 | rte | ||
247 | mov.l @r15+,r15 | ||
248 | .align 2 | ||
249 | 1: .long gdb_vbr_vector | ||
250 | #endif /* CONFIG_SH_STANDARD_BIOS */ | ||
251 | |||
252 | ENTRY(address_error_handler) | ||
253 | mov r15,r4 ! regs | ||
254 | add #4,r4 | ||
255 | mov #OFF_PC,r0 | ||
256 | mov.l @(r0,r15),r6 ! pc | ||
257 | mov.l 1f,r0 | ||
258 | jmp @r0 | ||
259 | mov #0,r5 ! writeaccess is unknown | ||
260 | .align 2 | ||
261 | |||
262 | 1: .long do_address_error | ||
263 | |||
264 | restore_all: | ||
265 | cli | ||
266 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
267 | mov.l 3f, r0 | ||
268 | jsr @r0 | ||
269 | nop | ||
270 | #endif | ||
271 | mov r15,r0 | ||
272 | mov.l $cpu_mode,r2 | ||
273 | mov #OFF_SR,r3 | ||
274 | mov.l @(r0,r3),r1 | ||
275 | mov.l r1,@r2 | ||
276 | shll2 r1 ! clear MD bit | ||
277 | shlr2 r1 | ||
278 | mov.l @(OFF_SP,r0),r2 | ||
279 | add #-8,r2 | ||
280 | mov.l r2,@(OFF_SP,r0) ! point exception frame top | ||
281 | mov.l r1,@(4,r2) ! set sr | ||
282 | mov #OFF_PC,r3 | ||
283 | mov.l @(r0,r3),r1 | ||
284 | mov.l r1,@r2 ! set pc | ||
285 | add #4*16+4,r0 | ||
286 | lds.l @r0+,pr | ||
287 | add #4,r0 ! skip sr | ||
288 | ldc.l @r0+,gbr | ||
289 | lds.l @r0+,mach | ||
290 | lds.l @r0+,macl | ||
291 | get_current_thread_info r0, r1 | ||
292 | mov.l $current_thread_info,r1 | ||
293 | mov.l r0,@r1 | ||
294 | mov.l @r15+,r0 | ||
295 | mov.l @r15+,r1 | ||
296 | mov.l @r15+,r2 | ||
297 | mov.l @r15+,r3 | ||
298 | mov.l @r15+,r4 | ||
299 | mov.l @r15+,r5 | ||
300 | mov.l @r15+,r6 | ||
301 | mov.l @r15+,r7 | ||
302 | mov.l @r15+,r8 | ||
303 | mov.l @r15+,r9 | ||
304 | mov.l @r15+,r10 | ||
305 | mov.l @r15+,r11 | ||
306 | mov.l @r15+,r12 | ||
307 | mov.l @r15+,r13 | ||
308 | mov.l @r15+,r14 | ||
309 | mov.l @r15,r15 | ||
310 | rte | ||
311 | nop | ||
312 | 2: | ||
313 | mov.l 1f,r8 | ||
314 | mov.l 2f,r9 | ||
315 | jmp @r9 | ||
316 | lds r8,pr | ||
317 | |||
318 | .align 2 | ||
319 | $current_thread_info: | ||
320 | .long __current_thread_info | ||
321 | $cpu_mode: | ||
322 | .long __cpu_mode | ||
323 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
324 | 3: .long trace_hardirqs_off | ||
325 | #endif | ||
326 | |||
327 | ! common exception handler | ||
328 | #include "../../entry-common.S" | ||
329 | |||
330 | .data | ||
331 | ! cpu operation mode | ||
332 | ! bit30 = MD (compatible SH3/4) | ||
333 | __cpu_mode: | ||
334 | .long 0x40000000 | ||
335 | |||
336 | .section .bss | ||
337 | __current_thread_info: | ||
338 | .long 0 | ||
339 | |||
340 | ENTRY(exception_handling_table) | ||
341 | .space 4*32 | ||
diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S new file mode 100644 index 000000000000..6d285af7846c --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/ex.S | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh2/ex.S | ||
3 | * | ||
4 | * The SH-2 exception vector table | ||
5 | * | ||
6 | * Copyright (C) 2005 Yoshinori Sato | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/linkage.h> | ||
14 | |||
15 | ! | ||
16 | ! convert Exception Vector to Exception Number | ||
17 | ! | ||
18 | exception_entry: | ||
19 | no = 0 | ||
20 | .rept 256 | ||
21 | mov.l r0,@-sp | ||
22 | mov #no,r0 | ||
23 | bra exception_trampoline | ||
24 | and #0xff,r0 | ||
25 | no = no + 1 | ||
26 | .endr | ||
27 | exception_trampoline: | ||
28 | mov.l r1,@-sp | ||
29 | mov.l $exception_handler,r1 | ||
30 | jmp @r1 | ||
31 | |||
32 | .align 2 | ||
33 | $exception_entry: | ||
34 | .long exception_entry | ||
35 | $exception_handler: | ||
36 | .long exception_handler | ||
37 | ! | ||
38 | ! Exception Vector Base | ||
39 | ! | ||
40 | .align 2 | ||
41 | ENTRY(vbr_base) | ||
42 | vector = 0 | ||
43 | .rept 256 | ||
44 | .long exception_entry + vector * 8 | ||
45 | vector = vector + 1 | ||
46 | .endr | ||
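Each .rept iteration in ex.S emits four 16-bit SH instructions, so every generated stub occupies 8 bytes; that is where the vector * 8 stride in the VBR table comes from:

	stub size      = 4 instructions * 2 bytes = 8 bytes
	vector entry n = exception_entry + n * 8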
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c index f17a2a0d588e..ba527d9b5024 100644 --- a/arch/sh/kernel/cpu/sh2/probe.c +++ b/arch/sh/kernel/cpu/sh2/probe.c | |||
@@ -17,17 +17,23 @@ | |||
17 | 17 | ||
18 | int __init detect_cpu_and_cache_system(void) | 18 | int __init detect_cpu_and_cache_system(void) |
19 | { | 19 | { |
20 | /* | 20 | #if defined(CONFIG_CPU_SUBTYPE_SH7604) |
21 | * For now, assume SH7604 .. fix this later. | ||
22 | */ | ||
23 | cpu_data->type = CPU_SH7604; | 21 | cpu_data->type = CPU_SH7604; |
24 | cpu_data->dcache.ways = 4; | 22 | cpu_data->dcache.ways = 4; |
25 | cpu_data->dcache.way_shift = 6; | 23 | cpu_data->dcache.way_incr = (1<<10); |
26 | cpu_data->dcache.sets = 64; | 24 | cpu_data->dcache.sets = 64; |
27 | cpu_data->dcache.entry_shift = 4; | 25 | cpu_data->dcache.entry_shift = 4; |
28 | cpu_data->dcache.linesz = L1_CACHE_BYTES; | 26 | cpu_data->dcache.linesz = L1_CACHE_BYTES; |
29 | cpu_data->dcache.flags = 0; | 27 | cpu_data->dcache.flags = 0; |
30 | 28 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | |
29 | cpu_data->type = CPU_SH7619; | ||
30 | cpu_data->dcache.ways = 4; | ||
31 | cpu_data->dcache.way_incr = (1<<12); | ||
32 | cpu_data->dcache.sets = 256; | ||
33 | cpu_data->dcache.entry_shift = 4; | ||
34 | cpu_data->dcache.linesz = L1_CACHE_BYTES; | ||
35 | cpu_data->dcache.flags = 0; | ||
36 | #endif | ||
31 | /* | 37 | /* |
32 | * SH-2 doesn't have separate caches | 38 | * SH-2 doesn't have separate caches |
33 | */ | 39 | */ |
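The way_incr values replacing way_shift above are consistent with sets << entry_shift for both parts:

	SH7604: 64 sets  << 4 = 0x0400 = 1 << 10
	SH7619: 256 sets << 4 = 0x1000 = 1 << 12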
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c new file mode 100644 index 000000000000..82c2d905152f --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * SH7619 Setup | ||
3 | * | ||
4 | * Copyright (C) 2006 Yoshinori Sato | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/serial.h> | ||
13 | #include <asm/sci.h> | ||
14 | |||
15 | static struct plat_sci_port sci_platform_data[] = { | ||
16 | { | ||
17 | .mapbase = 0xf8400000, | ||
18 | .flags = UPF_BOOT_AUTOCONF, | ||
19 | .type = PORT_SCIF, | ||
20 | .irqs = { 88, 89, 91, 90}, | ||
21 | }, { | ||
22 | .mapbase = 0xf8410000, | ||
23 | .flags = UPF_BOOT_AUTOCONF, | ||
24 | .type = PORT_SCIF, | ||
25 | .irqs = { 92, 93, 95, 94}, | ||
26 | }, { | ||
27 | .mapbase = 0xf8420000, | ||
28 | .flags = UPF_BOOT_AUTOCONF, | ||
29 | .type = PORT_SCIF, | ||
30 | .irqs = { 96, 97, 99, 98}, | ||
31 | }, { | ||
32 | .flags = 0, | ||
33 | } | ||
34 | }; | ||
35 | |||
36 | static struct platform_device sci_device = { | ||
37 | .name = "sh-sci", | ||
38 | .id = -1, | ||
39 | .dev = { | ||
40 | .platform_data = sci_platform_data, | ||
41 | }, | ||
42 | }; | ||
43 | |||
44 | static struct platform_device *sh7619_devices[] __initdata = { | ||
45 | &sci_device, | ||
46 | }; | ||
47 | |||
48 | static int __init sh7619_devices_setup(void) | ||
49 | { | ||
50 | return platform_add_devices(sh7619_devices, | ||
51 | ARRAY_SIZE(sh7619_devices)); | ||
52 | } | ||
53 | __initcall(sh7619_devices_setup); | ||
diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile new file mode 100644 index 000000000000..350972ae9410 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/Makefile | |||
@@ -0,0 +1,10 @@ | |||
1 | # | ||
2 | # Makefile for the Linux/SuperH SH-2A backends. | ||
3 | # | ||
4 | |||
5 | obj-y := common.o probe.o | ||
6 | |||
7 | common-y += $(addprefix ../sh2/, ex.o) | ||
8 | common-y += $(addprefix ../sh2/, entry.o) | ||
9 | |||
10 | obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o | ||
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c new file mode 100644 index 000000000000..a9ad309c6a33 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh2a/clock-sh7206.c | ||
3 | * | ||
4 | * SH7206 support for the clock framework | ||
5 | * | ||
6 | * Copyright (C) 2006 Yoshinori Sato | ||
7 | * | ||
8 | * Based on clock-sh4.c | ||
9 | * Copyright (C) 2005 Paul Mundt | ||
10 | * | ||
11 | * This file is subject to the terms and conditions of the GNU General Public | ||
12 | * License. See the file "COPYING" in the main directory of this archive | ||
13 | * for more details. | ||
14 | */ | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <asm/clock.h> | ||
18 | #include <asm/freq.h> | ||
19 | #include <asm/io.h> | ||
20 | |||
21 | const static int pll1rate[]={1,2,3,4,6,8}; | ||
22 | const static int pfc_divisors[]={1,2,3,4,6,8,12}; | ||
23 | #define ifc_divisors pfc_divisors | ||
24 | |||
25 | #if (CONFIG_SH_CLK_MD == 2) | ||
26 | #define PLL2 (4) | ||
27 | #elif (CONFIG_SH_CLK_MD == 6) | ||
28 | #define PLL2 (2) | ||
29 | #elif (CONFIG_SH_CLK_MD == 7) | ||
30 | #define PLL2 (1) | ||
31 | #else | ||
32 | #error "Illegal Clock Mode!" | ||
33 | #endif | ||
34 | |||
35 | static void master_clk_init(struct clk *clk) | ||
36 | { | ||
37 | clk->rate *= PLL2 * pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; | ||
38 | } | ||
39 | |||
40 | static struct clk_ops sh7206_master_clk_ops = { | ||
41 | .init = master_clk_init, | ||
42 | }; | ||
43 | |||
44 | static void module_clk_recalc(struct clk *clk) | ||
45 | { | ||
46 | int idx = (ctrl_inw(FREQCR) & 0x0007); | ||
47 | clk->rate = clk->parent->rate / pfc_divisors[idx]; | ||
48 | } | ||
49 | |||
50 | static struct clk_ops sh7206_module_clk_ops = { | ||
51 | .recalc = module_clk_recalc, | ||
52 | }; | ||
53 | |||
54 | static void bus_clk_recalc(struct clk *clk) | ||
55 | { | ||
56 | clk->rate = clk->parent->rate / pll1rate[(ctrl_inw(FREQCR) >> 8) & 0x0007]; | ||
57 | } | ||
58 | |||
59 | static struct clk_ops sh7206_bus_clk_ops = { | ||
60 | .recalc = bus_clk_recalc, | ||
61 | }; | ||
62 | |||
63 | static void cpu_clk_recalc(struct clk *clk) | ||
64 | { | ||
65 | int idx = (ctrl_inw(FREQCR) & 0x0007); | ||
66 | clk->rate = clk->parent->rate / ifc_divisors[idx]; | ||
67 | } | ||
68 | |||
69 | static struct clk_ops sh7206_cpu_clk_ops = { | ||
70 | .recalc = cpu_clk_recalc, | ||
71 | }; | ||
72 | |||
73 | static struct clk_ops *sh7206_clk_ops[] = { | ||
74 | &sh7206_master_clk_ops, | ||
75 | &sh7206_module_clk_ops, | ||
76 | &sh7206_bus_clk_ops, | ||
77 | &sh7206_cpu_clk_ops, | ||
78 | }; | ||
79 | |||
80 | void __init arch_init_clk_ops(struct clk_ops **ops, int idx) | ||
81 | { | ||
82 | if (idx < ARRAY_SIZE(sh7206_clk_ops)) | ||
83 | *ops = sh7206_clk_ops[idx]; | ||
84 | } | ||
85 | |||
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c new file mode 100644 index 000000000000..87c6c0542089 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/probe.c | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/cpu/sh2a/probe.c | ||
3 | * | ||
4 | * CPU Subtype Probing for SH-2A. | ||
5 | * | ||
6 | * Copyright (C) 2004, 2005 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <asm/processor.h> | ||
15 | #include <asm/cache.h> | ||
16 | |||
17 | int __init detect_cpu_and_cache_system(void) | ||
18 | { | ||
19 | /* Just SH7206 for now .. */ | ||
20 | cpu_data->type = CPU_SH7206; | ||
21 | |||
22 | cpu_data->dcache.ways = 4; | ||
23 | cpu_data->dcache.way_incr = (1 << 11); | ||
24 | cpu_data->dcache.sets = 128; | ||
25 | cpu_data->dcache.entry_shift = 4; | ||
26 | cpu_data->dcache.linesz = L1_CACHE_BYTES; | ||
27 | cpu_data->dcache.flags = 0; | ||
28 | |||
29 | /* | ||
30 | * The icache is the same as the dcache as far as this setup is | ||
31 | * concerned. The only real difference in hardware is that the icache | ||
32 | * lacks the U bit that the dcache has; none of this has any bearing | ||
33 | * on the cache info. | ||
34 | */ | ||
35 | cpu_data->icache = cpu_data->dcache; | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c new file mode 100644 index 000000000000..cdfeef49e62e --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c | |||
@@ -0,0 +1,58 @@ | |||
1 | /* | ||
2 | * SH7206 Setup | ||
3 | * | ||
4 | * Copyright (C) 2006 Yoshinori Sato | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/platform_device.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/serial.h> | ||
13 | #include <asm/sci.h> | ||
14 | |||
15 | static struct plat_sci_port sci_platform_data[] = { | ||
16 | { | ||
17 | .mapbase = 0xfffe8000, | ||
18 | .flags = UPF_BOOT_AUTOCONF, | ||
19 | .type = PORT_SCIF, | ||
20 | .irqs = { 240, 241, 242, 243}, | ||
21 | }, { | ||
22 | .mapbase = 0xfffe8800, | ||
23 | .flags = UPF_BOOT_AUTOCONF, | ||
24 | .type = PORT_SCIF, | ||
25 | .irqs = { 244, 245, 246, 247}, | ||
26 | }, { | ||
27 | .mapbase = 0xfffe9000, | ||
28 | .flags = UPF_BOOT_AUTOCONF, | ||
29 | .type = PORT_SCIF, | ||
30 | .irqs = { 248, 249, 250, 251}, | ||
31 | }, { | ||
32 | .mapbase = 0xfffe9800, | ||
33 | .flags = UPF_BOOT_AUTOCONF, | ||
34 | .type = PORT_SCIF, | ||
35 | .irqs = { 252, 253, 254, 255}, | ||
36 | }, { | ||
37 | .flags = 0, | ||
38 | } | ||
39 | }; | ||
40 | |||
41 | static struct platform_device sci_device = { | ||
42 | .name = "sh-sci", | ||
43 | .id = -1, | ||
44 | .dev = { | ||
45 | .platform_data = sci_platform_data, | ||
46 | }, | ||
47 | }; | ||
48 | |||
49 | static struct platform_device *sh7206_devices[] __initdata = { | ||
50 | &sci_device, | ||
51 | }; | ||
52 | |||
53 | static int __init sh7206_devices_setup(void) | ||
54 | { | ||
55 | return platform_add_devices(sh7206_devices, | ||
56 | ARRAY_SIZE(sh7206_devices)); | ||
57 | } | ||
58 | __initcall(sh7206_devices_setup); | ||
diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile index 58d3815695ff..83905e4e4387 100644 --- a/arch/sh/kernel/cpu/sh3/Makefile +++ b/arch/sh/kernel/cpu/sh3/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the Linux/SuperH SH-3 backends. | 2 | # Makefile for the Linux/SuperH SH-3 backends. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := ex.o probe.o | 5 | obj-y := ex.o probe.o entry.o |
6 | 6 | ||
7 | # CPU subtype setup | 7 | # CPU subtype setup |
8 | obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o | 8 | obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o |
diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c index 10461a745e5f..b791a29fdb62 100644 --- a/arch/sh/kernel/cpu/sh3/clock-sh7709.c +++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c | |||
@@ -24,7 +24,7 @@ static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; | |||
24 | 24 | ||
25 | static void set_bus_parent(struct clk *clk) | 25 | static void set_bus_parent(struct clk *clk) |
26 | { | 26 | { |
27 | struct clk *bus_clk = clk_get("bus_clk"); | 27 | struct clk *bus_clk = clk_get(NULL, "bus_clk"); |
28 | clk->parent = bus_clk; | 28 | clk->parent = bus_clk; |
29 | clk_put(bus_clk); | 29 | clk_put(bus_clk); |
30 | } | 30 | } |
diff --git a/arch/sh/kernel/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 39aaefb2d83f..8c0dc2700c69 100644 --- a/arch/sh/kernel/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * linux/arch/sh/entry.S | 2 | * arch/sh/kernel/entry.S |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2003 - 2006 Paul Mundt | 5 | * Copyright (C) 2003 - 2006 Paul Mundt |
@@ -7,15 +7,16 @@ | |||
7 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
8 | * License. See the file "COPYING" in the main directory of this archive | 8 | * License. See the file "COPYING" in the main directory of this archive |
9 | * for more details. | 9 | * for more details. |
10 | * | ||
11 | */ | 10 | */ |
12 | #include <linux/sys.h> | 11 | #include <linux/sys.h> |
13 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
14 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
15 | #include <asm/asm-offsets.h> | 14 | #include <asm/asm-offsets.h> |
16 | #include <asm/thread_info.h> | 15 | #include <asm/thread_info.h> |
17 | #include <asm/cpu/mmu_context.h> | ||
18 | #include <asm/unistd.h> | 16 | #include <asm/unistd.h> |
17 | #include <asm/cpu/mmu_context.h> | ||
18 | #include <asm/pgtable.h> | ||
19 | #include <asm/page.h> | ||
19 | 20 | ||
20 | ! NOTE: | 21 | ! NOTE: |
21 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address | 22 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address |
@@ -81,6 +82,8 @@ OFF_TRA = (16*4+6*4) | |||
81 | #define k_g_imask r6_bank /* r6_bank1 */ | 82 | #define k_g_imask r6_bank /* r6_bank1 */ |
82 | #define current r7 /* r7_bank1 */ | 83 | #define current r7 /* r7_bank1 */ |
83 | 84 | ||
85 | #include <asm/entry-macros.S> | ||
86 | |||
84 | /* | 87 | /* |
85 | * Kernel mode register usage: | 88 | * Kernel mode register usage: |
86 | * k0 scratch | 89 | * k0 scratch |
@@ -107,26 +110,6 @@ OFF_TRA = (16*4+6*4) | |||
107 | ! this first version depends *much* on C implementation. | 110 | ! this first version depends *much* on C implementation. |
108 | ! | 111 | ! |
109 | 112 | ||
110 | #define CLI() \ | ||
111 | stc sr, r0; \ | ||
112 | or #0xf0, r0; \ | ||
113 | ldc r0, sr | ||
114 | |||
115 | #define STI() \ | ||
116 | mov.l __INV_IMASK, r11; \ | ||
117 | stc sr, r10; \ | ||
118 | and r11, r10; \ | ||
119 | stc k_g_imask, r11; \ | ||
120 | or r11, r10; \ | ||
121 | ldc r10, sr | ||
122 | |||
123 | #if defined(CONFIG_PREEMPT) | ||
124 | # define preempt_stop() CLI() | ||
125 | #else | ||
126 | # define preempt_stop() | ||
127 | # define resume_kernel restore_all | ||
128 | #endif | ||
129 | |||
130 | #if defined(CONFIG_MMU) | 113 | #if defined(CONFIG_MMU) |
131 | .align 2 | 114 | .align 2 |
132 | ENTRY(tlb_miss_load) | 115 | ENTRY(tlb_miss_load) |
@@ -155,29 +138,14 @@ ENTRY(tlb_protection_violation_store) | |||
155 | 138 | ||
156 | call_dpf: | 139 | call_dpf: |
157 | mov.l 1f, r0 | 140 | mov.l 1f, r0 |
158 | mov r5, r8 | 141 | mov.l @r0, r6 ! address |
159 | mov.l @r0, r6 | ||
160 | mov r6, r9 | ||
161 | mov.l 2f, r0 | ||
162 | sts pr, r10 | ||
163 | jsr @r0 | ||
164 | mov r15, r4 | ||
165 | ! | ||
166 | tst r0, r0 | ||
167 | bf/s 0f | ||
168 | lds r10, pr | ||
169 | rts | ||
170 | nop | ||
171 | 0: STI() | ||
172 | mov.l 3f, r0 | 142 | mov.l 3f, r0 |
173 | mov r9, r6 | 143 | |
174 | mov r8, r5 | ||
175 | jmp @r0 | 144 | jmp @r0 |
176 | mov r15, r4 | 145 | mov r15, r4 ! regs |
177 | 146 | ||
178 | .align 2 | 147 | .align 2 |
179 | 1: .long MMU_TEA | 148 | 1: .long MMU_TEA |
180 | 2: .long __do_page_fault | ||
181 | 3: .long do_page_fault | 149 | 3: .long do_page_fault |
182 | 150 | ||
183 | .align 2 | 151 | .align 2 |
@@ -203,32 +171,6 @@ call_dae: | |||
203 | 2: .long do_address_error | 171 | 2: .long do_address_error |
204 | #endif /* CONFIG_MMU */ | 172 | #endif /* CONFIG_MMU */ |
205 | 173 | ||
206 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | ||
207 | ! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. | ||
208 | ! If both are configured, handle the debug traps (breakpoints) in SW, | ||
209 | ! but still allow BIOS traps to FW. | ||
210 | |||
211 | .align 2 | ||
212 | debug_kernel: | ||
213 | #if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) | ||
214 | /* Force BIOS call to FW (debug_trap put TRA in r8) */ | ||
215 | mov r8,r0 | ||
216 | shlr2 r0 | ||
217 | cmp/eq #0x3f,r0 | ||
218 | bt debug_kernel_fw | ||
219 | #endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ | ||
220 | |||
221 | debug_enter: | ||
222 | #if defined(CONFIG_SH_KGDB) | ||
223 | /* Jump to kgdb, pass stacked regs as arg */ | ||
224 | debug_kernel_sw: | ||
225 | mov.l 3f, r0 | ||
226 | jmp @r0 | ||
227 | mov r15, r4 | ||
228 | .align 2 | ||
229 | 3: .long kgdb_handle_exception | ||
230 | #endif /* CONFIG_SH_KGDB */ | ||
231 | |||
232 | #if defined(CONFIG_SH_STANDARD_BIOS) | 174 | #if defined(CONFIG_SH_STANDARD_BIOS) |
233 | /* Unwind the stack and jmp to the debug entry */ | 175 | /* Unwind the stack and jmp to the debug entry */ |
234 | debug_kernel_fw: | 176 | debug_kernel_fw: |
@@ -269,276 +211,6 @@ debug_kernel_fw: | |||
269 | 2: .long gdb_vbr_vector | 211 | 2: .long gdb_vbr_vector |
270 | #endif /* CONFIG_SH_STANDARD_BIOS */ | 212 | #endif /* CONFIG_SH_STANDARD_BIOS */ |
271 | 213 | ||
272 | #endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ | ||
273 | |||
274 | |||
275 | .align 2 | ||
276 | debug_trap: | ||
277 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | ||
278 | mov #OFF_SR, r0 | ||
279 | mov.l @(r0,r15), r0 ! get status register | ||
280 | shll r0 | ||
281 | shll r0 ! kernel space? | ||
282 | bt/s debug_kernel | ||
283 | #endif | ||
284 | mov.l @r15, r0 ! Restore R0 value | ||
285 | mov.l 1f, r8 | ||
286 | jmp @r8 | ||
287 | nop | ||
288 | |||
289 | .align 2 | ||
290 | ENTRY(exception_error) | ||
291 | ! | ||
292 | STI() | ||
293 | mov.l 2f, r0 | ||
294 | jmp @r0 | ||
295 | nop | ||
296 | |||
297 | ! | ||
298 | .align 2 | ||
299 | 1: .long break_point_trap_software | ||
300 | 2: .long do_exception_error | ||
301 | |||
302 | .align 2 | ||
303 | ret_from_exception: | ||
304 | preempt_stop() | ||
305 | ENTRY(ret_from_irq) | ||
306 | ! | ||
307 | mov #OFF_SR, r0 | ||
308 | mov.l @(r0,r15), r0 ! get status register | ||
309 | shll r0 | ||
310 | shll r0 ! kernel space? | ||
311 | bt/s resume_kernel ! Yes, it's from kernel, go back soon | ||
312 | GET_THREAD_INFO(r8) | ||
313 | |||
314 | #ifdef CONFIG_PREEMPT | ||
315 | bra resume_userspace | ||
316 | nop | ||
317 | ENTRY(resume_kernel) | ||
318 | mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count | ||
319 | tst r0, r0 | ||
320 | bf noresched | ||
321 | need_resched: | ||
322 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
323 | tst #_TIF_NEED_RESCHED, r0 ! need_resched set? | ||
324 | bt noresched | ||
325 | |||
326 | mov #OFF_SR, r0 | ||
327 | mov.l @(r0,r15), r0 ! get status register | ||
328 | and #0xf0, r0 ! interrupts off (exception path)? | ||
329 | cmp/eq #0xf0, r0 | ||
330 | bt noresched | ||
331 | |||
332 | mov.l 1f, r0 | ||
333 | mov.l r0, @(TI_PRE_COUNT,r8) | ||
334 | |||
335 | STI() | ||
336 | mov.l 2f, r0 | ||
337 | jsr @r0 | ||
338 | nop | ||
339 | mov #0, r0 | ||
340 | mov.l r0, @(TI_PRE_COUNT,r8) | ||
341 | CLI() | ||
342 | |||
343 | bra need_resched | ||
344 | nop | ||
345 | noresched: | ||
346 | bra restore_all | ||
347 | nop | ||
348 | |||
349 | .align 2 | ||
350 | 1: .long PREEMPT_ACTIVE | ||
351 | 2: .long schedule | ||
352 | #endif | ||
353 | |||
354 | ENTRY(resume_userspace) | ||
355 | ! r8: current_thread_info | ||
356 | CLI() | ||
357 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
358 | tst #_TIF_WORK_MASK, r0 | ||
359 | bt/s restore_all | ||
360 | tst #_TIF_NEED_RESCHED, r0 | ||
361 | |||
362 | .align 2 | ||
363 | work_pending: | ||
364 | ! r0: current_thread_info->flags | ||
365 | ! r8: current_thread_info | ||
366 | ! t: result of "tst #_TIF_NEED_RESCHED, r0" | ||
367 | bf/s work_resched | ||
368 | tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 | ||
369 | work_notifysig: | ||
370 | bt/s restore_all | ||
371 | mov r15, r4 | ||
372 | mov r12, r5 ! set arg1(save_r0) | ||
373 | mov r0, r6 | ||
374 | mov.l 2f, r1 | ||
375 | mova restore_all, r0 | ||
376 | jmp @r1 | ||
377 | lds r0, pr | ||
378 | work_resched: | ||
379 | #ifndef CONFIG_PREEMPT | ||
380 | ! gUSA handling | ||
381 | mov.l @(OFF_SP,r15), r0 ! get user space stack pointer | ||
382 | mov r0, r1 | ||
383 | shll r0 | ||
384 | bf/s 1f | ||
385 | shll r0 | ||
386 | bf/s 1f | ||
387 | mov #OFF_PC, r0 | ||
388 | ! SP >= 0xc0000000 : gUSA mark | ||
389 | mov.l @(r0,r15), r2 ! get user space PC (program counter) | ||
390 | mov.l @(OFF_R0,r15), r3 ! end point | ||
391 | cmp/hs r3, r2 ! r2 >= r3? | ||
392 | bt 1f | ||
393 | add r3, r1 ! rewind point #2 | ||
394 | mov.l r1, @(r0,r15) ! reset PC to rewind point #2 | ||
395 | ! | ||
396 | 1: | ||
397 | #endif | ||
398 | mov.l 1f, r1 | ||
399 | jsr @r1 ! schedule | ||
400 | nop | ||
401 | CLI() | ||
402 | ! | ||
403 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
404 | tst #_TIF_WORK_MASK, r0 | ||
405 | bt restore_all | ||
406 | bra work_pending | ||
407 | tst #_TIF_NEED_RESCHED, r0 | ||
408 | |||
409 | .align 2 | ||
410 | 1: .long schedule | ||
411 | 2: .long do_notify_resume | ||
412 | |||
413 | .align 2 | ||
414 | syscall_exit_work: | ||
415 | ! r0: current_thread_info->flags | ||
416 | ! r8: current_thread_info | ||
417 | tst #_TIF_SYSCALL_TRACE, r0 | ||
418 | bt/s work_pending | ||
419 | tst #_TIF_NEED_RESCHED, r0 | ||
420 | STI() | ||
421 | ! XXX setup arguments... | ||
422 | mov.l 4f, r0 ! do_syscall_trace | ||
423 | jsr @r0 | ||
424 | nop | ||
425 | bra resume_userspace | ||
426 | nop | ||
427 | |||
428 | .align 2 | ||
429 | syscall_trace_entry: | ||
430 | ! Yes it is traced. | ||
431 | ! XXX setup arguments... | ||
432 | mov.l 4f, r11 ! Call do_syscall_trace which notifies | ||
433 | jsr @r11 ! superior (will chomp R[0-7]) | ||
434 | nop | ||
435 | ! Reload R0-R4 from kernel stack, where the | ||
436 | ! parent may have modified them using | ||
437 | ! ptrace(POKEUSR). (Note that R0-R2 are | ||
438 | ! used by the system call handler directly | ||
439 | ! from the kernel stack anyway, so don't need | ||
440 | ! to be reloaded here.) This allows the parent | ||
441 | ! to rewrite system calls and args on the fly. | ||
442 | mov.l @(OFF_R4,r15), r4 ! arg0 | ||
443 | mov.l @(OFF_R5,r15), r5 | ||
444 | mov.l @(OFF_R6,r15), r6 | ||
445 | mov.l @(OFF_R7,r15), r7 ! arg3 | ||
446 | mov.l @(OFF_R3,r15), r3 ! syscall_nr | ||
447 | ! Arrange for do_syscall_trace to be called | ||
448 | ! again as the system call returns. | ||
449 | mov.l 2f, r10 ! Number of syscalls | ||
450 | cmp/hs r10, r3 | ||
451 | bf syscall_call | ||
452 | mov #-ENOSYS, r0 | ||
453 | bra syscall_exit | ||
454 | mov.l r0, @(OFF_R0,r15) ! Return value | ||
455 | |||
456 | /* | ||
457 | * Syscall interface: | ||
458 | * | ||
459 | * Syscall #: R3 | ||
460 | * Arguments #0 to #3: R4--R7 | ||
461 | * Arguments #4 to #6: R0, R1, R2 | ||
462 | * TRA: (number of arguments + 0x10) x 4 | ||
463 | * | ||
464 | * This code also handles delegating other traps to the BIOS/gdb stub | ||
465 | * according to: | ||
466 | * | ||
467 | * Trap number | ||
468 | * (TRA>>2) Purpose | ||
469 | * -------- ------- | ||
470 | * 0x0-0xf old syscall ABI | ||
471 | * 0x10-0x1f new syscall ABI | ||
472 | * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. | ||
473 | * | ||
474 | * Note: When we're first called, the TRA value must be shifted | ||
475 | * right 2 bits in order to get the value that was used as the "trapa" | ||
476 | * argument. | ||
477 | */ | ||
478 | |||
479 | .align 2 | ||
480 | .globl ret_from_fork | ||
481 | ret_from_fork: | ||
482 | mov.l 1f, r8 | ||
483 | jsr @r8 | ||
484 | mov r0, r4 | ||
485 | bra syscall_exit | ||
486 | nop | ||
487 | .align 2 | ||
488 | 1: .long schedule_tail | ||
489 | ! | ||
490 | ENTRY(system_call) | ||
491 | mov.l 1f, r9 | ||
492 | mov.l @r9, r8 ! Read from TRA (Trap Address) Register | ||
493 | ! | ||
494 | ! Is the trap argument >= 0x20? (TRA will be >= 0x80) | ||
495 | mov #0x7f, r9 | ||
496 | cmp/hi r9, r8 | ||
497 | bt/s 0f | ||
498 | mov #OFF_TRA, r9 | ||
499 | add r15, r9 | ||
500 | ! | ||
501 | mov.l r8, @r9 ! set TRA value to tra | ||
502 | STI() | ||
503 | ! Call the system call handler through the table. | ||
504 | ! First check for bad syscall number | ||
505 | mov r3, r9 | ||
506 | mov.l 2f, r8 ! Number of syscalls | ||
507 | cmp/hs r8, r9 | ||
508 | bf/s good_system_call | ||
509 | GET_THREAD_INFO(r8) | ||
510 | syscall_badsys: ! Bad syscall number | ||
511 | mov #-ENOSYS, r0 | ||
512 | bra resume_userspace | ||
513 | mov.l r0, @(OFF_R0,r15) ! Return value | ||
514 | ! | ||
515 | 0: | ||
516 | bra debug_trap | ||
517 | nop | ||
518 | ! | ||
519 | good_system_call: ! Good syscall number | ||
520 | mov.l @(TI_FLAGS,r8), r8 | ||
521 | mov #_TIF_SYSCALL_TRACE, r10 | ||
522 | tst r10, r8 | ||
523 | bf syscall_trace_entry | ||
524 | ! | ||
525 | syscall_call: | ||
526 | shll2 r9 ! x4 | ||
527 | mov.l 3f, r8 ! Load the address of sys_call_table | ||
528 | add r8, r9 | ||
529 | mov.l @r9, r8 | ||
530 | jsr @r8 ! jump to specific syscall handler | ||
531 | nop | ||
532 | mov.l @(OFF_R0,r15), r12 ! save r0 | ||
533 | mov.l r0, @(OFF_R0,r15) ! save the return value | ||
534 | ! | ||
535 | syscall_exit: | ||
536 | CLI() | ||
537 | ! | ||
538 | GET_THREAD_INFO(r8) | ||
539 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
540 | tst #_TIF_ALLWORK_MASK, r0 | ||
541 | bf syscall_exit_work | ||
542 | restore_all: | 214 | restore_all: |
543 | mov.l @r15+, r0 | 215 | mov.l @r15+, r0 |
544 | mov.l @r15+, r1 | 216 | mov.l @r15+, r1 |
@@ -606,7 +278,9 @@ skip_restore: | |||
606 | ! | 278 | ! |
607 | ! Calculate new SR value | 279 | ! Calculate new SR value |
608 | mov k3, k2 ! original SR value | 280 | mov k3, k2 ! original SR value |
609 | mov.l 9f, k1 | 281 | mov #0xf0, k1 |
282 | extu.b k1, k1 | ||
283 | not k1, k1 | ||
610 | and k1, k2 ! Mask original SR value | 284 | and k1, k2 ! Mask original SR value |
611 | ! | 285 | ! |
612 | mov k3, k0 ! Calculate IMASK-bits | 286 | mov k3, k0 ! Calculate IMASK-bits |
@@ -632,16 +306,12 @@ skip_restore: | |||
632 | nop | 306 | nop |
633 | 307 | ||
634 | .align 2 | 308 | .align 2 |
635 | 1: .long TRA | ||
636 | 2: .long NR_syscalls | ||
637 | 3: .long sys_call_table | ||
638 | 4: .long do_syscall_trace | ||
639 | 5: .long 0x00001000 ! DSP | 309 | 5: .long 0x00001000 ! DSP |
640 | 7: .long 0x30000000 | 310 | 7: .long 0x30000000 |
641 | 9: | ||
642 | __INV_IMASK: | ||
643 | .long 0xffffff0f ! ~(IMASK) | ||
644 | 311 | ||
312 | ! common exception handler | ||
313 | #include "../../entry-common.S" | ||
314 | |||
645 | ! Exception Vector Base | 315 | ! Exception Vector Base |
646 | ! | 316 | ! |
647 | ! Should be aligned page boundary. | 317 | ! Should be aligned page boundary. |
@@ -661,9 +331,176 @@ general_exception: | |||
661 | 2: .long ret_from_exception | 331 | 2: .long ret_from_exception |
662 | ! | 332 | ! |
663 | ! | 333 | ! |
334 | |||
335 | /* This code makes some assumptions to improve performance. | ||
336 | * Make sure they are stil true. */ | ||
337 | #if PTRS_PER_PGD != PTRS_PER_PTE | ||
338 | #error PGD and PTE sizes don't match | ||
339 | #endif | ||
340 | |||
341 | /* gas doesn't flag impossible values for mov #immediate as an error */ | ||
342 | #if (_PAGE_PRESENT >> 2) > 0x7f | ||
343 | #error cannot load PAGE_PRESENT as an immediate | ||
344 | #endif | ||
345 | #if _PAGE_DIRTY > 0x7f | ||
346 | #error cannot load PAGE_DIRTY as an immediate | ||
347 | #endif | ||
348 | #if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED | ||
349 | #error cannot derive PAGE_ACCESSED from PAGE_PRESENT | ||
350 | #endif | ||
351 | |||
352 | #if defined(CONFIG_CPU_SH4) | ||
353 | #define ldmmupteh(r) mov.l 8f, r | ||
354 | #else | ||
355 | #define ldmmupteh(r) mov #MMU_PTEH, r | ||
356 | #endif | ||
357 | |||
664 | .balign 1024,0,1024 | 358 | .balign 1024,0,1024 |
665 | tlb_miss: | 359 | tlb_miss: |
666 | mov.l 1f, k2 | 360 | #ifdef COUNT_EXCEPTIONS |
361 | ! Increment the counts | ||
362 | mov.l 9f, k1 | ||
363 | mov.l @k1, k2 | ||
364 | add #1, k2 | ||
365 | mov.l k2, @k1 | ||
366 | #endif | ||
367 | |||
368 | ! k0 scratch | ||
369 | ! k1 pgd and pte pointers | ||
370 | ! k2 faulting address | ||
371 | ! k3 pgd and pte index masks | ||
372 | ! k4 shift | ||
373 | |||
374 | ! Load up the pgd entry (k1) | ||
375 | |||
376 | ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH | ||
377 | |||
378 | mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2 | ||
379 | mov #-(PGDIR_SHIFT-2), k4 ! 6 EX | ||
380 | |||
381 | mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2) | ||
382 | |||
383 | mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2) | ||
384 | |||
385 | mov k2, k0 ! 5 MT (latency=0) | ||
386 | shld k4, k0 ! 99 EX | ||
387 | |||
388 | and k3, k0 ! 78 EX | ||
389 | |||
390 | mov.l @(k0, k1), k1 ! 21 LS (latency=2) | ||
391 | mov #-(PAGE_SHIFT-2), k4 ! 6 EX | ||
392 | |||
393 | ! Load up the pte entry (k2) | ||
394 | |||
395 | mov k2, k0 ! 5 MT (latency=0) | ||
396 | shld k4, k0 ! 99 EX | ||
397 | |||
398 | tst k1, k1 ! 86 MT | ||
399 | |||
400 | bt 20f ! 110 BR | ||
401 | |||
402 | and k3, k0 ! 78 EX | ||
403 | mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT | ||
404 | |||
405 | mov.l @(k0, k1), k2 ! 21 LS (latency=2) | ||
406 | add k0, k1 ! 49 EX | ||
407 | |||
408 | #ifdef CONFIG_CPU_HAS_PTEA | ||
409 | ! Test the entry for present and _PAGE_ACCESSED | ||
410 | |||
411 | mov #-28, k3 ! 6 EX | ||
412 | mov k2, k0 ! 5 MT (latency=0) | ||
413 | |||
414 | tst k4, k2 ! 68 MT | ||
415 | shld k3, k0 ! 99 EX | ||
416 | |||
417 | bt 20f ! 110 BR | ||
418 | |||
419 | ! Set PTEA register | ||
420 | ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1) | ||
421 | ! | ||
422 | ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT | ||
423 | |||
424 | and #0xe, k0 ! 79 EX | ||
425 | |||
426 | mov k0, k3 ! 5 MT (latency=0) | ||
427 | mov k2, k0 ! 5 MT (latency=0) | ||
428 | |||
429 | and #1, k0 ! 79 EX | ||
430 | |||
431 | or k0, k3 ! 82 EX | ||
432 | |||
433 | ldmmupteh(k0) ! 9 LS (latency=2) | ||
434 | shll2 k4 ! 101 EX _PAGE_ACCESSED | ||
435 | |||
436 | tst k4, k2 ! 68 MT | ||
437 | |||
438 | mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS | ||
439 | |||
440 | mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK | ||
441 | |||
442 | ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED | ||
443 | #else | ||
444 | |||
445 | ! Test the entry for present and _PAGE_ACCESSED | ||
446 | |||
447 | mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK | ||
448 | tst k4, k2 ! 68 MT | ||
449 | |||
450 | shll2 k4 ! 101 EX _PAGE_ACCESSED | ||
451 | ldmmupteh(k0) ! 9 LS (latency=2) | ||
452 | |||
453 | bt 20f ! 110 BR | ||
454 | tst k4, k2 ! 68 MT | ||
455 | |||
456 | ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED | ||
457 | |||
458 | #endif | ||
459 | |||
460 | ! Set up the entry | ||
461 | |||
462 | and k2, k3 ! 78 EX | ||
463 | bt/s 10f ! 108 BR | ||
464 | |||
465 | mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS | ||
466 | |||
467 | ldtlb ! 128 CO | ||
468 | |||
469 | ! At least one instruction between ldtlb and rte | ||
470 | nop ! 119 NOP | ||
471 | |||
472 | rte ! 126 CO | ||
473 | |||
474 | nop ! 119 NOP | ||
475 | |||
476 | |||
477 | 10: or k4, k2 ! 82 EX | ||
478 | |||
479 | ldtlb ! 128 CO | ||
480 | |||
481 | ! At least one instruction between ldtlb and rte | ||
482 | mov.l k2, @k1 ! 27 LS | ||
483 | |||
484 | rte ! 126 CO | ||
485 | |||
486 | ! Note we cannot execute mov here, because it is executed after | ||
487 | ! restoring SSR, so would be executed in user space. | ||
488 | nop ! 119 NOP | ||
489 | |||
490 | |||
491 | .align 5 | ||
492 | ! One cache line if possible... | ||
493 | 1: .long swapper_pg_dir | ||
494 | 4: .short (PTRS_PER_PGD-1) << 2 | ||
495 | 5: .short _PAGE_PRESENT | ||
496 | 7: .long _PAGE_FLAGS_HARDWARE_MASK | ||
497 | 8: .long MMU_PTEH | ||
498 | #ifdef COUNT_EXCEPTIONS | ||
499 | 9: .long exception_count_miss | ||
500 | #endif | ||
501 | |||
502 | ! Either pgd or pte not present | ||
503 | 20: mov.l 1f, k2 | ||
667 | mov.l 4f, k3 | 504 | mov.l 4f, k3 |
668 | bra handle_exception | 505 | bra handle_exception |
669 | mov.l @k2, k2 | 506 | mov.l @k2, k2 |
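For readability, the PTEA/PTEL values the new TLB-miss fast path programs can be written in C as follows (this mirrors the comment in the hunk; it is not a separate implementation):

	/* CONFIG_CPU_HAS_PTEA branch */
	unsigned long ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);	/* -> MMU_PTEA */
	unsigned long ptel = pteval & _PAGE_FLAGS_HARDWARE_MASK;	/* -> MMU_PTEL */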
@@ -710,8 +547,9 @@ ENTRY(handle_exception) | |||
710 | bt/s 1f ! It's a kernel to kernel transition. | 547 | bt/s 1f ! It's a kernel to kernel transition. |
711 | mov r15, k0 ! save original stack to k0 | 548 | mov r15, k0 ! save original stack to k0 |
712 | /* User space to kernel */ | 549 | /* User space to kernel */ |
713 | mov #(THREAD_SIZE >> 8), k1 | 550 | mov #(THREAD_SIZE >> 10), k1 |
714 | shll8 k1 ! k1 := THREAD_SIZE | 551 | shll8 k1 ! k1 := THREAD_SIZE |
552 | shll2 k1 | ||
715 | add current, k1 | 553 | add current, k1 |
716 | mov k1, r15 ! change to kernel stack | 554 | mov k1, r15 ! change to kernel stack |
717 | ! | 555 | ! |
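The stack-top calculation above changes because mov #imm only takes a sign-extended 8-bit immediate on SH; loading THREAD_SIZE >> 10 and shifting back up by 8 then 2 keeps the constant in range for larger THREAD_SIZE configurations. Assuming the common 8 KiB THREAD_SIZE:

	k1 = 8192 >> 10 = 8          (fits in the 8-bit immediate)
	k1 <<= 8; k1 <<= 2           -> 8192 = THREAD_SIZE
	k1 += current                -> kernel stack top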
@@ -761,7 +599,7 @@ skip_save: | |||
761 | ! Save the user registers on the stack. | 599 | ! Save the user registers on the stack. |
762 | mov.l k2, @-r15 ! EXPEVT | 600 | mov.l k2, @-r15 ! EXPEVT |
763 | 601 | ||
764 | mov #-1, k4 | 602 | mov #-1, k4 |
765 | mov.l k4, @-r15 ! set TRA (default: -1) | 603 | mov.l k4, @-r15 ! set TRA (default: -1) |
766 | ! | 604 | ! |
767 | sts.l macl, @-r15 | 605 | sts.l macl, @-r15 |
@@ -813,6 +651,15 @@ skip_save: | |||
813 | bf interrupt_exception | 651 | bf interrupt_exception |
814 | shlr2 r8 | 652 | shlr2 r8 |
815 | shlr r8 | 653 | shlr r8 |
654 | |||
655 | #ifdef COUNT_EXCEPTIONS | ||
656 | mov.l 5f, r9 | ||
657 | add r8, r9 | ||
658 | mov.l @r9, r10 | ||
659 | add #1, r10 | ||
660 | mov.l r10, @r9 | ||
661 | #endif | ||
662 | |||
816 | mov.l 4f, r9 | 663 | mov.l 4f, r9 |
817 | add r8, r9 | 664 | add r8, r9 |
818 | mov.l @r9, r9 | 665 | mov.l @r9, r9 |
@@ -826,6 +673,9 @@ skip_save: | |||
826 | 2: .long 0x000080f0 ! FD=1, IMASK=15 | 673 | 2: .long 0x000080f0 ! FD=1, IMASK=15 |
827 | 3: .long 0xcfffffff ! RB=0, BL=0 | 674 | 3: .long 0xcfffffff ! RB=0, BL=0 |
828 | 4: .long exception_handling_table | 675 | 4: .long exception_handling_table |
676 | #ifdef COUNT_EXCEPTIONS | ||
677 | 5: .long exception_count_table | ||
678 | #endif | ||
829 | 679 | ||
830 | interrupt_exception: | 680 | interrupt_exception: |
831 | mov.l 1f, r9 | 681 | mov.l 1f, r9 |
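The stack-pointer setup in handle_exception above (and the matching change in head.S later in this series) now builds THREAD_SIZE from an 8-bit immediate shifted left by ten bits instead of eight. The SH "mov #imm" instruction only carries a signed 8-bit constant, so the extra shll2 lets THREAD_SIZE grow beyond what ">> 8" could encode, provided it stays a multiple of 1 KiB. A minimal sketch of the arithmetic, assuming an 8 KiB THREAD_SIZE purely for illustration:

    /* Sketch only: mirrors how entry.S and head.S rebuild THREAD_SIZE
     * from an 8-bit signed immediate; the 8 KiB value is an assumption. */
    #include <assert.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192

    int main(void)
    {
            int8_t  imm = THREAD_SIZE >> 10;   /* mov #(THREAD_SIZE >> 10), k1 */
            int32_t k1  = (int32_t)imm << 8;   /* shll8 k1 */
            k1 <<= 2;                          /* shll2 k1 */

            /* Valid while THREAD_SIZE is a multiple of 1 KiB and
             * THREAD_SIZE >> 10 fits the signed 8-bit immediate (<= 127);
             * the old ">> 8" form topped out just under 32 KiB. */
            assert(k1 == THREAD_SIZE);
            return 0;
    }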
diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile index 8dbf3895ece7..6e415baf04b4 100644 --- a/arch/sh/kernel/cpu/sh4/Makefile +++ b/arch/sh/kernel/cpu/sh4/Makefile | |||
@@ -2,7 +2,8 @@ | |||
2 | # Makefile for the Linux/SuperH SH-4 backends. | 2 | # Makefile for the Linux/SuperH SH-4 backends. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := ex.o probe.o | 5 | obj-y := ex.o probe.o common.o |
6 | common-y += $(addprefix ../sh3/, entry.o) | ||
6 | 7 | ||
7 | obj-$(CONFIG_SH_FPU) += fpu.o | 8 | obj-$(CONFIG_SH_FPU) += fpu.o |
8 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o | 9 | obj-$(CONFIG_SH_STORE_QUEUES) += sq.o |
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c index bfdf5fe8d948..fa2019aabd74 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c | |||
@@ -97,7 +97,7 @@ static void shoc_clk_recalc(struct clk *clk) | |||
97 | 97 | ||
98 | static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) | 98 | static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) |
99 | { | 99 | { |
100 | struct clk *bclk = clk_get("bus_clk"); | 100 | struct clk *bclk = clk_get(NULL, "bus_clk"); |
101 | unsigned long bclk_rate = clk_get_rate(bclk); | 101 | unsigned long bclk_rate = clk_get_rate(bclk); |
102 | 102 | ||
103 | clk_put(bclk); | 103 | clk_put(bclk); |
@@ -151,7 +151,7 @@ static struct clk *sh4202_onchip_clocks[] = { | |||
151 | 151 | ||
152 | static int __init sh4202_clk_init(void) | 152 | static int __init sh4202_clk_init(void) |
153 | { | 153 | { |
154 | struct clk *clk = clk_get("master_clk"); | 154 | struct clk *clk = clk_get(NULL, "master_clk"); |
155 | int i; | 155 | int i; |
156 | 156 | ||
157 | for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { | 157 | for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { |
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh7780.c b/arch/sh/kernel/cpu/sh4/clock-sh7780.c index 93ad367342c9..9e6a216750c8 100644 --- a/arch/sh/kernel/cpu/sh4/clock-sh7780.c +++ b/arch/sh/kernel/cpu/sh4/clock-sh7780.c | |||
@@ -98,7 +98,7 @@ static struct clk *sh7780_onchip_clocks[] = { | |||
98 | 98 | ||
99 | static int __init sh7780_clk_init(void) | 99 | static int __init sh7780_clk_init(void) |
100 | { | 100 | { |
101 | struct clk *clk = clk_get("master_clk"); | 101 | struct clk *clk = clk_get(NULL, "master_clk"); |
102 | int i; | 102 | int i; |
103 | 103 | ||
104 | for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) { | 104 | for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) { |
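Both clock setup files above are adapted to the two-argument clk_get(dev, id) prototype; passing NULL for the device keeps the plain by-name lookup. A minimal consumer sketch under that assumption (the function name and rate check are illustrative, not taken from the diff):

    /* Hypothetical consumer of the updated clk_get() convention. */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static int example_rate_ok(unsigned long rate)
    {
            struct clk *bclk = clk_get(NULL, "bus_clk");  /* NULL: no struct device */
            unsigned long bclk_rate;

            if (IS_ERR(bclk))
                    return PTR_ERR(bclk);

            bclk_rate = clk_get_rate(bclk);
            clk_put(bclk);

            if (rate > bclk_rate)
                    return -ERANGE;
            return 0;
    }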
diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c index f486c07e10e2..7624677f6628 100644 --- a/arch/sh/kernel/cpu/sh4/fpu.c +++ b/arch/sh/kernel/cpu/sh4/fpu.c | |||
@@ -282,11 +282,8 @@ ieee_fpe_handler (struct pt_regs *regs) | |||
282 | grab_fpu(regs); | 282 | grab_fpu(regs); |
283 | restore_fpu(tsk); | 283 | restore_fpu(tsk); |
284 | set_tsk_thread_flag(tsk, TIF_USEDFPU); | 284 | set_tsk_thread_flag(tsk, TIF_USEDFPU); |
285 | } else { | 285 | } else |
286 | tsk->thread.trap_no = 11; | ||
287 | tsk->thread.error_code = 0; | ||
288 | force_sig(SIGFPE, tsk); | 286 | force_sig(SIGFPE, tsk); |
289 | } | ||
290 | 287 | ||
291 | regs->pc = nextpc; | 288 | regs->pc = nextpc; |
292 | return 1; | 289 | return 1; |
@@ -296,29 +293,29 @@ ieee_fpe_handler (struct pt_regs *regs) | |||
296 | } | 293 | } |
297 | 294 | ||
298 | asmlinkage void | 295 | asmlinkage void |
299 | do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, | 296 | do_fpu_error(unsigned long r4, unsigned long r5, unsigned long r6, |
300 | struct pt_regs regs) | 297 | unsigned long r7, struct pt_regs __regs) |
301 | { | 298 | { |
299 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
302 | struct task_struct *tsk = current; | 300 | struct task_struct *tsk = current; |
303 | 301 | ||
304 | if (ieee_fpe_handler (®s)) | 302 | if (ieee_fpe_handler(regs)) |
305 | return; | 303 | return; |
306 | 304 | ||
307 | regs.pc += 2; | 305 | regs->pc += 2; |
308 | save_fpu(tsk, ®s); | 306 | save_fpu(tsk, regs); |
309 | tsk->thread.trap_no = 11; | ||
310 | tsk->thread.error_code = 0; | ||
311 | force_sig(SIGFPE, tsk); | 307 | force_sig(SIGFPE, tsk); |
312 | } | 308 | } |
313 | 309 | ||
314 | asmlinkage void | 310 | asmlinkage void |
315 | do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6, | 311 | do_fpu_state_restore(unsigned long r4, unsigned long r5, unsigned long r6, |
316 | unsigned long r7, struct pt_regs regs) | 312 | unsigned long r7, struct pt_regs __regs) |
317 | { | 313 | { |
314 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
318 | struct task_struct *tsk = current; | 315 | struct task_struct *tsk = current; |
319 | 316 | ||
320 | grab_fpu(®s); | 317 | grab_fpu(regs); |
321 | if (!user_mode(®s)) { | 318 | if (!user_mode(regs)) { |
322 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); | 319 | printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); |
323 | return; | 320 | return; |
324 | } | 321 | } |
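The conversions above (and the matching ones in irq.c, process.c and signal.c later in this diff) stop declaring the trap frame as a by-value struct pt_regs parameter: the frame already sits on the kernel stack, so the handler names it __regs and takes its address through RELOC_HIDE(), which keeps GCC from making assumptions about that address. A stripped-down sketch of the pattern with a hypothetical handler name:

    /* Illustrative only; do_example_trap is a made-up name, while
     * RELOC_HIDE() and struct pt_regs are the facilities used above. */
    #include <linux/linkage.h>
    #include <linux/compiler.h>
    #include <asm/ptrace.h>

    asmlinkage void do_example_trap(unsigned long r4, unsigned long r5,
                                    unsigned long r6, unsigned long r7,
                                    struct pt_regs __regs)
    {
            /* __regs is the register frame the entry code pushed; take a
             * pointer to it without letting the optimizer reason about
             * the underlying object. */
            struct pt_regs *regs = RELOC_HIDE(&__regs, 0);

            regs->pc += 2;  /* e.g. step past the trapping instruction */
    }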
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c index c294de1e14a3..afe0f1b1c030 100644 --- a/arch/sh/kernel/cpu/sh4/probe.c +++ b/arch/sh/kernel/cpu/sh4/probe.c | |||
@@ -79,16 +79,16 @@ int __init detect_cpu_and_cache_system(void) | |||
79 | case 0x205: | 79 | case 0x205: |
80 | cpu_data->type = CPU_SH7750; | 80 | cpu_data->type = CPU_SH7750; |
81 | cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | | 81 | cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | |
82 | CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; | 82 | CPU_HAS_PERF_COUNTER; |
83 | break; | 83 | break; |
84 | case 0x206: | 84 | case 0x206: |
85 | cpu_data->type = CPU_SH7750S; | 85 | cpu_data->type = CPU_SH7750S; |
86 | cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | | 86 | cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU | |
87 | CPU_HAS_PERF_COUNTER | CPU_HAS_PTEA; | 87 | CPU_HAS_PERF_COUNTER; |
88 | break; | 88 | break; |
89 | case 0x1100: | 89 | case 0x1100: |
90 | cpu_data->type = CPU_SH7751; | 90 | cpu_data->type = CPU_SH7751; |
91 | cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; | 91 | cpu_data->flags |= CPU_HAS_FPU; |
92 | break; | 92 | break; |
93 | case 0x2000: | 93 | case 0x2000: |
94 | cpu_data->type = CPU_SH73180; | 94 | cpu_data->type = CPU_SH73180; |
@@ -126,23 +126,22 @@ int __init detect_cpu_and_cache_system(void) | |||
126 | break; | 126 | break; |
127 | case 0x8000: | 127 | case 0x8000: |
128 | cpu_data->type = CPU_ST40RA; | 128 | cpu_data->type = CPU_ST40RA; |
129 | cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; | 129 | cpu_data->flags |= CPU_HAS_FPU; |
130 | break; | 130 | break; |
131 | case 0x8100: | 131 | case 0x8100: |
132 | cpu_data->type = CPU_ST40GX1; | 132 | cpu_data->type = CPU_ST40GX1; |
133 | cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; | 133 | cpu_data->flags |= CPU_HAS_FPU; |
134 | break; | 134 | break; |
135 | case 0x700: | 135 | case 0x700: |
136 | cpu_data->type = CPU_SH4_501; | 136 | cpu_data->type = CPU_SH4_501; |
137 | cpu_data->icache.ways = 2; | 137 | cpu_data->icache.ways = 2; |
138 | cpu_data->dcache.ways = 2; | 138 | cpu_data->dcache.ways = 2; |
139 | cpu_data->flags |= CPU_HAS_PTEA; | ||
140 | break; | 139 | break; |
141 | case 0x600: | 140 | case 0x600: |
142 | cpu_data->type = CPU_SH4_202; | 141 | cpu_data->type = CPU_SH4_202; |
143 | cpu_data->icache.ways = 2; | 142 | cpu_data->icache.ways = 2; |
144 | cpu_data->dcache.ways = 2; | 143 | cpu_data->dcache.ways = 2; |
145 | cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; | 144 | cpu_data->flags |= CPU_HAS_FPU; |
146 | break; | 145 | break; |
147 | case 0x500 ... 0x501: | 146 | case 0x500 ... 0x501: |
148 | switch (prr) { | 147 | switch (prr) { |
@@ -160,7 +159,7 @@ int __init detect_cpu_and_cache_system(void) | |||
160 | cpu_data->icache.ways = 2; | 159 | cpu_data->icache.ways = 2; |
161 | cpu_data->dcache.ways = 2; | 160 | cpu_data->dcache.ways = 2; |
162 | 161 | ||
163 | cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PTEA; | 162 | cpu_data->flags |= CPU_HAS_FPU; |
164 | 163 | ||
165 | break; | 164 | break; |
166 | default: | 165 | default: |
@@ -173,6 +172,10 @@ int __init detect_cpu_and_cache_system(void) | |||
173 | cpu_data->dcache.ways = 1; | 172 | cpu_data->dcache.ways = 1; |
174 | #endif | 173 | #endif |
175 | 174 | ||
175 | #ifdef CONFIG_CPU_HAS_PTEA | ||
176 | cpu_data->flags |= CPU_HAS_PTEA; | ||
177 | #endif | ||
178 | |||
176 | /* | 179 | /* |
177 | * On anything that's not a direct-mapped cache, look to the CVR | 180 | * On anything that's not a direct-mapped cache, look to the CVR |
178 | * for I/D-cache specifics. | 181 | * for I/D-cache specifics. |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 50812d57c1c1..bbcb06f18b04 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * SH7750/SH7751 Setup | 2 | * SH7750/SH7751 Setup |
3 | * | 3 | * |
4 | * Copyright (C) 2006 Paul Mundt | 4 | * Copyright (C) 2006 Paul Mundt |
5 | * Copyright (C) 2006 Jamie Lenehan | ||
5 | * | 6 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
7 | * License. See the file "COPYING" in the main directory of this archive | 8 | * License. See the file "COPYING" in the main directory of this archive |
@@ -10,6 +11,7 @@ | |||
10 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/serial.h> | 13 | #include <linux/serial.h> |
14 | #include <linux/io.h> | ||
13 | #include <asm/sci.h> | 15 | #include <asm/sci.h> |
14 | 16 | ||
15 | static struct plat_sci_port sci_platform_data[] = { | 17 | static struct plat_sci_port sci_platform_data[] = { |
@@ -46,3 +48,71 @@ static int __init sh7750_devices_setup(void) | |||
46 | ARRAY_SIZE(sh7750_devices)); | 48 | ARRAY_SIZE(sh7750_devices)); |
47 | } | 49 | } |
48 | __initcall(sh7750_devices_setup); | 50 | __initcall(sh7750_devices_setup); |
51 | |||
52 | static struct ipr_data sh7750_ipr_map[] = { | ||
53 | /* IRQ, IPR-idx, shift, priority */ | ||
54 | { 16, 0, 12, 2 }, /* TMU0 TUNI*/ | ||
55 | { 17, 0, 12, 2 }, /* TMU1 TUNI */ | ||
56 | { 18, 0, 4, 2 }, /* TMU2 TUNI */ | ||
57 | { 19, 0, 4, 2 }, /* TMU2 TIPCI */ | ||
58 | { 27, 1, 12, 2 }, /* WDT ITI */ | ||
59 | { 20, 0, 0, 2 }, /* RTC ATI (alarm) */ | ||
60 | { 21, 0, 0, 2 }, /* RTC PRI (period) */ | ||
61 | { 22, 0, 0, 2 }, /* RTC CUI (carry) */ | ||
62 | { 23, 1, 4, 3 }, /* SCI ERI */ | ||
63 | { 24, 1, 4, 3 }, /* SCI RXI */ | ||
64 | { 25, 1, 4, 3 }, /* SCI TXI */ | ||
65 | { 40, 2, 4, 3 }, /* SCIF ERI */ | ||
66 | { 41, 2, 4, 3 }, /* SCIF RXI */ | ||
67 | { 42, 2, 4, 3 }, /* SCIF BRI */ | ||
68 | { 43, 2, 4, 3 }, /* SCIF TXI */ | ||
69 | { 34, 2, 8, 7 }, /* DMAC DMTE0 */ | ||
70 | { 35, 2, 8, 7 }, /* DMAC DMTE1 */ | ||
71 | { 36, 2, 8, 7 }, /* DMAC DMTE2 */ | ||
72 | { 37, 2, 8, 7 }, /* DMAC DMTE3 */ | ||
73 | { 28, 2, 8, 7 }, /* DMAC DMAE */ | ||
74 | }; | ||
75 | |||
76 | static struct ipr_data sh7751_ipr_map[] = { | ||
77 | { 44, 2, 8, 7 }, /* DMAC DMTE4 */ | ||
78 | { 45, 2, 8, 7 }, /* DMAC DMTE5 */ | ||
79 | { 46, 2, 8, 7 }, /* DMAC DMTE6 */ | ||
80 | { 47, 2, 8, 7 }, /* DMAC DMTE7 */ | ||
81 | /* The following use INTC_INPRI00 for masking, which is a 32-bit | ||
82 | register, not a 16-bit register like the IPRx registers, so it | ||
83 | would need special support */ | ||
84 | /*{ 72, INTPRI00, 8, ? },*/ /* TMU3 TUNI */ | ||
85 | /*{ 76, INTPRI00, 12, ? },*/ /* TMU4 TUNI */ | ||
86 | }; | ||
87 | |||
88 | static unsigned long ipr_offsets[] = { | ||
89 | 0xffd00004UL, /* 0: IPRA */ | ||
90 | 0xffd00008UL, /* 1: IPRB */ | ||
91 | 0xffd0000cUL, /* 2: IPRC */ | ||
92 | 0xffd00010UL, /* 3: IPRD */ | ||
93 | }; | ||
94 | |||
95 | /* given the IPR index return the address of the IPR register */ | ||
96 | unsigned int map_ipridx_to_addr(int idx) | ||
97 | { | ||
98 | if (idx >= ARRAY_SIZE(ipr_offsets)) | ||
99 | return 0; | ||
100 | return ipr_offsets[idx]; | ||
101 | } | ||
102 | |||
103 | #define INTC_ICR 0xffd00000UL | ||
104 | #define INTC_ICR_IRLM (1<<7) | ||
105 | |||
106 | /* enable individual interrupt mode for external interrupts */ | ||
107 | void ipr_irq_enable_irlm(void) | ||
108 | { | ||
109 | ctrl_outw(ctrl_inw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); | ||
110 | } | ||
111 | |||
112 | void __init init_IRQ_ipr() | ||
113 | { | ||
114 | make_ipr_irq(sh7750_ipr_map, ARRAY_SIZE(sh7750_ipr_map)); | ||
115 | #ifdef CONFIG_CPU_SUBTYPE_SH7751 | ||
116 | make_ipr_irq(sh7751_ipr_map, ARRAY_SIZE(sh7751_ipr_map)); | ||
117 | #endif | ||
118 | } | ||
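Each entry in the IPR maps above reads as { IRQ, index into ipr_offsets[], bit shift, priority }: map_ipridx_to_addr() turns the index into the IPRA-IPRD register address, and the 4-bit priority lands at the given shift inside that 16-bit register. The sketch below spells that out; the struct field names and the helper are assumptions, since make_ipr_irq() itself is not part of this diff:

    /* Hypothetical expansion of one sh7750_ipr_map[] entry.  Field names
     * follow the "IRQ, IPR-idx, shift, priority" comment above;
     * ctrl_inw/ctrl_outw and map_ipridx_to_addr() are real, the rest is
     * illustrative. */
    #include <asm/io.h>

    extern unsigned int map_ipridx_to_addr(int idx);

    struct example_ipr_entry {
            unsigned int irq;
            int ipr_idx;    /* which of IPRA..IPRD */
            int shift;      /* bit position of the 4-bit priority field */
            int priority;
    };

    static void example_apply_ipr(const struct example_ipr_entry *p)
    {
            unsigned int addr = map_ipridx_to_addr(p->ipr_idx);
            unsigned short v;

            if (!addr)                      /* index out of range */
                    return;

            v = ctrl_inw(addr);
            v &= ~(0xf << p->shift);        /* clear the old priority field */
            v |= p->priority << p->shift;   /* install the new priority */
            ctrl_outw(v, addr);
    }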
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7780.c b/arch/sh/kernel/cpu/sh4/setup-sh7780.c index 814ddb226531..9aeaa2ddaa28 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7780.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7780.c | |||
@@ -79,25 +79,27 @@ static int __init sh7780_devices_setup(void) | |||
79 | __initcall(sh7780_devices_setup); | 79 | __initcall(sh7780_devices_setup); |
80 | 80 | ||
81 | static struct intc2_data intc2_irq_table[] = { | 81 | static struct intc2_data intc2_irq_table[] = { |
82 | { TIMER_IRQ, 0, 24, 0, INTC_TMU0_MSK, 2 }, | 82 | { 28, 0, 24, 0, 0, 2 }, /* TMU0 */ |
83 | { 21, 1, 0, 0, INTC_RTC_MSK, TIMER_PRIORITY }, | ||
84 | { 22, 1, 1, 0, INTC_RTC_MSK, TIMER_PRIORITY }, | ||
85 | { 23, 1, 2, 0, INTC_RTC_MSK, TIMER_PRIORITY }, | ||
86 | { SCIF0_ERI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, | ||
87 | { SCIF0_RXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, | ||
88 | { SCIF0_BRI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, | ||
89 | { SCIF0_TXI_IRQ, 8, 24, 0, INTC_SCIF0_MSK, SCIF0_PRIORITY }, | ||
90 | 83 | ||
91 | { SCIF1_ERI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, | 84 | { 21, 1, 0, 0, 2, 2 }, |
92 | { SCIF1_RXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, | 85 | { 22, 1, 1, 0, 2, 2 }, |
93 | { SCIF1_BRI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, | 86 | { 23, 1, 2, 0, 2, 2 }, |
94 | { SCIF1_TXI_IRQ, 8, 16, 0, INTC_SCIF1_MSK, SCIF1_PRIORITY }, | ||
95 | 87 | ||
96 | { PCIC0_IRQ, 0x10, 8, 0, INTC_PCIC0_MSK, PCIC0_PRIORITY }, | 88 | { 40, 8, 24, 0, 3, 3 }, /* SCIF0 ERI */ |
97 | { PCIC1_IRQ, 0x10, 0, 0, INTC_PCIC1_MSK, PCIC1_PRIORITY }, | 89 | { 41, 8, 24, 0, 3, 3 }, /* SCIF0 RXI */ |
98 | { PCIC2_IRQ, 0x14, 24, 0, INTC_PCIC2_MSK, PCIC2_PRIORITY }, | 90 | { 42, 8, 24, 0, 3, 3 }, /* SCIF0 BRI */ |
99 | { PCIC3_IRQ, 0x14, 16, 0, INTC_PCIC3_MSK, PCIC3_PRIORITY }, | 91 | { 43, 8, 24, 0, 3, 3 }, /* SCIF0 TXI */ |
100 | { PCIC4_IRQ, 0x14, 8, 0, INTC_PCIC4_MSK, PCIC4_PRIORITY }, | 92 | |
93 | { 76, 8, 16, 0, 4, 3 }, /* SCIF1 ERI */ | ||
94 | { 77, 8, 16, 0, 4, 3 }, /* SCIF1 RXI */ | ||
95 | { 78, 8, 16, 0, 4, 3 }, /* SCIF1 BRI */ | ||
96 | { 79, 8, 16, 0, 4, 3 }, /* SCIF1 TXI */ | ||
97 | |||
98 | { 64, 0x10, 8, 0, 14, 2 }, /* PCIC0 */ | ||
99 | { 65, 0x10, 0, 0, 15, 2 }, /* PCIC1 */ | ||
100 | { 66, 0x14, 24, 0, 16, 2 }, /* PCIC2 */ | ||
101 | { 67, 0x14, 16, 0, 17, 2 }, /* PCIC3 */ | ||
102 | { 68, 0x14, 8, 0, 18, 2 }, /* PCIC4 */ | ||
101 | }; | 103 | }; |
102 | 104 | ||
103 | void __init init_IRQ_intc2(void) | 105 | void __init init_IRQ_intc2(void) |
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c index 7bcc73f9b8df..55f43506995a 100644 --- a/arch/sh/kernel/cpu/sh4/sq.c +++ b/arch/sh/kernel/cpu/sh4/sq.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/vmalloc.h> | 20 | #include <linux/vmalloc.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <asm/io.h> | 22 | #include <linux/io.h> |
23 | #include <asm/page.h> | 23 | #include <asm/page.h> |
24 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
25 | #include <asm/cpu/sq.h> | 25 | #include <asm/cpu/sq.h> |
@@ -67,6 +67,7 @@ void sq_flush_range(unsigned long start, unsigned int len) | |||
67 | /* Wait for completion */ | 67 | /* Wait for completion */ |
68 | store_queue_barrier(); | 68 | store_queue_barrier(); |
69 | } | 69 | } |
70 | EXPORT_SYMBOL(sq_flush_range); | ||
70 | 71 | ||
71 | static inline void sq_mapping_list_add(struct sq_mapping *map) | 72 | static inline void sq_mapping_list_add(struct sq_mapping *map) |
72 | { | 73 | { |
@@ -166,7 +167,7 @@ unsigned long sq_remap(unsigned long phys, unsigned int size, | |||
166 | map->size = size; | 167 | map->size = size; |
167 | map->name = name; | 168 | map->name = name; |
168 | 169 | ||
169 | page = bitmap_find_free_region(sq_bitmap, 0x04000000, | 170 | page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT, |
170 | get_order(map->size)); | 171 | get_order(map->size)); |
171 | if (unlikely(page < 0)) { | 172 | if (unlikely(page < 0)) { |
172 | ret = -ENOSPC; | 173 | ret = -ENOSPC; |
@@ -193,6 +194,7 @@ out: | |||
193 | kmem_cache_free(sq_cache, map); | 194 | kmem_cache_free(sq_cache, map); |
194 | return ret; | 195 | return ret; |
195 | } | 196 | } |
197 | EXPORT_SYMBOL(sq_remap); | ||
196 | 198 | ||
197 | /** | 199 | /** |
198 | * sq_unmap - Unmap a Store Queue allocation | 200 | * sq_unmap - Unmap a Store Queue allocation |
@@ -234,6 +236,7 @@ void sq_unmap(unsigned long vaddr) | |||
234 | 236 | ||
235 | kmem_cache_free(sq_cache, map); | 237 | kmem_cache_free(sq_cache, map); |
236 | } | 238 | } |
239 | EXPORT_SYMBOL(sq_unmap); | ||
237 | 240 | ||
238 | /* | 241 | /* |
239 | * Needlessly complex sysfs interface. Unfortunately it doesn't seem like | 242 | * Needlessly complex sysfs interface. Unfortunately it doesn't seem like |
@@ -402,7 +405,3 @@ module_exit(sq_api_exit); | |||
402 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>"); | 405 | MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>"); |
403 | MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); | 406 | MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); |
404 | MODULE_LICENSE("GPL"); | 407 | MODULE_LICENSE("GPL"); |
405 | |||
406 | EXPORT_SYMBOL(sq_remap); | ||
407 | EXPORT_SYMBOL(sq_unmap); | ||
408 | EXPORT_SYMBOL(sq_flush_range); | ||
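The sq_remap() change above is a real fix, not just a cleanup: bitmap_find_free_region(bitmap, bits, order) takes the size of the bitmap in bits, and here each bit stands for one page of the 64 MiB store-queue window, hence 0x04000000 >> PAGE_SHIFT rather than the raw byte count. A small sketch of the corrected call (the statically sized bitmap is an assumption; the driver allocates its bitmap dynamically):

    /* Sketch of the corrected bitmap_find_free_region() usage. */
    #include <linux/bitmap.h>
    #include <asm/page.h>

    static DECLARE_BITMAP(example_sq_bitmap, 0x04000000 >> PAGE_SHIFT);

    static int example_sq_alloc(unsigned int size)
    {
            /* Second argument is the number of bits being managed
             * (pages in the window), not bytes of address space. */
            return bitmap_find_free_region(example_sq_bitmap,
                                           0x04000000 >> PAGE_SHIFT,
                                           get_order(size));
    }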
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c index a00022722e9e..60340823798a 100644 --- a/arch/sh/kernel/early_printk.c +++ b/arch/sh/kernel/early_printk.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/console.h> | 12 | #include <linux/console.h> |
13 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <asm/io.h> | 15 | #include <linux/io.h> |
16 | 16 | ||
17 | #ifdef CONFIG_SH_STANDARD_BIOS | 17 | #ifdef CONFIG_SH_STANDARD_BIOS |
18 | #include <asm/sh_bios.h> | 18 | #include <asm/sh_bios.h> |
@@ -62,17 +62,9 @@ static struct console bios_console = { | |||
62 | #include <linux/serial_core.h> | 62 | #include <linux/serial_core.h> |
63 | #include "../../../drivers/serial/sh-sci.h" | 63 | #include "../../../drivers/serial/sh-sci.h" |
64 | 64 | ||
65 | #ifdef CONFIG_CPU_SH4 | ||
66 | #define SCIF_REG 0xffe80000 | ||
67 | #elif defined(CONFIG_CPU_SUBTYPE_SH72060) | ||
68 | #define SCIF_REG 0xfffe9800 | ||
69 | #else | ||
70 | #error "Undefined SCIF for this subtype" | ||
71 | #endif | ||
72 | |||
73 | static struct uart_port scif_port = { | 65 | static struct uart_port scif_port = { |
74 | .mapbase = SCIF_REG, | 66 | .mapbase = CONFIG_EARLY_SCIF_CONSOLE_PORT, |
75 | .membase = (char __iomem *)SCIF_REG, | 67 | .membase = (char __iomem *)CONFIG_EARLY_SCIF_CONSOLE_PORT, |
76 | }; | 68 | }; |
77 | 69 | ||
78 | static void scif_sercon_putc(int c) | 70 | static void scif_sercon_putc(int c) |
@@ -113,23 +105,29 @@ static struct console scif_console = { | |||
113 | .index = -1, | 105 | .index = -1, |
114 | }; | 106 | }; |
115 | 107 | ||
108 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) | ||
109 | /* | ||
110 | * Simple SCIF init, primarily aimed at SH7750 and other similar SH-4 | ||
111 | * devices that aren't using sh-ipl+g. | ||
112 | */ | ||
116 | static void scif_sercon_init(int baud) | 113 | static void scif_sercon_init(int baud) |
117 | { | 114 | { |
118 | ctrl_outw(0, SCIF_REG + 8); | 115 | ctrl_outw(0, scif_port.mapbase + 8); |
119 | ctrl_outw(0, SCIF_REG); | 116 | ctrl_outw(0, scif_port.mapbase); |
120 | 117 | ||
121 | /* Set baud rate */ | 118 | /* Set baud rate */ |
122 | ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) / | 119 | ctrl_outb((CONFIG_SH_PCLK_FREQ + 16 * baud) / |
123 | (32 * baud) - 1, SCIF_REG + 4); | 120 | (32 * baud) - 1, scif_port.mapbase + 4); |
124 | 121 | ||
125 | ctrl_outw(12, SCIF_REG + 24); | 122 | ctrl_outw(12, scif_port.mapbase + 24); |
126 | ctrl_outw(8, SCIF_REG + 24); | 123 | ctrl_outw(8, scif_port.mapbase + 24); |
127 | ctrl_outw(0, SCIF_REG + 32); | 124 | ctrl_outw(0, scif_port.mapbase + 32); |
128 | ctrl_outw(0x60, SCIF_REG + 16); | 125 | ctrl_outw(0x60, scif_port.mapbase + 16); |
129 | ctrl_outw(0, SCIF_REG + 36); | 126 | ctrl_outw(0, scif_port.mapbase + 36); |
130 | ctrl_outw(0x30, SCIF_REG + 8); | 127 | ctrl_outw(0x30, scif_port.mapbase + 8); |
131 | } | 128 | } |
132 | #endif | 129 | #endif /* CONFIG_CPU_SH4 && !CONFIG_SH_STANDARD_BIOS */ |
130 | #endif /* CONFIG_EARLY_SCIF_CONSOLE */ | ||
133 | 131 | ||
134 | /* | 132 | /* |
135 | * Setup a default console, if more than one is compiled in, rely on the | 133 | * Setup a default console, if more than one is compiled in, rely on the |
@@ -168,7 +166,7 @@ int __init setup_early_printk(char *opt) | |||
168 | if (!strncmp(buf, "serial", 6)) { | 166 | if (!strncmp(buf, "serial", 6)) { |
169 | early_console = &scif_console; | 167 | early_console = &scif_console; |
170 | 168 | ||
171 | #ifdef CONFIG_CPU_SH4 | 169 | #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_STANDARD_BIOS) |
172 | scif_sercon_init(115200); | 170 | scif_sercon_init(115200); |
173 | #endif | 171 | #endif |
174 | } | 172 | } |
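With the hard-coded SCIF_REG removed, the early console base address now comes from CONFIG_EARLY_SCIF_CONSOLE_PORT, and scif_sercon_init() programs the bit-rate register with Pphi / (32 x baud) - 1, rounding to the nearest step by pre-adding 16 x baud. A quick user-space arithmetic sketch, assuming a 33.33 MHz peripheral clock purely as an example:

    /* Reproduces the SCBRR computation from scif_sercon_init() above;
     * the peripheral clock value is an assumed example. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long pclk = 33333333;  /* stand-in for CONFIG_SH_PCLK_FREQ */
            unsigned long baud = 115200;
            unsigned long scbrr = (pclk + 16 * baud) / (32 * baud) - 1;

            printf("SCBRR for %lu baud: %lu\n", baud, scbrr);  /* prints 8 */
            return 0;
    }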
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S new file mode 100644 index 000000000000..29136a35d7c7 --- /dev/null +++ b/arch/sh/kernel/entry-common.S | |||
@@ -0,0 +1,433 @@ | |||
1 | /* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $ | ||
2 | * | ||
3 | * linux/arch/sh/entry.S | ||
4 | * | ||
5 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | ||
6 | * Copyright (C) 2003 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | ! NOTE: | ||
15 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address | ||
16 | ! to be jumped is too far, but it causes illegal slot exception. | ||
17 | |||
18 | /* | ||
19 | * entry.S contains the system-call and fault low-level handling routines. | ||
20 | * This also contains the timer-interrupt handler, as well as all interrupts | ||
21 | * and faults that can result in a task-switch. | ||
22 | * | ||
23 | * NOTE: This code handles signal-recognition, which happens every time | ||
24 | * after a timer-interrupt and after each system call. | ||
25 | * | ||
26 | * NOTE: This code uses a convention that instructions in the delay slot | ||
27 | * of a transfer-control instruction are indented by an extra space, thus: | ||
28 | * | ||
29 | * jmp @k0 ! control-transfer instruction | ||
30 | * ldc k1, ssr ! delay slot | ||
31 | * | ||
32 | * Stack layout in 'ret_from_syscall': | ||
33 | * ptrace needs to have all regs on the stack. | ||
34 | * if the order here is changed, it needs to be | ||
35 | * updated in ptrace.c and ptrace.h | ||
36 | * | ||
37 | * r0 | ||
38 | * ... | ||
39 | * r15 = stack pointer | ||
40 | * spc | ||
41 | * pr | ||
42 | * ssr | ||
43 | * gbr | ||
44 | * mach | ||
45 | * macl | ||
46 | * syscall # | ||
47 | * | ||
48 | */ | ||
49 | |||
50 | #if defined(CONFIG_PREEMPT) | ||
51 | # define preempt_stop() cli | ||
52 | #else | ||
53 | # define preempt_stop() | ||
54 | # define resume_kernel __restore_all | ||
55 | #endif | ||
56 | |||
57 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | ||
58 | ! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. | ||
59 | ! If both are configured, handle the debug traps (breakpoints) in SW, | ||
60 | ! but still allow BIOS traps to FW. | ||
61 | |||
62 | .align 2 | ||
63 | debug_kernel: | ||
64 | #if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) | ||
65 | /* Force BIOS call to FW (debug_trap put TRA in r8) */ | ||
66 | mov r8,r0 | ||
67 | shlr2 r0 | ||
68 | cmp/eq #0x3f,r0 | ||
69 | bt debug_kernel_fw | ||
70 | #endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ | ||
71 | |||
72 | debug_enter: | ||
73 | #if defined(CONFIG_SH_KGDB) | ||
74 | /* Jump to kgdb, pass stacked regs as arg */ | ||
75 | debug_kernel_sw: | ||
76 | mov.l 3f, r0 | ||
77 | jmp @r0 | ||
78 | mov r15, r4 | ||
79 | .align 2 | ||
80 | 3: .long kgdb_handle_exception | ||
81 | #endif /* CONFIG_SH_KGDB */ | ||
82 | |||
83 | #endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ | ||
84 | |||
85 | |||
86 | .align 2 | ||
87 | debug_trap: | ||
88 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | ||
89 | mov #OFF_SR, r0 | ||
90 | mov.l @(r0,r15), r0 ! get status register | ||
91 | shll r0 | ||
92 | shll r0 ! kernel space? | ||
93 | bt/s debug_kernel | ||
94 | #endif | ||
95 | mov.l @r15, r0 ! Restore R0 value | ||
96 | mov.l 1f, r8 | ||
97 | jmp @r8 | ||
98 | nop | ||
99 | |||
100 | .align 2 | ||
101 | ENTRY(exception_error) | ||
102 | ! | ||
103 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
104 | mov.l 3f, r0 | ||
105 | jsr @r0 | ||
106 | nop | ||
107 | #endif | ||
108 | sti | ||
109 | mov.l 2f, r0 | ||
110 | jmp @r0 | ||
111 | nop | ||
112 | |||
113 | ! | ||
114 | .align 2 | ||
115 | 1: .long break_point_trap_software | ||
116 | 2: .long do_exception_error | ||
117 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
118 | 3: .long trace_hardirqs_on | ||
119 | #endif | ||
120 | |||
121 | .align 2 | ||
122 | ret_from_exception: | ||
123 | preempt_stop() | ||
124 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
125 | mov.l 4f, r0 | ||
126 | jsr @r0 | ||
127 | nop | ||
128 | #endif | ||
129 | ENTRY(ret_from_irq) | ||
130 | ! | ||
131 | mov #OFF_SR, r0 | ||
132 | mov.l @(r0,r15), r0 ! get status register | ||
133 | shll r0 | ||
134 | shll r0 ! kernel space? | ||
135 | get_current_thread_info r8, r0 | ||
136 | bt resume_kernel ! Yes, it's from kernel, go back soon | ||
137 | |||
138 | #ifdef CONFIG_PREEMPT | ||
139 | bra resume_userspace | ||
140 | nop | ||
141 | ENTRY(resume_kernel) | ||
142 | mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count | ||
143 | tst r0, r0 | ||
144 | bf noresched | ||
145 | need_resched: | ||
146 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
147 | tst #_TIF_NEED_RESCHED, r0 ! need_resched set? | ||
148 | bt noresched | ||
149 | |||
150 | mov #OFF_SR, r0 | ||
151 | mov.l @(r0,r15), r0 ! get status register | ||
152 | and #0xf0, r0 ! interrupts off (exception path)? | ||
153 | cmp/eq #0xf0, r0 | ||
154 | bt noresched | ||
155 | |||
156 | mov.l 1f, r0 | ||
157 | mov.l r0, @(TI_PRE_COUNT,r8) | ||
158 | |||
159 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
160 | mov.l 3f, r0 | ||
161 | jsr @r0 | ||
162 | nop | ||
163 | #endif | ||
164 | sti | ||
165 | mov.l 2f, r0 | ||
166 | jsr @r0 | ||
167 | nop | ||
168 | mov #0, r0 | ||
169 | mov.l r0, @(TI_PRE_COUNT,r8) | ||
170 | cli | ||
171 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
172 | mov.l 4f, r0 | ||
173 | jsr @r0 | ||
174 | nop | ||
175 | #endif | ||
176 | |||
177 | bra need_resched | ||
178 | nop | ||
179 | |||
180 | noresched: | ||
181 | bra __restore_all | ||
182 | nop | ||
183 | |||
184 | .align 2 | ||
185 | 1: .long PREEMPT_ACTIVE | ||
186 | 2: .long schedule | ||
187 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
188 | 3: .long trace_hardirqs_on | ||
189 | 4: .long trace_hardirqs_off | ||
190 | #endif | ||
191 | #endif | ||
192 | |||
193 | ENTRY(resume_userspace) | ||
194 | ! r8: current_thread_info | ||
195 | cli | ||
196 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
197 | mov.l 5f, r0 | ||
198 | jsr @r0 | ||
199 | nop | ||
200 | #endif | ||
201 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
202 | tst #_TIF_WORK_MASK, r0 | ||
203 | bt/s __restore_all | ||
204 | tst #_TIF_NEED_RESCHED, r0 | ||
205 | |||
206 | .align 2 | ||
207 | work_pending: | ||
208 | ! r0: current_thread_info->flags | ||
209 | ! r8: current_thread_info | ||
210 | ! t: result of "tst #_TIF_NEED_RESCHED, r0" | ||
211 | bf/s work_resched | ||
212 | tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 | ||
213 | work_notifysig: | ||
214 | bt/s __restore_all | ||
215 | mov r15, r4 | ||
216 | mov r12, r5 ! set arg1(save_r0) | ||
217 | mov r0, r6 | ||
218 | mov.l 2f, r1 | ||
219 | mov.l 3f, r0 | ||
220 | jmp @r1 | ||
221 | lds r0, pr | ||
222 | work_resched: | ||
223 | #ifndef CONFIG_PREEMPT | ||
224 | ! gUSA handling | ||
225 | mov.l @(OFF_SP,r15), r0 ! get user space stack pointer | ||
226 | mov r0, r1 | ||
227 | shll r0 | ||
228 | bf/s 1f | ||
229 | shll r0 | ||
230 | bf/s 1f | ||
231 | mov #OFF_PC, r0 | ||
232 | ! SP >= 0xc0000000 : gUSA mark | ||
233 | mov.l @(r0,r15), r2 ! get user space PC (program counter) | ||
234 | mov.l @(OFF_R0,r15), r3 ! end point | ||
235 | cmp/hs r3, r2 ! r2 >= r3? | ||
236 | bt 1f | ||
237 | add r3, r1 ! rewind point #2 | ||
238 | mov.l r1, @(r0,r15) ! reset PC to rewind point #2 | ||
239 | ! | ||
240 | 1: | ||
241 | #endif | ||
242 | mov.l 1f, r1 | ||
243 | jsr @r1 ! schedule | ||
244 | nop | ||
245 | cli | ||
246 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
247 | mov.l 5f, r0 | ||
248 | jsr @r0 | ||
249 | nop | ||
250 | #endif | ||
251 | ! | ||
252 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
253 | tst #_TIF_WORK_MASK, r0 | ||
254 | bt __restore_all | ||
255 | bra work_pending | ||
256 | tst #_TIF_NEED_RESCHED, r0 | ||
257 | |||
258 | .align 2 | ||
259 | 1: .long schedule | ||
260 | 2: .long do_notify_resume | ||
261 | 3: .long restore_all | ||
262 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
263 | 4: .long trace_hardirqs_on | ||
264 | 5: .long trace_hardirqs_off | ||
265 | #endif | ||
266 | |||
267 | .align 2 | ||
268 | syscall_exit_work: | ||
269 | ! r0: current_thread_info->flags | ||
270 | ! r8: current_thread_info | ||
271 | tst #_TIF_SYSCALL_TRACE, r0 | ||
272 | bt/s work_pending | ||
273 | tst #_TIF_NEED_RESCHED, r0 | ||
274 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
275 | mov.l 5f, r0 | ||
276 | jsr @r0 | ||
277 | nop | ||
278 | #endif | ||
279 | sti | ||
280 | ! XXX setup arguments... | ||
281 | mov.l 4f, r0 ! do_syscall_trace | ||
282 | jsr @r0 | ||
283 | nop | ||
284 | bra resume_userspace | ||
285 | nop | ||
286 | |||
287 | .align 2 | ||
288 | syscall_trace_entry: | ||
289 | ! Yes it is traced. | ||
290 | ! XXX setup arguments... | ||
291 | mov.l 4f, r11 ! Call do_syscall_trace which notifies | ||
292 | jsr @r11 ! superior (will chomp R[0-7]) | ||
293 | nop | ||
294 | ! Reload R0-R4 from kernel stack, where the | ||
295 | ! parent may have modified them using | ||
296 | ! ptrace(POKEUSR). (Note that R0-R2 are | ||
297 | ! used by the system call handler directly | ||
298 | ! from the kernel stack anyway, so don't need | ||
299 | ! to be reloaded here.) This allows the parent | ||
300 | ! to rewrite system calls and args on the fly. | ||
301 | mov.l @(OFF_R4,r15), r4 ! arg0 | ||
302 | mov.l @(OFF_R5,r15), r5 | ||
303 | mov.l @(OFF_R6,r15), r6 | ||
304 | mov.l @(OFF_R7,r15), r7 ! arg3 | ||
305 | mov.l @(OFF_R3,r15), r3 ! syscall_nr | ||
306 | ! | ||
307 | mov.l 2f, r10 ! Number of syscalls | ||
308 | cmp/hs r10, r3 | ||
309 | bf syscall_call | ||
310 | mov #-ENOSYS, r0 | ||
311 | bra syscall_exit | ||
312 | mov.l r0, @(OFF_R0,r15) ! Return value | ||
313 | |||
314 | __restore_all: | ||
315 | mov.l 1f, r0 | ||
316 | jmp @r0 | ||
317 | nop | ||
318 | |||
319 | .align 2 | ||
320 | 1: .long restore_all | ||
321 | |||
322 | .align 2 | ||
323 | not_syscall_tra: | ||
324 | bra debug_trap | ||
325 | nop | ||
326 | |||
327 | .align 2 | ||
328 | syscall_badsys: ! Bad syscall number | ||
329 | mov #-ENOSYS, r0 | ||
330 | bra resume_userspace | ||
331 | mov.l r0, @(OFF_R0,r15) ! Return value | ||
332 | |||
333 | |||
334 | /* | ||
335 | * Syscall interface: | ||
336 | * | ||
337 | * Syscall #: R3 | ||
338 | * Arguments #0 to #3: R4--R7 | ||
339 | * Arguments #4 to #6: R0, R1, R2 | ||
340 | * TRA: (number of arguments + 0x10) x 4 | ||
341 | * | ||
342 | * This code also handles delegating other traps to the BIOS/gdb stub | ||
343 | * according to: | ||
344 | * | ||
345 | * Trap number | ||
346 | * (TRA>>2) Purpose | ||
347 | * -------- ------- | ||
348 | * 0x0-0xf old syscall ABI | ||
349 | * 0x10-0x1f new syscall ABI | ||
350 | * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. | ||
351 | * | ||
352 | * Note: When we're first called, the TRA value must be shifted | ||
353 | * right 2 bits in order to get the value that was used as the "trapa" | ||
354 | * argument. | ||
355 | */ | ||
356 | |||
357 | .align 2 | ||
358 | .globl ret_from_fork | ||
359 | ret_from_fork: | ||
360 | mov.l 1f, r8 | ||
361 | jsr @r8 | ||
362 | mov r0, r4 | ||
363 | bra syscall_exit | ||
364 | nop | ||
365 | .align 2 | ||
366 | 1: .long schedule_tail | ||
367 | ! | ||
368 | ENTRY(system_call) | ||
369 | #if !defined(CONFIG_CPU_SH2) | ||
370 | mov.l 1f, r9 | ||
371 | mov.l @r9, r8 ! Read from TRA (Trap Address) Register | ||
372 | #endif | ||
373 | ! | ||
374 | ! Is the trap argument >= 0x20? (TRA will be >= 0x80) | ||
375 | mov #0x7f, r9 | ||
376 | cmp/hi r9, r8 | ||
377 | bt/s not_syscall_tra | ||
378 | mov #OFF_TRA, r9 | ||
379 | add r15, r9 | ||
380 | mov.l r8, @r9 ! set TRA value to tra | ||
381 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
382 | mov.l 5f, r10 | ||
383 | jsr @r10 | ||
384 | nop | ||
385 | #endif | ||
386 | sti | ||
387 | |||
388 | ! | ||
389 | get_current_thread_info r8, r10 | ||
390 | mov.l @(TI_FLAGS,r8), r8 | ||
391 | mov #_TIF_SYSCALL_TRACE, r10 | ||
392 | tst r10, r8 | ||
393 | bf syscall_trace_entry | ||
394 | ! | ||
395 | mov.l 2f, r8 ! Number of syscalls | ||
396 | cmp/hs r8, r3 | ||
397 | bt syscall_badsys | ||
398 | ! | ||
399 | syscall_call: | ||
400 | shll2 r3 ! x4 | ||
401 | mov.l 3f, r8 ! Load the address of sys_call_table | ||
402 | add r8, r3 | ||
403 | mov.l @r3, r8 | ||
404 | jsr @r8 ! jump to specific syscall handler | ||
405 | nop | ||
406 | mov.l @(OFF_R0,r15), r12 ! save r0 | ||
407 | mov.l r0, @(OFF_R0,r15) ! save the return value | ||
408 | ! | ||
409 | syscall_exit: | ||
410 | cli | ||
411 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
412 | mov.l 6f, r0 | ||
413 | jsr @r0 | ||
414 | nop | ||
415 | #endif | ||
416 | ! | ||
417 | get_current_thread_info r8, r0 | ||
418 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | ||
419 | tst #_TIF_ALLWORK_MASK, r0 | ||
420 | bf syscall_exit_work | ||
421 | bra __restore_all | ||
422 | nop | ||
423 | .align 2 | ||
424 | #if !defined(CONFIG_CPU_SH2) | ||
425 | 1: .long TRA | ||
426 | #endif | ||
427 | 2: .long NR_syscalls | ||
428 | 3: .long sys_call_table | ||
429 | 4: .long do_syscall_trace | ||
430 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
431 | 5: .long trace_hardirqs_on | ||
432 | 6: .long trace_hardirqs_off | ||
433 | #endif | ||
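The syscall-interface comment in this new file pins down the trap convention: syscall number in R3, the first four arguments in R4-R7, and a trapa immediate of 0x10 plus the argument count, so the TRA register (which latches the immediate shifted left by two) reads back as (args + 0x10) x 4. A minimal user-space sketch under those assumptions; the wrapper name and constraints are illustrative, not taken from any libc:

    /* Invoke exit(0) through the "new" trap range described above:
     * R3 = syscall number, R4 = first argument, trapa #0x11 for one
     * argument, result back in R0.  __NR_exit == 1 is assumed here. */
    static inline long sh_syscall1(long nr, long arg0)
    {
            register long r3 __asm__("r3") = nr;
            register long r4 __asm__("r4") = arg0;
            register long r0 __asm__("r0");

            __asm__ __volatile__("trapa #0x11"
                                 : "=r" (r0)
                                 : "r" (r3), "r" (r4)
                                 : "memory", "t");
            return r0;
    }

    int main(void)
    {
            sh_syscall1(1 /* __NR_exit */, 0);
            return 0;       /* not reached */
    }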
diff --git a/arch/sh/kernel/head.S b/arch/sh/kernel/head.S index f5f53d14f245..6aca4bc6ec5d 100644 --- a/arch/sh/kernel/head.S +++ b/arch/sh/kernel/head.S | |||
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page) | |||
33 | .long 0x00360000 /* INITRD_START */ | 33 | .long 0x00360000 /* INITRD_START */ |
34 | .long 0x000a0000 /* INITRD_SIZE */ | 34 | .long 0x000a0000 /* INITRD_SIZE */ |
35 | .long 0 | 35 | .long 0 |
36 | .balign 4096,0,4096 | 36 | .balign PAGE_SIZE,0,PAGE_SIZE |
37 | 37 | ||
38 | .text | 38 | .text |
39 | /* | 39 | /* |
@@ -53,8 +53,10 @@ ENTRY(_stext) | |||
53 | ldc r0, sr | 53 | ldc r0, sr |
54 | ! Initialize global interrupt mask | 54 | ! Initialize global interrupt mask |
55 | mov #0, r0 | 55 | mov #0, r0 |
56 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
56 | ldc r0, r6_bank | 57 | ldc r0, r6_bank |
57 | 58 | #endif | |
59 | |||
58 | /* | 60 | /* |
59 | * Prefetch if possible to reduce cache miss penalty. | 61 | * Prefetch if possible to reduce cache miss penalty. |
60 | * | 62 | * |
@@ -68,11 +70,14 @@ ENTRY(_stext) | |||
68 | ! | 70 | ! |
69 | mov.l 2f, r0 | 71 | mov.l 2f, r0 |
70 | mov r0, r15 ! Set initial r15 (stack pointer) | 72 | mov r0, r15 ! Set initial r15 (stack pointer) |
71 | mov #(THREAD_SIZE >> 8), r1 | 73 | mov #(THREAD_SIZE >> 10), r1 |
72 | shll8 r1 ! r1 = THREAD_SIZE | 74 | shll8 r1 ! r1 = THREAD_SIZE |
75 | shll2 r1 | ||
73 | sub r1, r0 ! | 76 | sub r1, r0 ! |
77 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
74 | ldc r0, r7_bank ! ... and initial thread_info | 78 | ldc r0, r7_bank ! ... and initial thread_info |
75 | 79 | #endif | |
80 | |||
76 | ! Clear BSS area | 81 | ! Clear BSS area |
77 | mov.l 3f, r1 | 82 | mov.l 3f, r1 |
78 | add #4, r1 | 83 | add #4, r1 |
@@ -95,7 +100,11 @@ ENTRY(_stext) | |||
95 | nop | 100 | nop |
96 | 101 | ||
97 | .balign 4 | 102 | .balign 4 |
103 | #if defined(CONFIG_CPU_SH2) | ||
104 | 1: .long 0x000000F0 ! IMASK=0xF | ||
105 | #else | ||
98 | 1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF | 106 | 1: .long 0x400080F0 ! MD=1, RB=0, BL=0, FD=1, IMASK=0xF |
107 | #endif | ||
99 | 2: .long init_thread_union+THREAD_SIZE | 108 | 2: .long init_thread_union+THREAD_SIZE |
100 | 3: .long __bss_start | 109 | 3: .long __bss_start |
101 | 4: .long _end | 110 | 4: .long _end |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 944128ce9706..67be2b6e8cd1 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/kernel_stat.h> | 12 | #include <linux/kernel_stat.h> |
13 | #include <linux/seq_file.h> | 13 | #include <linux/seq_file.h> |
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <asm/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
17 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
18 | #include <asm/thread_info.h> | 18 | #include <asm/thread_info.h> |
@@ -78,15 +78,16 @@ union irq_ctx { | |||
78 | u32 stack[THREAD_SIZE/sizeof(u32)]; | 78 | u32 stack[THREAD_SIZE/sizeof(u32)]; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static union irq_ctx *hardirq_ctx[NR_CPUS]; | 81 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; |
82 | static union irq_ctx *softirq_ctx[NR_CPUS]; | 82 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, | 85 | asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, |
86 | unsigned long r6, unsigned long r7, | 86 | unsigned long r6, unsigned long r7, |
87 | struct pt_regs regs) | 87 | struct pt_regs __regs) |
88 | { | 88 | { |
89 | struct pt_regs *old_regs = set_irq_regs(®s); | 89 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
90 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
90 | int irq; | 91 | int irq; |
91 | #ifdef CONFIG_4KSTACKS | 92 | #ifdef CONFIG_4KSTACKS |
92 | union irq_ctx *curctx, *irqctx; | 93 | union irq_ctx *curctx, *irqctx; |
@@ -111,7 +112,7 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, | |||
111 | #endif | 112 | #endif |
112 | 113 | ||
113 | #ifdef CONFIG_CPU_HAS_INTEVT | 114 | #ifdef CONFIG_CPU_HAS_INTEVT |
114 | irq = (ctrl_inl(INTEVT) >> 5) - 16; | 115 | irq = evt2irq(ctrl_inl(INTEVT)); |
115 | #else | 116 | #else |
116 | irq = r4; | 117 | irq = r4; |
117 | #endif | 118 | #endif |
@@ -135,17 +136,24 @@ asmlinkage int do_IRQ(unsigned long r4, unsigned long r5, | |||
135 | irqctx->tinfo.task = curctx->tinfo.task; | 136 | irqctx->tinfo.task = curctx->tinfo.task; |
136 | irqctx->tinfo.previous_sp = current_stack_pointer; | 137 | irqctx->tinfo.previous_sp = current_stack_pointer; |
137 | 138 | ||
139 | /* | ||
140 | * Copy the softirq bits in preempt_count so that the | ||
141 | * softirq checks work in the hardirq context. | ||
142 | */ | ||
143 | irqctx->tinfo.preempt_count = | ||
144 | (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | | ||
145 | (curctx->tinfo.preempt_count & SOFTIRQ_MASK); | ||
146 | |||
138 | __asm__ __volatile__ ( | 147 | __asm__ __volatile__ ( |
139 | "mov %0, r4 \n" | 148 | "mov %0, r4 \n" |
140 | "mov r15, r9 \n" | 149 | "mov r15, r8 \n" |
141 | "jsr @%1 \n" | 150 | "jsr @%1 \n" |
142 | /* switch to the irq stack */ | 151 | /* switch to the irq stack */ |
143 | " mov %2, r15 \n" | 152 | " mov %2, r15 \n" |
144 | /* restore the stack (ring zero) */ | 153 | /* restore the stack (ring zero) */ |
145 | "mov r9, r15 \n" | 154 | "mov r8, r15 \n" |
146 | : /* no outputs */ | 155 | : /* no outputs */ |
147 | : "r" (irq), "r" (generic_handle_irq), "r" (isp) | 156 | : "r" (irq), "r" (generic_handle_irq), "r" (isp) |
148 | /* XXX: A somewhat excessive clobber list? -PFM */ | ||
149 | : "memory", "r0", "r1", "r2", "r3", "r4", | 157 | : "memory", "r0", "r1", "r2", "r3", "r4", |
150 | "r5", "r6", "r7", "r8", "t", "pr" | 158 | "r5", "r6", "r7", "r8", "t", "pr" |
151 | ); | 159 | ); |
@@ -193,7 +201,7 @@ void irq_ctx_init(int cpu) | |||
193 | irqctx->tinfo.task = NULL; | 201 | irqctx->tinfo.task = NULL; |
194 | irqctx->tinfo.exec_domain = NULL; | 202 | irqctx->tinfo.exec_domain = NULL; |
195 | irqctx->tinfo.cpu = cpu; | 203 | irqctx->tinfo.cpu = cpu; |
196 | irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET; | 204 | irqctx->tinfo.preempt_count = 0; |
197 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 205 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
198 | 206 | ||
199 | softirq_ctx[cpu] = irqctx; | 207 | softirq_ctx[cpu] = irqctx; |
@@ -239,13 +247,38 @@ asmlinkage void do_softirq(void) | |||
239 | "mov r9, r15 \n" | 247 | "mov r9, r15 \n" |
240 | : /* no outputs */ | 248 | : /* no outputs */ |
241 | : "r" (__do_softirq), "r" (isp) | 249 | : "r" (__do_softirq), "r" (isp) |
242 | /* XXX: A somewhat excessive clobber list? -PFM */ | ||
243 | : "memory", "r0", "r1", "r2", "r3", "r4", | 250 | : "memory", "r0", "r1", "r2", "r3", "r4", |
244 | "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" | 251 | "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" |
245 | ); | 252 | ); |
253 | |||
254 | /* | ||
255 | * Shouldn't happen, we returned above if in_interrupt(): | ||
256 | */ | ||
257 | WARN_ON_ONCE(softirq_count()); | ||
246 | } | 258 | } |
247 | 259 | ||
248 | local_irq_restore(flags); | 260 | local_irq_restore(flags); |
249 | } | 261 | } |
250 | EXPORT_SYMBOL(do_softirq); | 262 | EXPORT_SYMBOL(do_softirq); |
251 | #endif | 263 | #endif |
264 | |||
265 | void __init init_IRQ(void) | ||
266 | { | ||
267 | #ifdef CONFIG_CPU_HAS_PINT_IRQ | ||
268 | init_IRQ_pint(); | ||
269 | #endif | ||
270 | |||
271 | #ifdef CONFIG_CPU_HAS_INTC2_IRQ | ||
272 | init_IRQ_intc2(); | ||
273 | #endif | ||
274 | |||
275 | #ifdef CONFIG_CPU_HAS_IPR_IRQ | ||
276 | init_IRQ_ipr(); | ||
277 | #endif | ||
278 | |||
279 | /* Perform the machine specific initialisation */ | ||
280 | if (sh_mv.mv_init_irq) | ||
281 | sh_mv.mv_init_irq(); | ||
282 | |||
283 | irq_ctx_init(smp_processor_id()); | ||
284 | } | ||
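do_IRQ() above now derives the IRQ number with evt2irq() instead of open-coding the INTEVT conversion. Assuming the conventional SH definition of that macro (exception code shifted right by five, minus the 16 reserved vectors), the two forms are identical; the sketch below also shows that INTEVT 0x400, the TMU0 underflow vector, comes out as IRQ 16, matching the sh7750_ipr_map entry earlier in this diff:

    /* Equivalence check; the evt2irq() definition here is assumed, not
     * quoted from asm/irq.h. */
    #include <assert.h>

    #define evt2irq(evt)    (((evt) >> 5) - 16)

    int main(void)
    {
            unsigned long intevt = 0x400;   /* TMU0 TUNI exception code */

            assert(evt2irq(intevt) == (intevt >> 5) - 16);
            assert(evt2irq(intevt) == 16);  /* TMU0 IRQ, as in sh7750_ipr_map */
            return 0;
    }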
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c index a52b13ac6b7f..f3e2631be144 100644 --- a/arch/sh/kernel/process.c +++ b/arch/sh/kernel/process.c | |||
@@ -385,10 +385,11 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne | |||
385 | 385 | ||
386 | asmlinkage int sys_fork(unsigned long r4, unsigned long r5, | 386 | asmlinkage int sys_fork(unsigned long r4, unsigned long r5, |
387 | unsigned long r6, unsigned long r7, | 387 | unsigned long r6, unsigned long r7, |
388 | struct pt_regs regs) | 388 | struct pt_regs __regs) |
389 | { | 389 | { |
390 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
390 | #ifdef CONFIG_MMU | 391 | #ifdef CONFIG_MMU |
391 | return do_fork(SIGCHLD, regs.regs[15], ®s, 0, NULL, NULL); | 392 | return do_fork(SIGCHLD, regs->regs[15], regs, 0, NULL, NULL); |
392 | #else | 393 | #else |
393 | /* fork almost works, enough to trick you into looking elsewhere :-( */ | 394 | /* fork almost works, enough to trick you into looking elsewhere :-( */ |
394 | return -EINVAL; | 395 | return -EINVAL; |
@@ -398,11 +399,12 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5, | |||
398 | asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, | 399 | asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, |
399 | unsigned long parent_tidptr, | 400 | unsigned long parent_tidptr, |
400 | unsigned long child_tidptr, | 401 | unsigned long child_tidptr, |
401 | struct pt_regs regs) | 402 | struct pt_regs __regs) |
402 | { | 403 | { |
404 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
403 | if (!newsp) | 405 | if (!newsp) |
404 | newsp = regs.regs[15]; | 406 | newsp = regs->regs[15]; |
405 | return do_fork(clone_flags, newsp, ®s, 0, | 407 | return do_fork(clone_flags, newsp, regs, 0, |
406 | (int __user *)parent_tidptr, (int __user *)child_tidptr); | 408 | (int __user *)parent_tidptr, (int __user *)child_tidptr); |
407 | } | 409 | } |
408 | 410 | ||
@@ -418,9 +420,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, | |||
418 | */ | 420 | */ |
419 | asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, | 421 | asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, |
420 | unsigned long r6, unsigned long r7, | 422 | unsigned long r6, unsigned long r7, |
421 | struct pt_regs regs) | 423 | struct pt_regs __regs) |
422 | { | 424 | { |
423 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], ®s, | 425 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
426 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->regs[15], regs, | ||
424 | 0, NULL, NULL); | 427 | 0, NULL, NULL); |
425 | } | 428 | } |
426 | 429 | ||
@@ -429,8 +432,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, | |||
429 | */ | 432 | */ |
430 | asmlinkage int sys_execve(char *ufilename, char **uargv, | 433 | asmlinkage int sys_execve(char *ufilename, char **uargv, |
431 | char **uenvp, unsigned long r7, | 434 | char **uenvp, unsigned long r7, |
432 | struct pt_regs regs) | 435 | struct pt_regs __regs) |
433 | { | 436 | { |
437 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
434 | int error; | 438 | int error; |
435 | char *filename; | 439 | char *filename; |
436 | 440 | ||
@@ -442,7 +446,7 @@ asmlinkage int sys_execve(char *ufilename, char **uargv, | |||
442 | error = do_execve(filename, | 446 | error = do_execve(filename, |
443 | (char __user * __user *)uargv, | 447 | (char __user * __user *)uargv, |
444 | (char __user * __user *)uenvp, | 448 | (char __user * __user *)uenvp, |
445 | ®s); | 449 | regs); |
446 | if (error == 0) { | 450 | if (error == 0) { |
447 | task_lock(current); | 451 | task_lock(current); |
448 | current->ptrace &= ~PT_DTRACE; | 452 | current->ptrace &= ~PT_DTRACE; |
@@ -472,9 +476,7 @@ unsigned long get_wchan(struct task_struct *p) | |||
472 | return pc; | 476 | return pc; |
473 | } | 477 | } |
474 | 478 | ||
475 | asmlinkage void break_point_trap(unsigned long r4, unsigned long r5, | 479 | asmlinkage void break_point_trap(void) |
476 | unsigned long r6, unsigned long r7, | ||
477 | struct pt_regs regs) | ||
478 | { | 480 | { |
479 | /* Clear tracing. */ | 481 | /* Clear tracing. */ |
480 | #if defined(CONFIG_CPU_SH4A) | 482 | #if defined(CONFIG_CPU_SH4A) |
@@ -492,8 +494,10 @@ asmlinkage void break_point_trap(unsigned long r4, unsigned long r5, | |||
492 | 494 | ||
493 | asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5, | 495 | asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5, |
494 | unsigned long r6, unsigned long r7, | 496 | unsigned long r6, unsigned long r7, |
495 | struct pt_regs regs) | 497 | struct pt_regs __regs) |
496 | { | 498 | { |
497 | regs.pc -= 2; | 499 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
500 | |||
501 | regs->pc -= 2; | ||
498 | force_sig(SIGTRAP, current); | 502 | force_sig(SIGTRAP, current); |
499 | } | 503 | } |
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S index 8221b37c9773..c66cb3209db5 100644 --- a/arch/sh/kernel/relocate_kernel.S +++ b/arch/sh/kernel/relocate_kernel.S | |||
@@ -7,11 +7,9 @@ | |||
7 | * This source code is licensed under the GNU General Public License, | 7 | * This source code is licensed under the GNU General Public License, |
8 | * Version 2. See the file COPYING for more details. | 8 | * Version 2. See the file COPYING for more details. |
9 | */ | 9 | */ |
10 | |||
11 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
12 | 11 | #include <asm/addrspace.h> | |
13 | #define PAGE_SIZE 4096 /* must be same value as in <asm/page.h> */ | 12 | #include <asm/page.h> |
14 | |||
15 | 13 | ||
16 | .globl relocate_new_kernel | 14 | .globl relocate_new_kernel |
17 | relocate_new_kernel: | 15 | relocate_new_kernel: |
@@ -20,8 +18,8 @@ relocate_new_kernel: | |||
20 | /* r6 = start_address */ | 18 | /* r6 = start_address */ |
21 | /* r7 = vbr_reg */ | 19 | /* r7 = vbr_reg */ |
22 | 20 | ||
23 | mov.l 10f,r8 /* 4096 */ | 21 | mov.l 10f,r8 /* PAGE_SIZE */ |
24 | mov.l 11f,r9 /* 0xa0000000 */ | 22 | mov.l 11f,r9 /* P2SEG */ |
25 | 23 | ||
26 | /* stack setting */ | 24 | /* stack setting */ |
27 | add r8,r5 | 25 | add r8,r5 |
@@ -32,7 +30,7 @@ relocate_new_kernel: | |||
32 | 0: | 30 | 0: |
33 | mov.l @r4+,r0 /* cmd = *ind++ */ | 31 | mov.l @r4+,r0 /* cmd = *ind++ */ |
34 | 32 | ||
35 | 1: /* addr = (cmd | 0xa0000000) & 0xfffffff0 */ | 33 | 1: /* addr = (cmd | P2SEG) & 0xfffffff0 */ |
36 | mov r0,r2 | 34 | mov r0,r2 |
37 | or r9,r2 | 35 | or r9,r2 |
38 | mov #-16,r1 | 36 | mov #-16,r1 |
@@ -92,7 +90,7 @@ relocate_new_kernel: | |||
92 | 10: | 90 | 10: |
93 | .long PAGE_SIZE | 91 | .long PAGE_SIZE |
94 | 11: | 92 | 11: |
95 | .long 0xa0000000 | 93 | .long P2SEG |
96 | 94 | ||
97 | relocate_new_kernel_end: | 95 | relocate_new_kernel_end: |
98 | 96 | ||
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 36d86f9ac38a..696ca75752d9 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -392,6 +392,7 @@ static int __init topology_init(void) | |||
392 | subsys_initcall(topology_init); | 392 | subsys_initcall(topology_init); |
393 | 393 | ||
394 | static const char *cpu_name[] = { | 394 | static const char *cpu_name[] = { |
395 | [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", | ||
395 | [CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300", | 396 | [CPU_SH7604] = "SH7604", [CPU_SH7300] = "SH7300", |
396 | [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", | 397 | [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", |
397 | [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", | 398 | [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", |
@@ -404,6 +405,7 @@ static const char *cpu_name[] = { | |||
404 | [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", | 405 | [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", |
405 | [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", | 406 | [CPU_SH7770] = "SH7770", [CPU_SH7780] = "SH7780", |
406 | [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", | 407 | [CPU_SH7781] = "SH7781", [CPU_SH7343] = "SH7343", |
408 | [CPU_SH7785] = "SH7785", | ||
407 | [CPU_SH_NONE] = "Unknown" | 409 | [CPU_SH_NONE] = "Unknown" |
408 | }; | 410 | }; |
409 | 411 | ||
diff --git a/arch/sh/kernel/sh_ksyms.c b/arch/sh/kernel/sh_ksyms.c index 8a2fd19dc9eb..c706f3bfd897 100644 --- a/arch/sh/kernel/sh_ksyms.c +++ b/arch/sh/kernel/sh_ksyms.c | |||
@@ -73,8 +73,6 @@ DECLARE_EXPORT(__lshrdi3); | |||
73 | DECLARE_EXPORT(__movstr); | 73 | DECLARE_EXPORT(__movstr); |
74 | DECLARE_EXPORT(__movstrSI16); | 74 | DECLARE_EXPORT(__movstrSI16); |
75 | 75 | ||
76 | EXPORT_SYMBOL(strcpy); | ||
77 | |||
78 | #ifdef CONFIG_CPU_SH4 | 76 | #ifdef CONFIG_CPU_SH4 |
79 | DECLARE_EXPORT(__movstr_i4_even); | 77 | DECLARE_EXPORT(__movstr_i4_even); |
80 | DECLARE_EXPORT(__movstr_i4_odd); | 78 | DECLARE_EXPORT(__movstr_i4_odd); |
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c index 5213f5bc6ce0..50d7c4993bef 100644 --- a/arch/sh/kernel/signal.c +++ b/arch/sh/kernel/signal.c | |||
@@ -37,7 +37,7 @@ | |||
37 | asmlinkage int | 37 | asmlinkage int |
38 | sys_sigsuspend(old_sigset_t mask, | 38 | sys_sigsuspend(old_sigset_t mask, |
39 | unsigned long r5, unsigned long r6, unsigned long r7, | 39 | unsigned long r5, unsigned long r6, unsigned long r7, |
40 | struct pt_regs regs) | 40 | struct pt_regs __regs) |
41 | { | 41 | { |
42 | mask &= _BLOCKABLE; | 42 | mask &= _BLOCKABLE; |
43 | spin_lock_irq(¤t->sighand->siglock); | 43 | spin_lock_irq(¤t->sighand->siglock); |
@@ -52,7 +52,7 @@ sys_sigsuspend(old_sigset_t mask, | |||
52 | return -ERESTARTNOHAND; | 52 | return -ERESTARTNOHAND; |
53 | } | 53 | } |
54 | 54 | ||
55 | asmlinkage int | 55 | asmlinkage int |
56 | sys_sigaction(int sig, const struct old_sigaction __user *act, | 56 | sys_sigaction(int sig, const struct old_sigaction __user *act, |
57 | struct old_sigaction __user *oact) | 57 | struct old_sigaction __user *oact) |
58 | { | 58 | { |
@@ -87,9 +87,11 @@ sys_sigaction(int sig, const struct old_sigaction __user *act, | |||
87 | asmlinkage int | 87 | asmlinkage int |
88 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | 88 | sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, |
89 | unsigned long r6, unsigned long r7, | 89 | unsigned long r6, unsigned long r7, |
90 | struct pt_regs regs) | 90 | struct pt_regs __regs) |
91 | { | 91 | { |
92 | return do_sigaltstack(uss, uoss, regs.regs[15]); | 92 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
93 | |||
94 | return do_sigaltstack(uss, uoss, regs->regs[15]); | ||
93 | } | 95 | } |
94 | 96 | ||
95 | 97 | ||
@@ -98,7 +100,11 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, | |||
98 | */ | 100 | */ |
99 | 101 | ||
100 | #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ | 102 | #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ |
101 | #define TRAP16 0xc310 /* Syscall w/no args (NR in R3) */ | 103 | #if defined(CONFIG_CPU_SH2) || defined(CONFIG_CPU_SH2A) |
104 | #define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ | ||
105 | #else | ||
106 | #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ | ||
107 | #endif | ||
102 | #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ | 108 | #define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ |
103 | 109 | ||
104 | struct sigframe | 110 | struct sigframe |
@@ -194,9 +200,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p | |||
194 | 200 | ||
195 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, | 201 | asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, |
196 | unsigned long r6, unsigned long r7, | 202 | unsigned long r6, unsigned long r7, |
197 | struct pt_regs regs) | 203 | struct pt_regs __regs) |
198 | { | 204 | { |
199 | struct sigframe __user *frame = (struct sigframe __user *)regs.regs[15]; | 205 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
206 | struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; | ||
200 | sigset_t set; | 207 | sigset_t set; |
201 | int r0; | 208 | int r0; |
202 | 209 | ||
@@ -216,7 +223,7 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5, | |||
216 | recalc_sigpending(); | 223 | recalc_sigpending(); |
217 | spin_unlock_irq(¤t->sighand->siglock); | 224 | spin_unlock_irq(¤t->sighand->siglock); |
218 | 225 | ||
219 | if (restore_sigcontext(®s, &frame->sc, &r0)) | 226 | if (restore_sigcontext(regs, &frame->sc, &r0)) |
220 | goto badframe; | 227 | goto badframe; |
221 | return r0; | 228 | return r0; |
222 | 229 | ||
@@ -227,9 +234,10 @@ badframe: | |||
227 | 234 | ||
228 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, | 235 | asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, |
229 | unsigned long r6, unsigned long r7, | 236 | unsigned long r6, unsigned long r7, |
230 | struct pt_regs regs) | 237 | struct pt_regs __regs) |
231 | { | 238 | { |
232 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs.regs[15]; | 239 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); |
240 | struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; | ||
233 | sigset_t set; | 241 | sigset_t set; |
234 | stack_t st; | 242 | stack_t st; |
235 | int r0; | 243 | int r0; |
@@ -246,14 +254,14 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5, | |||
246 | recalc_sigpending(); | 254 | recalc_sigpending(); |
247 | spin_unlock_irq(¤t->sighand->siglock); | 255 | spin_unlock_irq(¤t->sighand->siglock); |
248 | 256 | ||
249 | if (restore_sigcontext(®s, &frame->uc.uc_mcontext, &r0)) | 257 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) |
250 | goto badframe; | 258 | goto badframe; |
251 | 259 | ||
252 | if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) | 260 | if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) |
253 | goto badframe; | 261 | goto badframe; |
254 | /* It is more difficult to avoid calling this function than to | 262 | /* It is more difficult to avoid calling this function than to |
255 | call it and ignore errors. */ | 263 | call it and ignore errors. */ |
256 | do_sigaltstack(&st, NULL, regs.regs[15]); | 264 | do_sigaltstack(&st, NULL, regs->regs[15]); |
257 | 265 | ||
258 | return r0; | 266 | return r0; |
259 | 267 | ||
@@ -350,7 +358,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
350 | } else { | 358 | } else { |
351 | /* Generate return code (system call to sigreturn) */ | 359 | /* Generate return code (system call to sigreturn) */ |
352 | err |= __put_user(MOVW(7), &frame->retcode[0]); | 360 | err |= __put_user(MOVW(7), &frame->retcode[0]); |
353 | err |= __put_user(TRAP16, &frame->retcode[1]); | 361 | err |= __put_user(TRAP_NOARG, &frame->retcode[1]); |
354 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); | 362 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); |
355 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); | 363 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); |
356 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); | 364 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); |
@@ -430,7 +438,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
430 | } else { | 438 | } else { |
431 | /* Generate return code (system call to rt_sigreturn) */ | 439 | /* Generate return code (system call to rt_sigreturn) */ |
432 | err |= __put_user(MOVW(7), &frame->retcode[0]); | 440 | err |= __put_user(MOVW(7), &frame->retcode[0]); |
433 | err |= __put_user(TRAP16, &frame->retcode[1]); | 441 | err |= __put_user(TRAP_NOARG, &frame->retcode[1]); |
434 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); | 442 | err |= __put_user(OR_R0_R0, &frame->retcode[2]); |
435 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); | 443 | err |= __put_user(OR_R0_R0, &frame->retcode[3]); |
436 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); | 444 | err |= __put_user(OR_R0_R0, &frame->retcode[4]); |
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c new file mode 100644 index 000000000000..0d5268afe80f --- /dev/null +++ b/arch/sh/kernel/stacktrace.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/stacktrace.c | ||
3 | * | ||
4 | * Stack trace management functions | ||
5 | * | ||
6 | * Copyright (C) 2006 Paul Mundt | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/stacktrace.h> | ||
14 | #include <linux/thread_info.h> | ||
15 | #include <asm/ptrace.h> | ||
16 | |||
17 | /* | ||
18 | * Save stack-backtrace addresses into a stack_trace buffer. | ||
19 | */ | ||
20 | void save_stack_trace(struct stack_trace *trace, struct task_struct *task) | ||
21 | { | ||
22 | unsigned long *sp; | ||
23 | |||
24 | if (!task) | ||
25 | task = current; | ||
26 | if (task == current) | ||
27 | sp = (unsigned long *)current_stack_pointer; | ||
28 | else | ||
29 | sp = (unsigned long *)task->thread.sp; | ||
30 | |||
31 | while (!kstack_end(sp)) { | ||
32 | unsigned long addr = *sp++; | ||
33 | |||
34 | if (__kernel_text_address(addr)) { | ||
35 | if (trace->skip > 0) | ||
36 | trace->skip--; | ||
37 | else | ||
38 | trace->entries[trace->nr_entries++] = addr; | ||
39 | if (trace->nr_entries >= trace->max_entries) | ||
40 | break; | ||
41 | } | ||
42 | } | ||
43 | } | ||
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c index 8fde95001c34..5083b6ed4b39 100644 --- a/arch/sh/kernel/sys_sh.c +++ b/arch/sh/kernel/sys_sh.c | |||
@@ -33,14 +33,15 @@ | |||
33 | */ | 33 | */ |
34 | asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, | 34 | asmlinkage int sys_pipe(unsigned long r4, unsigned long r5, |
35 | unsigned long r6, unsigned long r7, | 35 | unsigned long r6, unsigned long r7, |
36 | struct pt_regs regs) | 36 | struct pt_regs __regs) |
37 | { | 37 | { |
38 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
38 | int fd[2]; | 39 | int fd[2]; |
39 | int error; | 40 | int error; |
40 | 41 | ||
41 | error = do_pipe(fd); | 42 | error = do_pipe(fd); |
42 | if (!error) { | 43 | if (!error) { |
43 | regs.regs[1] = fd[1]; | 44 | regs->regs[1] = fd[1]; |
44 | return fd[0]; | 45 | return fd[0]; |
45 | } | 46 | } |
46 | return error; | 47 | return error; |
@@ -50,6 +51,7 @@ unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | |||
50 | 51 | ||
51 | EXPORT_SYMBOL(shm_align_mask); | 52 | EXPORT_SYMBOL(shm_align_mask); |
52 | 53 | ||
54 | #ifdef CONFIG_MMU | ||
53 | /* | 55 | /* |
54 | * To avoid cache aliases, we map the shared page with same color. | 56 | * To avoid cache aliases, we map the shared page with same color. |
55 | */ | 57 | */ |
@@ -135,6 +137,7 @@ full_search: | |||
135 | addr = COLOUR_ALIGN(addr, pgoff); | 137 | addr = COLOUR_ALIGN(addr, pgoff); |
136 | } | 138 | } |
137 | } | 139 | } |
140 | #endif /* CONFIG_MMU */ | ||
138 | 141 | ||
139 | static inline long | 142 | static inline long |
140 | do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, | 143 | do_mmap2(unsigned long addr, unsigned long len, unsigned long prot, |
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c index 57e708d7b52d..c206c9504c4b 100644 --- a/arch/sh/kernel/time.c +++ b/arch/sh/kernel/time.c | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/profile.h> | 15 | #include <linux/profile.h> |
16 | #include <linux/timex.h> | ||
17 | #include <linux/sched.h> | ||
16 | #include <asm/clock.h> | 18 | #include <asm/clock.h> |
17 | #include <asm/rtc.h> | 19 | #include <asm/rtc.h> |
18 | #include <asm/timer.h> | 20 | #include <asm/timer.h> |
@@ -50,15 +52,20 @@ unsigned long long __attribute__ ((weak)) sched_clock(void) | |||
50 | #ifndef CONFIG_GENERIC_TIME | 52 | #ifndef CONFIG_GENERIC_TIME |
51 | void do_gettimeofday(struct timeval *tv) | 53 | void do_gettimeofday(struct timeval *tv) |
52 | { | 54 | { |
55 | unsigned long flags; | ||
53 | unsigned long seq; | 56 | unsigned long seq; |
54 | unsigned long usec, sec; | 57 | unsigned long usec, sec; |
55 | 58 | ||
56 | do { | 59 | do { |
57 | seq = read_seqbegin(&xtime_lock); | 60 | /* |
61 | * Turn off IRQs when grabbing xtime_lock, so that | ||
62 | * the sys_timer get_offset code doesn't have to handle it. | ||
63 | */ | ||
64 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | ||
58 | usec = get_timer_offset(); | 65 | usec = get_timer_offset(); |
59 | sec = xtime.tv_sec; | 66 | sec = xtime.tv_sec; |
60 | usec += xtime.tv_nsec / 1000; | 67 | usec += xtime.tv_nsec / NSEC_PER_USEC; |
61 | } while (read_seqretry(&xtime_lock, seq)); | 68 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); |
62 | 69 | ||
63 | while (usec >= 1000000) { | 70 | while (usec >= 1000000) { |
64 | usec -= 1000000; | 71 | usec -= 1000000; |
@@ -85,7 +92,7 @@ int do_settimeofday(struct timespec *tv) | |||
85 | * wall time. Discover what correction gettimeofday() would have | 92 | * wall time. Discover what correction gettimeofday() would have |
86 | * made, and then undo it! | 93 | * made, and then undo it! |
87 | */ | 94 | */ |
88 | nsec -= 1000 * get_timer_offset(); | 95 | nsec -= get_timer_offset() * NSEC_PER_USEC; |
89 | 96 | ||
90 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | 97 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); |
91 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | 98 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); |
@@ -169,6 +176,108 @@ static struct sysdev_class timer_sysclass = { | |||
169 | .resume = timer_resume, | 176 | .resume = timer_resume, |
170 | }; | 177 | }; |
171 | 178 | ||
179 | #ifdef CONFIG_NO_IDLE_HZ | ||
180 | static int timer_dyn_tick_enable(void) | ||
181 | { | ||
182 | struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; | ||
183 | unsigned long flags; | ||
184 | int ret = -ENODEV; | ||
185 | |||
186 | if (dyn_tick) { | ||
187 | spin_lock_irqsave(&dyn_tick->lock, flags); | ||
188 | ret = 0; | ||
189 | if (!(dyn_tick->state & DYN_TICK_ENABLED)) { | ||
190 | ret = dyn_tick->enable(); | ||
191 | |||
192 | if (ret == 0) | ||
193 | dyn_tick->state |= DYN_TICK_ENABLED; | ||
194 | } | ||
195 | spin_unlock_irqrestore(&dyn_tick->lock, flags); | ||
196 | } | ||
197 | |||
198 | return ret; | ||
199 | } | ||
200 | |||
201 | static int timer_dyn_tick_disable(void) | ||
202 | { | ||
203 | struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; | ||
204 | unsigned long flags; | ||
205 | int ret = -ENODEV; | ||
206 | |||
207 | if (dyn_tick) { | ||
208 | spin_lock_irqsave(&dyn_tick->lock, flags); | ||
209 | ret = 0; | ||
210 | if (dyn_tick->state & DYN_TICK_ENABLED) { | ||
211 | ret = dyn_tick->disable(); | ||
212 | |||
213 | if (ret == 0) | ||
214 | dyn_tick->state &= ~DYN_TICK_ENABLED; | ||
215 | } | ||
216 | spin_unlock_irqrestore(&dyn_tick->lock, flags); | ||
217 | } | ||
218 | |||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Reprogram the system timer for at least the calculated time interval. | ||
224 | * This function should be called from the idle thread with IRQs disabled, | ||
225 | * immediately before sleeping. | ||
226 | */ | ||
227 | void timer_dyn_reprogram(void) | ||
228 | { | ||
229 | struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick; | ||
230 | unsigned long next, seq, flags; | ||
231 | |||
232 | if (!dyn_tick) | ||
233 | return; | ||
234 | |||
235 | spin_lock_irqsave(&dyn_tick->lock, flags); | ||
236 | if (dyn_tick->state & DYN_TICK_ENABLED) { | ||
237 | next = next_timer_interrupt(); | ||
238 | do { | ||
239 | seq = read_seqbegin(&xtime_lock); | ||
240 | dyn_tick->reprogram(next - jiffies); | ||
241 | } while (read_seqretry(&xtime_lock, seq)); | ||
242 | } | ||
243 | spin_unlock_irqrestore(&dyn_tick->lock, flags); | ||
244 | } | ||
245 | |||
246 | static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf) | ||
247 | { | ||
248 | return sprintf(buf, "%i\n", | ||
249 | (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1); | ||
250 | } | ||
251 | |||
252 | static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf, | ||
253 | size_t count) | ||
254 | { | ||
255 | unsigned int enable = simple_strtoul(buf, NULL, 2); | ||
256 | |||
257 | if (enable) | ||
258 | timer_dyn_tick_enable(); | ||
259 | else | ||
260 | timer_dyn_tick_disable(); | ||
261 | |||
262 | return count; | ||
263 | } | ||
264 | static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick); | ||
265 | |||
266 | /* | ||
267 | * dyntick=enable|disable | ||
268 | */ | ||
269 | static char dyntick_str[4] __initdata = ""; | ||
270 | |||
271 | static int __init dyntick_setup(char *str) | ||
272 | { | ||
273 | if (str) | ||
274 | strlcpy(dyntick_str, str, sizeof(dyntick_str)); | ||
275 | return 1; | ||
276 | } | ||
277 | |||
278 | __setup("dyntick=", dyntick_setup); | ||
279 | #endif | ||
280 | |||
172 | static int __init timer_init_sysfs(void) | 281 | static int __init timer_init_sysfs(void) |
173 | { | 282 | { |
174 | int ret = sysdev_class_register(&timer_sysclass); | 283 | int ret = sysdev_class_register(&timer_sysclass); |
@@ -176,7 +285,22 @@ static int __init timer_init_sysfs(void) | |||
176 | return ret; | 285 | return ret; |
177 | 286 | ||
178 | sys_timer->dev.cls = &timer_sysclass; | 287 | sys_timer->dev.cls = &timer_sysclass; |
179 | return sysdev_register(&sys_timer->dev); | 288 | ret = sysdev_register(&sys_timer->dev); |
289 | |||
290 | #ifdef CONFIG_NO_IDLE_HZ | ||
291 | if (ret == 0 && sys_timer->dyn_tick) { | ||
292 | ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick); | ||
293 | |||
294 | /* | ||
295 | * Turn on dynamic tick after calibrate delay | ||
296 | * for correct bogomips | ||
297 | */ | ||
298 | if (ret == 0 && dyntick_str[0] == 'e') | ||
299 | ret = timer_dyn_tick_enable(); | ||
300 | } | ||
301 | #endif | ||
302 | |||
303 | return ret; | ||
180 | } | 304 | } |
181 | device_initcall(timer_init_sysfs); | 305 | device_initcall(timer_init_sysfs); |
182 | 306 | ||
@@ -200,6 +324,11 @@ void __init time_init(void) | |||
200 | sys_timer = get_sys_timer(); | 324 | sys_timer = get_sys_timer(); |
201 | printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); | 325 | printk(KERN_INFO "Using %s for system timer\n", sys_timer->name); |
202 | 326 | ||
327 | #ifdef CONFIG_NO_IDLE_HZ | ||
328 | if (sys_timer->dyn_tick) | ||
329 | spin_lock_init(&sys_timer->dyn_tick->lock); | ||
330 | #endif | ||
331 | |||
203 | #if defined(CONFIG_SH_KGDB) | 332 | #if defined(CONFIG_SH_KGDB) |
204 | /* | 333 | /* |
205 | * Set up kgdb as requested. We do it here because the serial | 334 | * Set up kgdb as requested. We do it here because the serial |
diff --git a/arch/sh/kernel/timers/Makefile b/arch/sh/kernel/timers/Makefile index 151a6a304cec..bcf244ff6a12 100644 --- a/arch/sh/kernel/timers/Makefile +++ b/arch/sh/kernel/timers/Makefile | |||
@@ -5,4 +5,6 @@ | |||
5 | obj-y := timer.o | 5 | obj-y := timer.o |
6 | 6 | ||
7 | obj-$(CONFIG_SH_TMU) += timer-tmu.o | 7 | obj-$(CONFIG_SH_TMU) += timer-tmu.o |
8 | obj-$(CONFIG_SH_MTU2) += timer-mtu2.o | ||
9 | obj-$(CONFIG_SH_CMT) += timer-cmt.o | ||
8 | 10 | ||
diff --git a/arch/sh/kernel/timers/timer-cmt.c b/arch/sh/kernel/timers/timer-cmt.c new file mode 100644 index 000000000000..a574b93a4e7b --- /dev/null +++ b/arch/sh/kernel/timers/timer-cmt.c | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/timers/timer-cmt.c - CMT Timer Support | ||
3 | * | ||
4 | * Copyright (C) 2005 Yoshinori Sato | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/seqlock.h> | ||
15 | #include <asm/timer.h> | ||
16 | #include <asm/rtc.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include <asm/clock.h> | ||
20 | |||
21 | #if defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
22 | #define CMT_CMSTR 0xf84a0070 | ||
23 | #define CMT_CMCSR_0 0xf84a0072 | ||
24 | #define CMT_CMCNT_0 0xf84a0074 | ||
25 | #define CMT_CMCOR_0 0xf84a0076 | ||
26 | #define CMT_CMCSR_1 0xf84a0078 | ||
27 | #define CMT_CMCNT_1 0xf84a007a | ||
28 | #define CMT_CMCOR_1 0xf84a007c | ||
29 | |||
30 | #define STBCR3 0xf80a0000 | ||
31 | #define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR3) & ~0x10, STBCR3); } while(0) | ||
32 | #define CMT_CMCSR_INIT 0x0040 | ||
33 | #define CMT_CMCSR_CALIB 0x0000 | ||
34 | #elif defined(CONFIG_CPU_SUBTYPE_SH7206) | ||
35 | #define CMT_CMSTR 0xfffec000 | ||
36 | #define CMT_CMCSR_0 0xfffec002 | ||
37 | #define CMT_CMCNT_0 0xfffec004 | ||
38 | #define CMT_CMCOR_0 0xfffec006 | ||
39 | |||
40 | #define STBCR4 0xfffe040c | ||
41 | #define cmt_clock_enable() do { ctrl_outb(ctrl_inb(STBCR4) & ~0x04, STBCR4); } while(0) | ||
42 | #define CMT_CMCSR_INIT 0x0040 | ||
43 | #define CMT_CMCSR_CALIB 0x0000 | ||
44 | #else | ||
45 | #error "Unknown CPU SUBTYPE" | ||
46 | #endif | ||
47 | |||
48 | static unsigned long cmt_timer_get_offset(void) | ||
49 | { | ||
50 | int count; | ||
51 | static unsigned short count_p = 0xffff; /* for the first call after boot */ | ||
52 | static unsigned long jiffies_p = 0; | ||
53 | |||
54 | /* | ||
55 | * cache volatile jiffies temporarily; we have IRQs turned off. | ||
56 | */ | ||
57 | unsigned long jiffies_t; | ||
58 | |||
59 | /* timer count may underflow right here */ | ||
60 | count = ctrl_inw(CMT_CMCOR_0); | ||
61 | count -= ctrl_inw(CMT_CMCNT_0); | ||
62 | |||
63 | jiffies_t = jiffies; | ||
64 | |||
65 | /* | ||
66 | * avoiding timer inconsistencies (they are rare, but they happen)... | ||
67 | * there is one kind of problem that must be avoided here: | ||
68 | * 1. the timer counter underflows | ||
69 | */ | ||
70 | |||
71 | if (jiffies_t == jiffies_p) { | ||
72 | if (count > count_p) { | ||
73 | /* the nutcase */ | ||
74 | if (ctrl_inw(CMT_CMCSR_0) & 0x80) { /* Check CMF bit */ | ||
75 | count -= LATCH; | ||
76 | } else { | ||
77 | printk("%s (): hardware timer problem?\n", | ||
78 | __FUNCTION__); | ||
79 | } | ||
80 | } | ||
81 | } else | ||
82 | jiffies_p = jiffies_t; | ||
83 | |||
84 | count_p = count; | ||
85 | |||
86 | count = ((LATCH-1) - count) * TICK_SIZE; | ||
87 | count = (count + LATCH/2) / LATCH; | ||
88 | |||
89 | return count; | ||
90 | } | ||
91 | |||
92 | static irqreturn_t cmt_timer_interrupt(int irq, void *dev_id) | ||
93 | { | ||
94 | unsigned long timer_status; | ||
95 | |||
96 | /* Clear CMF bit */ | ||
97 | timer_status = ctrl_inw(CMT_CMCSR_0); | ||
98 | timer_status &= ~0x80; | ||
99 | ctrl_outw(timer_status, CMT_CMCSR_0); | ||
100 | |||
101 | /* | ||
102 | * Here we are in the timer irq handler. We just have irqs locally | ||
103 | * disabled but we don't know if the timer_bh is running on the other | ||
104 | * CPU. We need to avoid an SMP race with it. NOTE: we don't need | ||
105 | * the irq version of write_lock because as just said we have irq | ||
106 | * locally disabled. -arca | ||
107 | */ | ||
108 | write_seqlock(&xtime_lock); | ||
109 | handle_timer_tick(); | ||
110 | write_sequnlock(&xtime_lock); | ||
111 | |||
112 | return IRQ_HANDLED; | ||
113 | } | ||
114 | |||
115 | static struct irqaction cmt_irq = { | ||
116 | .name = "timer", | ||
117 | .handler = cmt_timer_interrupt, | ||
118 | .flags = IRQF_DISABLED | IRQF_TIMER, | ||
119 | .mask = CPU_MASK_NONE, | ||
120 | }; | ||
121 | |||
122 | static void cmt_clk_init(struct clk *clk) | ||
123 | { | ||
124 | u8 divisor = CMT_CMCSR_INIT & 0x3; | ||
125 | ctrl_inw(CMT_CMCSR_0); | ||
126 | ctrl_outw(CMT_CMCSR_INIT, CMT_CMCSR_0); | ||
127 | clk->parent = clk_get(NULL, "module_clk"); | ||
128 | clk->rate = clk->parent->rate / (8 << (divisor << 1)); | ||
129 | } | ||
130 | |||
131 | static void cmt_clk_recalc(struct clk *clk) | ||
132 | { | ||
133 | u8 divisor = ctrl_inw(CMT_CMCSR_0) & 0x3; | ||
134 | clk->rate = clk->parent->rate / (8 << (divisor << 1)); | ||
135 | } | ||
136 | |||
137 | static struct clk_ops cmt_clk_ops = { | ||
138 | .init = cmt_clk_init, | ||
139 | .recalc = cmt_clk_recalc, | ||
140 | }; | ||
141 | |||
142 | static struct clk cmt0_clk = { | ||
143 | .name = "cmt0_clk", | ||
144 | .ops = &cmt_clk_ops, | ||
145 | }; | ||
146 | |||
147 | static int cmt_timer_start(void) | ||
148 | { | ||
149 | ctrl_outw(ctrl_inw(CMT_CMSTR) | 0x01, CMT_CMSTR); | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int cmt_timer_stop(void) | ||
154 | { | ||
155 | ctrl_outw(ctrl_inw(CMT_CMSTR) & ~0x01, CMT_CMSTR); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int cmt_timer_init(void) | ||
160 | { | ||
161 | unsigned long interval; | ||
162 | |||
163 | cmt_clock_enable(); | ||
164 | |||
165 | setup_irq(CONFIG_SH_TIMER_IRQ, &cmt_irq); | ||
166 | |||
167 | cmt0_clk.parent = clk_get(NULL, "module_clk"); | ||
168 | |||
169 | cmt_timer_stop(); | ||
170 | |||
171 | interval = cmt0_clk.parent->rate / 8 / HZ; | ||
172 | printk(KERN_INFO "Interval = %ld\n", interval); | ||
173 | |||
174 | ctrl_outw(interval, CMT_CMCOR_0); | ||
175 | |||
176 | clk_register(&cmt0_clk); | ||
177 | clk_enable(&cmt0_clk); | ||
178 | |||
179 | cmt_timer_start(); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | struct sys_timer_ops cmt_timer_ops = { | ||
185 | .init = cmt_timer_init, | ||
186 | .start = cmt_timer_start, | ||
187 | .stop = cmt_timer_stop, | ||
188 | #ifndef CONFIG_GENERIC_TIME | ||
189 | .get_offset = cmt_timer_get_offset, | ||
190 | #endif | ||
191 | }; | ||
192 | |||
193 | struct sys_timer cmt_timer = { | ||
194 | .name = "cmt", | ||
195 | .ops = &cmt_timer_ops, | ||
196 | }; | ||
diff --git a/arch/sh/kernel/timers/timer-mtu2.c b/arch/sh/kernel/timers/timer-mtu2.c new file mode 100644 index 000000000000..fffcd1c09873 --- /dev/null +++ b/arch/sh/kernel/timers/timer-mtu2.c | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/timers/timer-mtu2.c - MTU2 Timer Support | ||
3 | * | ||
4 | * Copyright (C) 2005 Paul Mundt | ||
5 | * | ||
6 | * Based off of arch/sh/kernel/timers/timer-tmu.c | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/seqlock.h> | ||
16 | #include <asm/timer.h> | ||
17 | #include <asm/io.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include <asm/clock.h> | ||
20 | |||
21 | /* | ||
22 | * We use channel 1 for our lowly system timer. Channel 2 would be the other | ||
23 | * likely candidate, but we leave it alone as it has higher divisors that | ||
24 | * would be of more use to other more interesting applications. | ||
25 | * | ||
26 | * TODO: Presently we only implement a 16-bit single-channel system timer. | ||
27 | * However, we can implement channel cascade if we go the overflow route and | ||
28 | * get away with using 2 MTU2 channels as a 32-bit timer. | ||
29 | */ | ||
30 | #define MTU2_TSTR 0xfffe4280 | ||
31 | #define MTU2_TCR_1 0xfffe4380 | ||
32 | #define MTU2_TMDR_1 0xfffe4381 | ||
33 | #define MTU2_TIOR_1 0xfffe4382 | ||
34 | #define MTU2_TIER_1 0xfffe4384 | ||
35 | #define MTU2_TSR_1 0xfffe4385 | ||
36 | #define MTU2_TCNT_1 0xfffe4386 /* 16-bit counter */ | ||
37 | #define MTU2_TGRA_1 0xfffe438a | ||
38 | |||
39 | #define STBCR3 0xfffe0408 | ||
40 | |||
41 | #define MTU2_TSTR_CST1 (1 << 1) /* Counter Start 1 */ | ||
42 | |||
43 | #define MTU2_TSR_TGFA (1 << 0) /* GRA compare match */ | ||
44 | |||
45 | #define MTU2_TIER_TGIEA (1 << 0) /* GRA compare match interrupt enable */ | ||
46 | |||
47 | #define MTU2_TCR_INIT 0x22 | ||
48 | |||
49 | #define MTU2_TCR_CALIB 0x00 | ||
50 | |||
51 | static unsigned long mtu2_timer_get_offset(void) | ||
52 | { | ||
53 | int count; | ||
54 | static int count_p = 0x7fff; /* for the first call after boot */ | ||
55 | static unsigned long jiffies_p = 0; | ||
56 | |||
57 | /* | ||
58 | * cache volatile jiffies temporarily; we have IRQs turned off. | ||
59 | */ | ||
60 | unsigned long jiffies_t; | ||
61 | |||
62 | /* timer count may underflow right here */ | ||
63 | count = ctrl_inw(MTU2_TCNT_1); /* read the latched count */ | ||
64 | |||
65 | jiffies_t = jiffies; | ||
66 | |||
67 | /* | ||
68 | * avoiding timer inconsistencies (they are rare, but they happen)... | ||
69 | * there is one kind of problem that must be avoided here: | ||
70 | * 1. the timer counter underflows | ||
71 | */ | ||
72 | |||
73 | if (jiffies_t == jiffies_p) { | ||
74 | if (count > count_p) { | ||
75 | if (ctrl_inb(MTU2_TSR_1) & MTU2_TSR_TGFA) { | ||
76 | count -= LATCH; | ||
77 | } else { | ||
78 | printk("%s (): hardware timer problem?\n", | ||
79 | __FUNCTION__); | ||
80 | } | ||
81 | } | ||
82 | } else | ||
83 | jiffies_p = jiffies_t; | ||
84 | |||
85 | count_p = count; | ||
86 | |||
87 | count = ((LATCH-1) - count) * TICK_SIZE; | ||
88 | count = (count + LATCH/2) / LATCH; | ||
89 | |||
90 | return count; | ||
91 | } | ||
92 | |||
93 | static irqreturn_t mtu2_timer_interrupt(int irq, void *dev_id) | ||
94 | { | ||
95 | unsigned long timer_status; | ||
96 | |||
97 | /* Clear TGFA bit */ | ||
98 | timer_status = ctrl_inb(MTU2_TSR_1); | ||
99 | timer_status &= ~MTU2_TSR_TGFA; | ||
100 | ctrl_outb(timer_status, MTU2_TSR_1); | ||
101 | |||
102 | /* Do timer tick */ | ||
103 | write_seqlock(&xtime_lock); | ||
104 | handle_timer_tick(); | ||
105 | write_sequnlock(&xtime_lock); | ||
106 | |||
107 | return IRQ_HANDLED; | ||
108 | } | ||
109 | |||
110 | static struct irqaction mtu2_irq = { | ||
111 | .name = "timer", | ||
112 | .handler = mtu2_timer_interrupt, | ||
113 | .flags = IRQF_DISABLED | IRQF_TIMER, | ||
114 | .mask = CPU_MASK_NONE, | ||
115 | }; | ||
116 | |||
117 | static unsigned int divisors[] = { 1, 4, 16, 64, 1, 1, 256 }; | ||
118 | |||
119 | static void mtu2_clk_init(struct clk *clk) | ||
120 | { | ||
121 | u8 idx = MTU2_TCR_INIT & 0x7; | ||
122 | |||
123 | clk->rate = clk->parent->rate / divisors[idx]; | ||
124 | /* Start TCNT counting */ | ||
125 | ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); | ||
126 | |||
127 | } | ||
128 | |||
129 | static void mtu2_clk_recalc(struct clk *clk) | ||
130 | { | ||
131 | u8 idx = ctrl_inb(MTU2_TCR_1) & 0x7; | ||
132 | clk->rate = clk->parent->rate / divisors[idx]; | ||
133 | } | ||
134 | |||
135 | static struct clk_ops mtu2_clk_ops = { | ||
136 | .init = mtu2_clk_init, | ||
137 | .recalc = mtu2_clk_recalc, | ||
138 | }; | ||
139 | |||
140 | static struct clk mtu2_clk1 = { | ||
141 | .name = "mtu2_clk1", | ||
142 | .ops = &mtu2_clk_ops, | ||
143 | }; | ||
144 | |||
145 | static int mtu2_timer_start(void) | ||
146 | { | ||
147 | ctrl_outb(ctrl_inb(MTU2_TSTR) | MTU2_TSTR_CST1, MTU2_TSTR); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int mtu2_timer_stop(void) | ||
152 | { | ||
153 | ctrl_outb(ctrl_inb(MTU2_TSTR) & ~MTU2_TSTR_CST1, MTU2_TSTR); | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static int mtu2_timer_init(void) | ||
158 | { | ||
159 | u8 tmp; | ||
160 | unsigned long interval; | ||
161 | |||
162 | setup_irq(CONFIG_SH_TIMER_IRQ, &mtu2_irq); | ||
163 | |||
164 | mtu2_clk1.parent = clk_get(NULL, "module_clk"); | ||
165 | |||
166 | ctrl_outb(ctrl_inb(STBCR3) & (~0x20), STBCR3); | ||
167 | |||
168 | /* Normal operation */ | ||
169 | ctrl_outb(0, MTU2_TMDR_1); | ||
170 | ctrl_outb(MTU2_TCR_INIT, MTU2_TCR_1); | ||
171 | ctrl_outb(0x01, MTU2_TIOR_1); | ||
172 | |||
173 | /* Enable underflow interrupt */ | ||
174 | ctrl_outb(ctrl_inb(MTU2_TIER_1) | MTU2_TIER_TGIEA, MTU2_TIER_1); | ||
175 | |||
176 | interval = CONFIG_SH_PCLK_FREQ / 16 / HZ; | ||
177 | printk(KERN_INFO "Interval = %ld\n", interval); | ||
178 | |||
179 | ctrl_outw(interval, MTU2_TGRA_1); | ||
180 | ctrl_outw(0, MTU2_TCNT_1); | ||
181 | |||
182 | clk_register(&mtu2_clk1); | ||
183 | clk_enable(&mtu2_clk1); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | struct sys_timer_ops mtu2_timer_ops = { | ||
189 | .init = mtu2_timer_init, | ||
190 | .start = mtu2_timer_start, | ||
191 | .stop = mtu2_timer_stop, | ||
192 | #ifndef CONFIG_GENERIC_TIME | ||
193 | .get_offset = mtu2_timer_get_offset, | ||
194 | #endif | ||
195 | }; | ||
196 | |||
197 | struct sys_timer mtu2_timer = { | ||
198 | .name = "mtu2", | ||
199 | .ops = &mtu2_timer_ops, | ||
200 | }; | ||
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c index 24927015dc31..e060e71d0785 100644 --- a/arch/sh/kernel/timers/timer-tmu.c +++ b/arch/sh/kernel/timers/timer-tmu.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/seqlock.h> | 20 | #include <linux/seqlock.h> |
22 | #include <asm/timer.h> | 21 | #include <asm/timer.h> |
23 | #include <asm/rtc.h> | 22 | #include <asm/rtc.h> |
@@ -31,13 +30,9 @@ | |||
31 | 30 | ||
32 | #define TMU0_TCR_CALIB 0x0000 | 31 | #define TMU0_TCR_CALIB 0x0000 |
33 | 32 | ||
34 | static DEFINE_SPINLOCK(tmu0_lock); | ||
35 | |||
36 | static unsigned long tmu_timer_get_offset(void) | 33 | static unsigned long tmu_timer_get_offset(void) |
37 | { | 34 | { |
38 | int count; | 35 | int count; |
39 | unsigned long flags; | ||
40 | |||
41 | static int count_p = 0x7fffffff; /* for the first call after boot */ | 36 | static int count_p = 0x7fffffff; /* for the first call after boot */ |
42 | static unsigned long jiffies_p = 0; | 37 | static unsigned long jiffies_p = 0; |
43 | 38 | ||
@@ -46,7 +41,6 @@ static unsigned long tmu_timer_get_offset(void) | |||
46 | */ | 41 | */ |
47 | unsigned long jiffies_t; | 42 | unsigned long jiffies_t; |
48 | 43 | ||
49 | spin_lock_irqsave(&tmu0_lock, flags); | ||
50 | /* timer count may underflow right here */ | 44 | /* timer count may underflow right here */ |
51 | count = ctrl_inl(TMU0_TCNT); /* read the latched count */ | 45 | count = ctrl_inl(TMU0_TCNT); /* read the latched count */ |
52 | 46 | ||
@@ -72,7 +66,6 @@ static unsigned long tmu_timer_get_offset(void) | |||
72 | jiffies_p = jiffies_t; | 66 | jiffies_p = jiffies_t; |
73 | 67 | ||
74 | count_p = count; | 68 | count_p = count; |
75 | spin_unlock_irqrestore(&tmu0_lock, flags); | ||
76 | 69 | ||
77 | count = ((LATCH-1) - count) * TICK_SIZE; | 70 | count = ((LATCH-1) - count) * TICK_SIZE; |
78 | count = (count + LATCH/2) / LATCH; | 71 | count = (count + LATCH/2) / LATCH; |
@@ -106,7 +99,7 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy) | |||
106 | static struct irqaction tmu_irq = { | 99 | static struct irqaction tmu_irq = { |
107 | .name = "timer", | 100 | .name = "timer", |
108 | .handler = tmu_timer_interrupt, | 101 | .handler = tmu_timer_interrupt, |
109 | .flags = IRQF_DISABLED, | 102 | .flags = IRQF_DISABLED | IRQF_TIMER, |
110 | .mask = CPU_MASK_NONE, | 103 | .mask = CPU_MASK_NONE, |
111 | }; | 104 | }; |
112 | 105 | ||
@@ -149,9 +142,9 @@ static int tmu_timer_init(void) | |||
149 | { | 142 | { |
150 | unsigned long interval; | 143 | unsigned long interval; |
151 | 144 | ||
152 | setup_irq(TIMER_IRQ, &tmu_irq); | 145 | setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq); |
153 | 146 | ||
154 | tmu0_clk.parent = clk_get("module_clk"); | 147 | tmu0_clk.parent = clk_get(NULL, "module_clk"); |
155 | 148 | ||
156 | /* Start TMU0 */ | 149 | /* Start TMU0 */ |
157 | tmu_timer_stop(); | 150 | tmu_timer_stop(); |
diff --git a/arch/sh/kernel/timers/timer.c b/arch/sh/kernel/timers/timer.c index dc1f631053a8..a6bcc913d25e 100644 --- a/arch/sh/kernel/timers/timer.c +++ b/arch/sh/kernel/timers/timer.c | |||
@@ -17,6 +17,12 @@ static struct sys_timer *sys_timers[] __initdata = { | |||
17 | #ifdef CONFIG_SH_TMU | 17 | #ifdef CONFIG_SH_TMU |
18 | &tmu_timer, | 18 | &tmu_timer, |
19 | #endif | 19 | #endif |
20 | #ifdef CONFIG_SH_MTU2 | ||
21 | &mtu2_timer, | ||
22 | #endif | ||
23 | #ifdef CONFIG_SH_CMT | ||
24 | &cmt_timer, | ||
25 | #endif | ||
20 | NULL, | 26 | NULL, |
21 | }; | 27 | }; |
22 | 28 | ||
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index 53dfa55f3156..3762d9dc2046 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c | |||
@@ -18,13 +18,14 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/kallsyms.h> | 19 | #include <linux/kallsyms.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/debug_locks.h> | ||
21 | #include <asm/system.h> | 22 | #include <asm/system.h> |
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | 24 | ||
24 | #ifdef CONFIG_SH_KGDB | 25 | #ifdef CONFIG_SH_KGDB |
25 | #include <asm/kgdb.h> | 26 | #include <asm/kgdb.h> |
26 | #define CHK_REMOTE_DEBUG(regs) \ | 27 | #define CHK_REMOTE_DEBUG(regs) \ |
27 | { \ | 28 | { \ |
28 | if (kgdb_debug_hook && !user_mode(regs))\ | 29 | if (kgdb_debug_hook && !user_mode(regs))\ |
29 | (*kgdb_debug_hook)(regs); \ | 30 | (*kgdb_debug_hook)(regs); \ |
30 | } | 31 | } |
@@ -33,8 +34,13 @@ | |||
33 | #endif | 34 | #endif |
34 | 35 | ||
35 | #ifdef CONFIG_CPU_SH2 | 36 | #ifdef CONFIG_CPU_SH2 |
36 | #define TRAP_RESERVED_INST 4 | 37 | # define TRAP_RESERVED_INST 4 |
37 | #define TRAP_ILLEGAL_SLOT_INST 6 | 38 | # define TRAP_ILLEGAL_SLOT_INST 6 |
39 | # define TRAP_ADDRESS_ERROR 9 | ||
40 | # ifdef CONFIG_CPU_SH2A | ||
41 | # define TRAP_DIVZERO_ERROR 17 | ||
42 | # define TRAP_DIVOVF_ERROR 18 | ||
43 | # endif | ||
38 | #else | 44 | #else |
39 | #define TRAP_RESERVED_INST 12 | 45 | #define TRAP_RESERVED_INST 12 |
40 | #define TRAP_ILLEGAL_SLOT_INST 13 | 46 | #define TRAP_ILLEGAL_SLOT_INST 13 |
@@ -88,7 +94,7 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
88 | 94 | ||
89 | if (!user_mode(regs) || in_interrupt()) | 95 | if (!user_mode(regs) || in_interrupt()) |
90 | dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + | 96 | dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + |
91 | (unsigned long)task_stack_page(current)); | 97 | (unsigned long)task_stack_page(current)); |
92 | 98 | ||
93 | bust_spinlocks(0); | 99 | bust_spinlocks(0); |
94 | spin_unlock_irq(&die_lock); | 100 | spin_unlock_irq(&die_lock); |
@@ -102,8 +108,6 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs, | |||
102 | die(str, regs, err); | 108 | die(str, regs, err); |
103 | } | 109 | } |
104 | 110 | ||
105 | static int handle_unaligned_notify_count = 10; | ||
106 | |||
107 | /* | 111 | /* |
108 | * try and fix up kernelspace address errors | 112 | * try and fix up kernelspace address errors |
109 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV | 113 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV |
@@ -198,7 +202,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
198 | if (copy_to_user(dst,src,4)) | 202 | if (copy_to_user(dst,src,4)) |
199 | goto fetch_fault; | 203 | goto fetch_fault; |
200 | ret = 0; | 204 | ret = 0; |
201 | break; | 205 | break; |
202 | 206 | ||
203 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ | 207 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ |
204 | if (instruction & 4) | 208 | if (instruction & 4) |
@@ -222,7 +226,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
222 | if (copy_from_user(dst,src,4)) | 226 | if (copy_from_user(dst,src,4)) |
223 | goto fetch_fault; | 227 | goto fetch_fault; |
224 | ret = 0; | 228 | ret = 0; |
225 | break; | 229 | break; |
226 | 230 | ||
227 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ | 231 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ |
228 | src = (unsigned char*) *rm; | 232 | src = (unsigned char*) *rm; |
@@ -230,7 +234,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
230 | *rm += count; | 234 | *rm += count; |
231 | dst = (unsigned char*) rn; | 235 | dst = (unsigned char*) rn; |
232 | *(unsigned long*)dst = 0; | 236 | *(unsigned long*)dst = 0; |
233 | 237 | ||
234 | #ifdef __LITTLE_ENDIAN__ | 238 | #ifdef __LITTLE_ENDIAN__ |
235 | if (copy_from_user(dst, src, count)) | 239 | if (copy_from_user(dst, src, count)) |
236 | goto fetch_fault; | 240 | goto fetch_fault; |
@@ -241,7 +245,7 @@ static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |||
241 | } | 245 | } |
242 | #else | 246 | #else |
243 | dst += 4-count; | 247 | dst += 4-count; |
244 | 248 | ||
245 | if (copy_from_user(dst, src, count)) | 249 | if (copy_from_user(dst, src, count)) |
246 | goto fetch_fault; | 250 | goto fetch_fault; |
247 | 251 | ||
@@ -320,7 +324,8 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) | |||
320 | return -EFAULT; | 324 | return -EFAULT; |
321 | 325 | ||
322 | /* kernel */ | 326 | /* kernel */ |
323 | die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0); | 327 | die("delay-slot-insn faulting in handle_unaligned_delayslot", |
328 | regs, 0); | ||
324 | } | 329 | } |
325 | 330 | ||
326 | return handle_unaligned_ins(instruction,regs); | 331 | return handle_unaligned_ins(instruction,regs); |
@@ -342,6 +347,13 @@ static inline int handle_unaligned_delayslot(struct pt_regs *regs) | |||
342 | #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) | 347 | #define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) |
343 | #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) | 348 | #define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) |
344 | 349 | ||
350 | /* | ||
351 | * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit | ||
352 | * opcodes.. | ||
353 | */ | ||
354 | #ifndef CONFIG_CPU_SH2A | ||
355 | static int handle_unaligned_notify_count = 10; | ||
356 | |||
345 | static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | 357 | static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) |
346 | { | 358 | { |
347 | u_int rm; | 359 | u_int rm; |
@@ -354,7 +366,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | |||
354 | if (user_mode(regs) && handle_unaligned_notify_count>0) { | 366 | if (user_mode(regs) && handle_unaligned_notify_count>0) { |
355 | handle_unaligned_notify_count--; | 367 | handle_unaligned_notify_count--; |
356 | 368 | ||
357 | printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | 369 | printk(KERN_NOTICE "Fixing up unaligned userspace access " |
370 | "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", | ||
358 | current->comm,current->pid,(u16*)regs->pc,instruction); | 371 | current->comm,current->pid,(u16*)regs->pc,instruction); |
359 | } | 372 | } |
360 | 373 | ||
@@ -478,32 +491,58 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs) | |||
478 | regs->pc += 2; | 491 | regs->pc += 2; |
479 | return ret; | 492 | return ret; |
480 | } | 493 | } |
494 | #endif /* CONFIG_CPU_SH2A */ | ||
495 | |||
496 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
497 | #define lookup_exception_vector(x) \ | ||
498 | __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x))) | ||
499 | #else | ||
500 | #define lookup_exception_vector(x) \ | ||
501 | __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x))) | ||
502 | #endif | ||
481 | 503 | ||
482 | /* | 504 | /* |
483 | * Handle various address error exceptions | 505 | * Handle various address error exceptions: |
506 | * - instruction address error: | ||
507 | * misaligned PC | ||
508 | * PC >= 0x80000000 in user mode | ||
509 | * - data address error (read and write) | ||
510 | * misaligned data access | ||
511 | * access to >= 0x80000000 in user mode | ||
512 | * Unfortunately we can't distinguish between instruction address error | ||
513 | * and data address errors caused by read accesses. | ||
484 | */ | 514 | */ |
485 | asmlinkage void do_address_error(struct pt_regs *regs, | 515 | asmlinkage void do_address_error(struct pt_regs *regs, |
486 | unsigned long writeaccess, | 516 | unsigned long writeaccess, |
487 | unsigned long address) | 517 | unsigned long address) |
488 | { | 518 | { |
489 | unsigned long error_code; | 519 | unsigned long error_code = 0; |
490 | mm_segment_t oldfs; | 520 | mm_segment_t oldfs; |
521 | siginfo_t info; | ||
522 | #ifndef CONFIG_CPU_SH2A | ||
491 | u16 instruction; | 523 | u16 instruction; |
492 | int tmp; | 524 | int tmp; |
525 | #endif | ||
493 | 526 | ||
494 | asm volatile("stc r2_bank,%0": "=r" (error_code)); | 527 | /* Intentional ifdef */ |
528 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
529 | lookup_exception_vector(error_code); | ||
530 | #endif | ||
495 | 531 | ||
496 | oldfs = get_fs(); | 532 | oldfs = get_fs(); |
497 | 533 | ||
498 | if (user_mode(regs)) { | 534 | if (user_mode(regs)) { |
535 | int si_code = BUS_ADRERR; | ||
536 | |||
499 | local_irq_enable(); | 537 | local_irq_enable(); |
500 | current->thread.error_code = error_code; | ||
501 | current->thread.trap_no = (writeaccess) ? 8 : 7; | ||
502 | 538 | ||
503 | /* bad PC is not something we can fix */ | 539 | /* bad PC is not something we can fix */ |
504 | if (regs->pc & 1) | 540 | if (regs->pc & 1) { |
541 | si_code = BUS_ADRALN; | ||
505 | goto uspace_segv; | 542 | goto uspace_segv; |
543 | } | ||
506 | 544 | ||
545 | #ifndef CONFIG_CPU_SH2A | ||
507 | set_fs(USER_DS); | 546 | set_fs(USER_DS); |
508 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | 547 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { |
509 | /* Argh. Fault on the instruction itself. | 548 | /* Argh. Fault on the instruction itself. |
@@ -518,14 +557,23 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
518 | 557 | ||
519 | if (tmp==0) | 558 | if (tmp==0) |
520 | return; /* sorted */ | 559 | return; /* sorted */ |
560 | #endif | ||
521 | 561 | ||
522 | uspace_segv: | 562 | uspace_segv: |
523 | printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm); | 563 | printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " |
524 | force_sig(SIGSEGV, current); | 564 | "access (PC %lx PR %lx)\n", current->comm, regs->pc, |
565 | regs->pr); | ||
566 | |||
567 | info.si_signo = SIGBUS; | ||
568 | info.si_errno = 0; | ||
569 | info.si_code = si_code; | ||
570 | info.si_addr = (void *) address; | ||
571 | force_sig_info(SIGBUS, &info, current); | ||
525 | } else { | 572 | } else { |
526 | if (regs->pc & 1) | 573 | if (regs->pc & 1) |
527 | die("unaligned program counter", regs, error_code); | 574 | die("unaligned program counter", regs, error_code); |
528 | 575 | ||
576 | #ifndef CONFIG_CPU_SH2A | ||
529 | set_fs(KERNEL_DS); | 577 | set_fs(KERNEL_DS); |
530 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { | 578 | if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) { |
531 | /* Argh. Fault on the instruction itself. | 579 | /* Argh. Fault on the instruction itself. |
@@ -537,6 +585,12 @@ asmlinkage void do_address_error(struct pt_regs *regs, | |||
537 | 585 | ||
538 | handle_unaligned_access(instruction, regs); | 586 | handle_unaligned_access(instruction, regs); |
539 | set_fs(oldfs); | 587 | set_fs(oldfs); |
588 | #else | ||
589 | printk(KERN_NOTICE "Killing process \"%s\" due to unaligned " | ||
590 | "access\n", current->comm); | ||
591 | |||
592 | force_sig(SIGSEGV, current); | ||
593 | #endif | ||
540 | } | 594 | } |
541 | } | 595 | } |
542 | 596 | ||
@@ -548,7 +602,7 @@ int is_dsp_inst(struct pt_regs *regs) | |||
548 | { | 602 | { |
549 | unsigned short inst; | 603 | unsigned short inst; |
550 | 604 | ||
551 | /* | 605 | /* |
552 | * Safe guard if DSP mode is already enabled or we're lacking | 606 | * Safe guard if DSP mode is already enabled or we're lacking |
553 | * the DSP altogether. | 607 | * the DSP altogether. |
554 | */ | 608 | */ |
@@ -569,27 +623,49 @@ int is_dsp_inst(struct pt_regs *regs) | |||
569 | #define is_dsp_inst(regs) (0) | 623 | #define is_dsp_inst(regs) (0) |
570 | #endif /* CONFIG_SH_DSP */ | 624 | #endif /* CONFIG_SH_DSP */ |
571 | 625 | ||
626 | #ifdef CONFIG_CPU_SH2A | ||
627 | asmlinkage void do_divide_error(unsigned long r4, unsigned long r5, | ||
628 | unsigned long r6, unsigned long r7, | ||
629 | struct pt_regs __regs) | ||
630 | { | ||
631 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
632 | siginfo_t info; | ||
633 | |||
634 | switch (r4) { | ||
635 | case TRAP_DIVZERO_ERROR: | ||
636 | info.si_code = FPE_INTDIV; | ||
637 | break; | ||
638 | case TRAP_DIVOVF_ERROR: | ||
639 | info.si_code = FPE_INTOVF; | ||
640 | break; | ||
641 | } | ||
642 | |||
643 | force_sig_info(SIGFPE, &info, current); | ||
644 | } | ||
645 | #endif | ||
646 | |||
572 | /* arch/sh/kernel/cpu/sh4/fpu.c */ | 647 | /* arch/sh/kernel/cpu/sh4/fpu.c */ |
573 | extern int do_fpu_inst(unsigned short, struct pt_regs *); | 648 | extern int do_fpu_inst(unsigned short, struct pt_regs *); |
574 | extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, | 649 | extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5, |
575 | unsigned long r6, unsigned long r7, struct pt_regs regs); | 650 | unsigned long r6, unsigned long r7, struct pt_regs __regs); |
576 | 651 | ||
577 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | 652 | asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, |
578 | unsigned long r6, unsigned long r7, | 653 | unsigned long r6, unsigned long r7, |
579 | struct pt_regs regs) | 654 | struct pt_regs __regs) |
580 | { | 655 | { |
656 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
581 | unsigned long error_code; | 657 | unsigned long error_code; |
582 | struct task_struct *tsk = current; | 658 | struct task_struct *tsk = current; |
583 | 659 | ||
584 | #ifdef CONFIG_SH_FPU_EMU | 660 | #ifdef CONFIG_SH_FPU_EMU |
585 | unsigned short inst; | 661 | unsigned short inst = 0; |
586 | int err; | 662 | int err; |
587 | 663 | ||
588 | get_user(inst, (unsigned short*)regs.pc); | 664 | get_user(inst, (unsigned short*)regs->pc); |
589 | 665 | ||
590 | err = do_fpu_inst(inst, ®s); | 666 | err = do_fpu_inst(inst, regs); |
591 | if (!err) { | 667 | if (!err) { |
592 | regs.pc += 2; | 668 | regs->pc += 2; |
593 | return; | 669 | return; |
594 | } | 670 | } |
595 | /* not a FPU inst. */ | 671 | /* not a FPU inst. */ |
@@ -597,20 +673,19 @@ asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5, | |||
597 | 673 | ||
598 | #ifdef CONFIG_SH_DSP | 674 | #ifdef CONFIG_SH_DSP |
599 | /* Check if it's a DSP instruction */ | 675 | /* Check if it's a DSP instruction */ |
600 | if (is_dsp_inst(®s)) { | 676 | if (is_dsp_inst(regs)) { |
601 | /* Enable DSP mode, and restart instruction. */ | 677 | /* Enable DSP mode, and restart instruction. */ |
602 | regs.sr |= SR_DSP; | 678 | regs->sr |= SR_DSP; |
603 | return; | 679 | return; |
604 | } | 680 | } |
605 | #endif | 681 | #endif |
606 | 682 | ||
607 | asm volatile("stc r2_bank, %0": "=r" (error_code)); | 683 | lookup_exception_vector(error_code); |
684 | |||
608 | local_irq_enable(); | 685 | local_irq_enable(); |
609 | tsk->thread.error_code = error_code; | 686 | CHK_REMOTE_DEBUG(regs); |
610 | tsk->thread.trap_no = TRAP_RESERVED_INST; | ||
611 | CHK_REMOTE_DEBUG(®s); | ||
612 | force_sig(SIGILL, tsk); | 687 | force_sig(SIGILL, tsk); |
613 | die_if_no_fixup("reserved instruction", ®s, error_code); | 688 | die_if_no_fixup("reserved instruction", regs, error_code); |
614 | } | 689 | } |
615 | 690 | ||
616 | #ifdef CONFIG_SH_FPU_EMU | 691 | #ifdef CONFIG_SH_FPU_EMU |
@@ -658,39 +733,41 @@ static int emulate_branch(unsigned short inst, struct pt_regs* regs) | |||
658 | 733 | ||
659 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, | 734 | asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5, |
660 | unsigned long r6, unsigned long r7, | 735 | unsigned long r6, unsigned long r7, |
661 | struct pt_regs regs) | 736 | struct pt_regs __regs) |
662 | { | 737 | { |
738 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
663 | unsigned long error_code; | 739 | unsigned long error_code; |
664 | struct task_struct *tsk = current; | 740 | struct task_struct *tsk = current; |
665 | #ifdef CONFIG_SH_FPU_EMU | 741 | #ifdef CONFIG_SH_FPU_EMU |
666 | unsigned short inst; | 742 | unsigned short inst = 0; |
667 | 743 | ||
668 | get_user(inst, (unsigned short *)regs.pc + 1); | 744 | get_user(inst, (unsigned short *)regs->pc + 1); |
669 | if (!do_fpu_inst(inst, ®s)) { | 745 | if (!do_fpu_inst(inst, regs)) { |
670 | get_user(inst, (unsigned short *)regs.pc); | 746 | get_user(inst, (unsigned short *)regs->pc); |
671 | if (!emulate_branch(inst, ®s)) | 747 | if (!emulate_branch(inst, regs)) |
672 | return; | 748 | return; |
673 | /* fault in branch.*/ | 749 | /* fault in branch.*/ |
674 | } | 750 | } |
675 | /* not a FPU inst. */ | 751 | /* not a FPU inst. */ |
676 | #endif | 752 | #endif |
677 | 753 | ||
678 | asm volatile("stc r2_bank, %0": "=r" (error_code)); | 754 | lookup_exception_vector(error_code); |
755 | |||
679 | local_irq_enable(); | 756 | local_irq_enable(); |
680 | tsk->thread.error_code = error_code; | 757 | CHK_REMOTE_DEBUG(regs); |
681 | tsk->thread.trap_no = TRAP_RESERVED_INST; | ||
682 | CHK_REMOTE_DEBUG(®s); | ||
683 | force_sig(SIGILL, tsk); | 758 | force_sig(SIGILL, tsk); |
684 | die_if_no_fixup("illegal slot instruction", ®s, error_code); | 759 | die_if_no_fixup("illegal slot instruction", regs, error_code); |
685 | } | 760 | } |
686 | 761 | ||
687 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | 762 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, |
688 | unsigned long r6, unsigned long r7, | 763 | unsigned long r6, unsigned long r7, |
689 | struct pt_regs regs) | 764 | struct pt_regs __regs) |
690 | { | 765 | { |
766 | struct pt_regs *regs = RELOC_HIDE(&__regs, 0); | ||
691 | long ex; | 767 | long ex; |
692 | asm volatile("stc r2_bank, %0" : "=r" (ex)); | 768 | |
693 | die_if_kernel("exception", ®s, ex); | 769 | lookup_exception_vector(ex); |
770 | die_if_kernel("exception", regs, ex); | ||
694 | } | 771 | } |
695 | 772 | ||
696 | #if defined(CONFIG_SH_STANDARD_BIOS) | 773 | #if defined(CONFIG_SH_STANDARD_BIOS) |
@@ -735,12 +812,16 @@ void *set_exception_table_vec(unsigned int vec, void *handler) | |||
735 | { | 812 | { |
736 | extern void *exception_handling_table[]; | 813 | extern void *exception_handling_table[]; |
737 | void *old_handler; | 814 | void *old_handler; |
738 | 815 | ||
739 | old_handler = exception_handling_table[vec]; | 816 | old_handler = exception_handling_table[vec]; |
740 | exception_handling_table[vec] = handler; | 817 | exception_handling_table[vec] = handler; |
741 | return old_handler; | 818 | return old_handler; |
742 | } | 819 | } |
743 | 820 | ||
821 | extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5, | ||
822 | unsigned long r6, unsigned long r7, | ||
823 | struct pt_regs __regs); | ||
824 | |||
744 | void __init trap_init(void) | 825 | void __init trap_init(void) |
745 | { | 826 | { |
746 | set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); | 827 | set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); |
@@ -759,7 +840,15 @@ void __init trap_init(void) | |||
759 | set_exception_table_evt(0x800, do_fpu_state_restore); | 840 | set_exception_table_evt(0x800, do_fpu_state_restore); |
760 | set_exception_table_evt(0x820, do_fpu_state_restore); | 841 | set_exception_table_evt(0x820, do_fpu_state_restore); |
761 | #endif | 842 | #endif |
762 | 843 | ||
844 | #ifdef CONFIG_CPU_SH2 | ||
845 | set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler); | ||
846 | #endif | ||
847 | #ifdef CONFIG_CPU_SH2A | ||
848 | set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); | ||
849 | set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); | ||
850 | #endif | ||
851 | |||
763 | /* Setup VBR for boot cpu */ | 852 | /* Setup VBR for boot cpu */ |
764 | per_cpu_trap_init(); | 853 | per_cpu_trap_init(); |
765 | } | 854 | } |
@@ -784,6 +873,11 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, | |||
784 | } | 873 | } |
785 | 874 | ||
786 | printk("\n"); | 875 | printk("\n"); |
876 | |||
877 | if (!tsk) | ||
878 | tsk = current; | ||
879 | |||
880 | debug_show_held_locks(tsk); | ||
787 | } | 881 | } |
788 | 882 | ||
789 | void show_stack(struct task_struct *tsk, unsigned long *sp) | 883 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 9dd606464d23..4e0362f50384 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig | |||
@@ -4,8 +4,12 @@ menu "Processor selection" | |||
4 | # Processor families | 4 | # Processor families |
5 | # | 5 | # |
6 | config CPU_SH2 | 6 | config CPU_SH2 |
7 | select SH_WRITETHROUGH if !CPU_SH2A | ||
7 | bool | 8 | bool |
8 | select SH_WRITETHROUGH | 9 | |
10 | config CPU_SH2A | ||
11 | bool | ||
12 | select CPU_SH2 | ||
9 | 13 | ||
10 | config CPU_SH3 | 14 | config CPU_SH3 |
11 | bool | 15 | bool |
@@ -16,6 +20,7 @@ config CPU_SH4 | |||
16 | bool | 20 | bool |
17 | select CPU_HAS_INTEVT | 21 | select CPU_HAS_INTEVT |
18 | select CPU_HAS_SR_RB | 22 | select CPU_HAS_SR_RB |
23 | select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40 | ||
19 | 24 | ||
20 | config CPU_SH4A | 25 | config CPU_SH4A |
21 | bool | 26 | bool |
@@ -40,6 +45,16 @@ config CPU_SUBTYPE_SH7604 | |||
40 | bool "Support SH7604 processor" | 45 | bool "Support SH7604 processor" |
41 | select CPU_SH2 | 46 | select CPU_SH2 |
42 | 47 | ||
48 | config CPU_SUBTYPE_SH7619 | ||
49 | bool "Support SH7619 processor" | ||
50 | select CPU_SH2 | ||
51 | |||
52 | comment "SH-2A Processor Support" | ||
53 | |||
54 | config CPU_SUBTYPE_SH7206 | ||
55 | bool "Support SH7206 processor" | ||
56 | select CPU_SH2A | ||
57 | |||
43 | comment "SH-3 Processor Support" | 58 | comment "SH-3 Processor Support" |
44 | 59 | ||
45 | config CPU_SUBTYPE_SH7300 | 60 | config CPU_SUBTYPE_SH7300 |
@@ -89,6 +104,7 @@ comment "SH-4 Processor Support" | |||
89 | config CPU_SUBTYPE_SH7750 | 104 | config CPU_SUBTYPE_SH7750 |
90 | bool "Support SH7750 processor" | 105 | bool "Support SH7750 processor" |
91 | select CPU_SH4 | 106 | select CPU_SH4 |
107 | select CPU_HAS_IPR_IRQ | ||
92 | help | 108 | help |
93 | Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU. | 109 | Select SH7750 if you have a 200 Mhz SH-4 HD6417750 CPU. |
94 | 110 | ||
@@ -104,15 +120,18 @@ config CPU_SUBTYPE_SH7750R | |||
104 | bool "Support SH7750R processor" | 120 | bool "Support SH7750R processor" |
105 | select CPU_SH4 | 121 | select CPU_SH4 |
106 | select CPU_SUBTYPE_SH7750 | 122 | select CPU_SUBTYPE_SH7750 |
123 | select CPU_HAS_IPR_IRQ | ||
107 | 124 | ||
108 | config CPU_SUBTYPE_SH7750S | 125 | config CPU_SUBTYPE_SH7750S |
109 | bool "Support SH7750S processor" | 126 | bool "Support SH7750S processor" |
110 | select CPU_SH4 | 127 | select CPU_SH4 |
111 | select CPU_SUBTYPE_SH7750 | 128 | select CPU_SUBTYPE_SH7750 |
129 | select CPU_HAS_IPR_IRQ | ||
112 | 130 | ||
113 | config CPU_SUBTYPE_SH7751 | 131 | config CPU_SUBTYPE_SH7751 |
114 | bool "Support SH7751 processor" | 132 | bool "Support SH7751 processor" |
115 | select CPU_SH4 | 133 | select CPU_SH4 |
134 | select CPU_HAS_IPR_IRQ | ||
116 | help | 135 | help |
117 | Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU, | 136 | Select SH7751 if you have a 166 Mhz SH-4 HD6417751 CPU, |
118 | or if you have a HD6417751R CPU. | 137 | or if you have a HD6417751R CPU. |
@@ -121,6 +140,7 @@ config CPU_SUBTYPE_SH7751R | |||
121 | bool "Support SH7751R processor" | 140 | bool "Support SH7751R processor" |
122 | select CPU_SH4 | 141 | select CPU_SH4 |
123 | select CPU_SUBTYPE_SH7751 | 142 | select CPU_SUBTYPE_SH7751 |
143 | select CPU_HAS_IPR_IRQ | ||
124 | 144 | ||
125 | config CPU_SUBTYPE_SH7760 | 145 | config CPU_SUBTYPE_SH7760 |
126 | bool "Support SH7760 processor" | 146 | bool "Support SH7760 processor" |
@@ -157,6 +177,11 @@ config CPU_SUBTYPE_SH7780 | |||
157 | select CPU_SH4A | 177 | select CPU_SH4A |
158 | select CPU_HAS_INTC2_IRQ | 178 | select CPU_HAS_INTC2_IRQ |
159 | 179 | ||
180 | config CPU_SUBTYPE_SH7785 | ||
181 | bool "Support SH7785 processor" | ||
182 | select CPU_SH4A | ||
183 | select CPU_HAS_INTC2_IRQ | ||
184 | |||
160 | comment "SH4AL-DSP Processor Support" | 185 | comment "SH4AL-DSP Processor Support" |
161 | 186 | ||
162 | config CPU_SUBTYPE_SH73180 | 187 | config CPU_SUBTYPE_SH73180 |
@@ -216,13 +241,22 @@ config MEMORY_SIZE | |||
216 | 241 | ||
217 | config 32BIT | 242 | config 32BIT |
218 | bool "Support 32-bit physical addressing through PMB" | 243 | bool "Support 32-bit physical addressing through PMB" |
219 | depends on CPU_SH4A && MMU | 244 | depends on CPU_SH4A && MMU && (!X2TLB || BROKEN) |
220 | default y | 245 | default y |
221 | help | 246 | help |
222 | If you say Y here, physical addressing will be extended to | 247 | If you say Y here, physical addressing will be extended to |
223 | 32-bits through the SH-4A PMB. If this is not set, legacy | 248 | 32-bits through the SH-4A PMB. If this is not set, legacy |
224 | 29-bit physical addressing will be used. | 249 | 29-bit physical addressing will be used. |
225 | 250 | ||
251 | config X2TLB | ||
252 | bool "Enable extended TLB mode" | ||
253 | depends on CPU_SUBTYPE_SH7785 && MMU && EXPERIMENTAL | ||
254 | help | ||
255 | Selecting this option will enable the extended mode of the SH-X2 | ||
256 | TLB. For legacy SH-X behaviour and interoperability, say N. For | ||
257 | all of the fun new features and a willingness to submit bug reports, | ||
258 | say Y. | ||
259 | |||
226 | config VSYSCALL | 260 | config VSYSCALL |
227 | bool "Support vsyscall page" | 261 | bool "Support vsyscall page" |
228 | depends on MMU | 262 | depends on MMU |
@@ -237,16 +271,52 @@ config VSYSCALL | |||
237 | (the default value) say Y. | 271 | (the default value) say Y. |
238 | 272 | ||
239 | choice | 273 | choice |
274 | prompt "Kernel page size" | ||
275 | default PAGE_SIZE_4KB | ||
276 | |||
277 | config PAGE_SIZE_4KB | ||
278 | bool "4kB" | ||
279 | help | ||
280 | This is the default page size used by all SuperH CPUs. | ||
281 | |||
282 | config PAGE_SIZE_8KB | ||
283 | bool "8kB" | ||
284 | depends on EXPERIMENTAL && X2TLB | ||
285 | help | ||
286 | This enables 8kB pages as supported by SH-X2 and later MMUs. | ||
287 | |||
288 | config PAGE_SIZE_64KB | ||
289 | bool "64kB" | ||
290 | depends on EXPERIMENTAL && CPU_SH4 | ||
291 | help | ||
292 | This enables support for 64kB pages, available on SH-4 | ||
293 | and later CPUs. Highly experimental, not recommended. | ||
294 | |||
295 | endchoice | ||
296 | |||
297 | choice | ||
240 | prompt "HugeTLB page size" | 298 | prompt "HugeTLB page size" |
241 | depends on HUGETLB_PAGE && CPU_SH4 && MMU | 299 | depends on HUGETLB_PAGE && CPU_SH4 && MMU |
242 | default HUGETLB_PAGE_SIZE_64K | 300 | default HUGETLB_PAGE_SIZE_64K |
243 | 301 | ||
244 | config HUGETLB_PAGE_SIZE_64K | 302 | config HUGETLB_PAGE_SIZE_64K |
245 | bool "64K" | 303 | bool "64kB" |
304 | |||
305 | config HUGETLB_PAGE_SIZE_256K | ||
306 | bool "256kB" | ||
307 | depends on X2TLB | ||
246 | 308 | ||
247 | config HUGETLB_PAGE_SIZE_1MB | 309 | config HUGETLB_PAGE_SIZE_1MB |
248 | bool "1MB" | 310 | bool "1MB" |
249 | 311 | ||
312 | config HUGETLB_PAGE_SIZE_4MB | ||
313 | bool "4MB" | ||
314 | depends on X2TLB | ||
315 | |||
316 | config HUGETLB_PAGE_SIZE_64MB | ||
317 | bool "64MB" | ||
318 | depends on X2TLB | ||
319 | |||
250 | endchoice | 320 | endchoice |
251 | 321 | ||
252 | source "mm/Kconfig" | 322 | source "mm/Kconfig" |
@@ -274,7 +344,6 @@ config SH_DIRECT_MAPPED | |||
274 | 344 | ||
275 | config SH_WRITETHROUGH | 345 | config SH_WRITETHROUGH |
276 | bool "Use write-through caching" | 346 | bool "Use write-through caching" |
277 | default y if CPU_SH2 | ||
278 | help | 347 | help |
279 | Selecting this option will configure the caches in write-through | 348 | Selecting this option will configure the caches in write-through |
280 | mode, as opposed to the default write-back configuration. | 349 | mode, as opposed to the default write-back configuration. |
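Note: the "Kernel page size" choice added above only sets CONFIG_PAGE_SIZE_* symbols; the architecture headers then turn the selected symbol into PAGE_SHIFT/PAGE_SIZE. A minimal sketch of that mapping, assuming the usual CONFIG_ macro convention (the actual asm-sh/page.h contents are not part of this hunk):

	/* Illustrative only -- not taken from this patch. */
	#if defined(CONFIG_PAGE_SIZE_4KB)
	# define PAGE_SHIFT	12
	#elif defined(CONFIG_PAGE_SIZE_8KB)
	# define PAGE_SHIFT	13
	#elif defined(CONFIG_PAGE_SIZE_64KB)
	# define PAGE_SHIFT	16
	#else
	# error "Unknown page size configuration"
	#endif

	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
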
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c index 2689cb24ea2b..6614033f6be9 100644 --- a/arch/sh/mm/cache-sh2.c +++ b/arch/sh/mm/cache-sh2.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Released under the terms of the GNU GPL v2.0. | 6 | * Released under the terms of the GNU GPL v2.0. |
7 | */ | 7 | */ |
8 | |||
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
9 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
10 | 11 | ||
@@ -14,37 +15,43 @@ | |||
14 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
15 | #include <asm/io.h> | 16 | #include <asm/io.h> |
16 | 17 | ||
17 | /* | 18 | void __flush_wback_region(void *start, int size) |
18 | * Calculate the OC address and set the way bit on the SH-2. | ||
19 | * | ||
20 | * We must have already jump_to_P2()'ed prior to calling this | ||
21 | * function, since we rely on CCR manipulation to do the | ||
22 | * Right Thing(tm). | ||
23 | */ | ||
24 | unsigned long __get_oc_addr(unsigned long set, unsigned long way) | ||
25 | { | 19 | { |
26 | unsigned long ccr; | 20 | unsigned long v; |
27 | 21 | unsigned long begin, end; | |
28 | /* | 22 | |
29 | * On SH-2 the way bit isn't tracked in the address field | 23 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); |
30 | * if we're doing address array access .. instead, we need | 24 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) |
31 | * to manually switch out the way in the CCR. | 25 | & ~(L1_CACHE_BYTES-1); |
32 | */ | 26 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
33 | ccr = ctrl_inl(CCR); | 27 | /* FIXME cache purge */ |
34 | ccr &= ~0x00c0; | 28 | ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); |
35 | ccr |= way << cpu_data->dcache.way_shift; | 29 | } |
36 | 30 | } | |
37 | /* | 31 | |
38 | * Despite the number of sets being halved, we end up losing | 32 | void __flush_purge_region(void *start, int size) |
39 | * the first 2 ways to OCRAM instead of the last 2 (if we're | 33 | { |
40 | * 4-way). As a result, forcibly setting the W1 bit handily | 34 | unsigned long v; |
41 | * bumps us up 2 ways. | 35 | unsigned long begin, end; |
42 | */ | 36 | |
43 | if (ccr & CCR_CACHE_ORA) | 37 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); |
44 | ccr |= 1 << (cpu_data->dcache.way_shift + 1); | 38 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) |
45 | 39 | & ~(L1_CACHE_BYTES-1); | |
46 | ctrl_outl(ccr, CCR); | 40 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { |
47 | 41 | ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); | |
48 | return CACHE_OC_ADDRESS_ARRAY | (set << cpu_data->dcache.entry_shift); | 42 | } |
43 | } | ||
44 | |||
45 | void __flush_invalidate_region(void *start, int size) | ||
46 | { | ||
47 | unsigned long v; | ||
48 | unsigned long begin, end; | ||
49 | |||
50 | begin = (unsigned long)start & ~(L1_CACHE_BYTES-1); | ||
51 | end = ((unsigned long)start + size + L1_CACHE_BYTES-1) | ||
52 | & ~(L1_CACHE_BYTES-1); | ||
53 | for (v = begin; v < end; v+=L1_CACHE_BYTES) { | ||
54 | ctrl_outl((v & 0x1ffffc00), (v & 0x00000ff0) | 0x00000008); | ||
55 | } | ||
49 | } | 56 | } |
50 | 57 | ||
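Note: the three new SH-2 routines above share the same bookkeeping: round the start of the region down to an L1 cache-line boundary, round the end up, and walk the region one line at a time. A standalone user-space sketch of just that arithmetic, with an assumed 32-byte line size:

	#include <stdio.h>

	#define L1_CACHE_BYTES 32	/* assumed line size, for illustration */

	int main(void)
	{
		unsigned long start = 0x0c001234, size = 100;
		unsigned long begin, end, v;

		begin = start & ~(L1_CACHE_BYTES - 1);		/* round down */
		end = (start + size + L1_CACHE_BYTES - 1)
			& ~(L1_CACHE_BYTES - 1);		/* round up */

		for (v = begin; v < end; v += L1_CACHE_BYTES)
			printf("would write back/invalidate line at %#lx\n", v);

		return 0;
	}
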
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index e48cc22724d9..ae531affccbd 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -11,12 +11,8 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <asm/addrspace.h> | 14 | #include <linux/io.h> |
15 | #include <asm/pgtable.h> | 15 | #include <linux/mutex.h> |
16 | #include <asm/processor.h> | ||
17 | #include <asm/cache.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
21 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
22 | 18 | ||
@@ -83,9 +79,9 @@ static void __init emit_cache_params(void) | |||
83 | */ | 79 | */ |
84 | 80 | ||
85 | /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */ | 81 | /* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */ |
86 | #define MAX_P3_SEMAPHORES 16 | 82 | #define MAX_P3_MUTEXES 16 |
87 | 83 | ||
88 | struct semaphore p3map_sem[MAX_P3_SEMAPHORES]; | 84 | struct mutex p3map_mutex[MAX_P3_MUTEXES]; |
89 | 85 | ||
90 | void __init p3_cache_init(void) | 86 | void __init p3_cache_init(void) |
91 | { | 87 | { |
@@ -115,7 +111,7 @@ void __init p3_cache_init(void) | |||
115 | panic("%s failed.", __FUNCTION__); | 111 | panic("%s failed.", __FUNCTION__); |
116 | 112 | ||
117 | for (i = 0; i < cpu_data->dcache.n_aliases; i++) | 113 | for (i = 0; i < cpu_data->dcache.n_aliases; i++) |
118 | sema_init(&p3map_sem[i], 1); | 114 | mutex_init(&p3map_mutex[i]); |
119 | } | 115 | } |
120 | 116 | ||
121 | /* | 117 | /* |
@@ -229,7 +225,7 @@ static inline void flush_cache_4096(unsigned long start, | |||
229 | */ | 225 | */ |
230 | if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || | 226 | if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) || |
231 | (start < CACHE_OC_ADDRESS_ARRAY)) | 227 | (start < CACHE_OC_ADDRESS_ARRAY)) |
232 | exec_offset = 0x20000000; | 228 | exec_offset = 0x20000000; |
233 | 229 | ||
234 | local_irq_save(flags); | 230 | local_irq_save(flags); |
235 | __flush_cache_4096(start | SH_CACHE_ASSOC, | 231 | __flush_cache_4096(start | SH_CACHE_ASSOC, |
@@ -250,7 +246,7 @@ void flush_dcache_page(struct page *page) | |||
250 | 246 | ||
251 | /* Loop all the D-cache */ | 247 | /* Loop all the D-cache */ |
252 | n = cpu_data->dcache.n_aliases; | 248 | n = cpu_data->dcache.n_aliases; |
253 | for (i = 0; i < n; i++, addr += PAGE_SIZE) | 249 | for (i = 0; i < n; i++, addr += 4096) |
254 | flush_cache_4096(addr, phys); | 250 | flush_cache_4096(addr, phys); |
255 | } | 251 | } |
256 | 252 | ||
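Note: the p3map change above is a mechanical semaphore-to-mutex conversion: a binary semaphore that only ever guarded a critical section becomes a struct mutex. A minimal sketch of the before/after pattern (names are invented, not from the patch):

	#include <linux/mutex.h>

	static struct mutex example_lock;	/* was: struct semaphore */

	static void example_init(void)
	{
		mutex_init(&example_lock);	/* was: sema_init(&example_lock, 1) */
	}

	static void example_critical_section(void)
	{
		mutex_lock(&example_lock);	/* was: down() */
		/* ... touch the shared P3 mapping ... */
		mutex_unlock(&example_lock);	/* was: up() */
	}
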
diff --git a/arch/sh/mm/clear_page.S b/arch/sh/mm/clear_page.S index 7b96425ae270..8a706131e521 100644 --- a/arch/sh/mm/clear_page.S +++ b/arch/sh/mm/clear_page.S | |||
@@ -1,12 +1,12 @@ | |||
1 | /* $Id: clear_page.S,v 1.13 2003/08/25 17:03:10 lethal Exp $ | 1 | /* |
2 | * | ||
3 | * __clear_user_page, __clear_user, clear_page implementation of SuperH | 2 | * __clear_user_page, __clear_user, clear_page implementation of SuperH |
4 | * | 3 | * |
5 | * Copyright (C) 2001 Kaz Kojima | 4 | * Copyright (C) 2001 Kaz Kojima |
6 | * Copyright (C) 2001, 2002 Niibe Yutaka | 5 | * Copyright (C) 2001, 2002 Niibe Yutaka |
7 | * | 6 | * Copyright (C) 2006 Paul Mundt |
8 | */ | 7 | */ |
9 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/page.h> | ||
10 | 10 | ||
11 | /* | 11 | /* |
12 | * clear_page_slow | 12 | * clear_page_slow |
@@ -18,11 +18,11 @@ | |||
18 | /* | 18 | /* |
19 | * r0 --- scratch | 19 | * r0 --- scratch |
20 | * r4 --- to | 20 | * r4 --- to |
21 | * r5 --- to + 4096 | 21 | * r5 --- to + PAGE_SIZE |
22 | */ | 22 | */ |
23 | ENTRY(clear_page_slow) | 23 | ENTRY(clear_page_slow) |
24 | mov r4,r5 | 24 | mov r4,r5 |
25 | mov.w .Llimit,r0 | 25 | mov.l .Llimit,r0 |
26 | add r0,r5 | 26 | add r0,r5 |
27 | mov #0,r0 | 27 | mov #0,r0 |
28 | ! | 28 | ! |
@@ -50,7 +50,7 @@ ENTRY(clear_page_slow) | |||
50 | ! | 50 | ! |
51 | rts | 51 | rts |
52 | nop | 52 | nop |
53 | .Llimit: .word (4096-28) | 53 | .Llimit: .long (PAGE_SIZE-28) |
54 | 54 | ||
55 | ENTRY(__clear_user) | 55 | ENTRY(__clear_user) |
56 | ! | 56 | ! |
@@ -164,10 +164,10 @@ ENTRY(__clear_user) | |||
164 | * r0 --- scratch | 164 | * r0 --- scratch |
165 | * r4 --- to | 165 | * r4 --- to |
166 | * r5 --- orig_to | 166 | * r5 --- orig_to |
167 | * r6 --- to + 4096 | 167 | * r6 --- to + PAGE_SIZE |
168 | */ | 168 | */ |
169 | ENTRY(__clear_user_page) | 169 | ENTRY(__clear_user_page) |
170 | mov.w .L4096,r0 | 170 | mov.l .Lpsz,r0 |
171 | mov r4,r6 | 171 | mov r4,r6 |
172 | add r0,r6 | 172 | add r0,r6 |
173 | mov #0,r0 | 173 | mov #0,r0 |
@@ -191,7 +191,7 @@ ENTRY(__clear_user_page) | |||
191 | ! | 191 | ! |
192 | rts | 192 | rts |
193 | nop | 193 | nop |
194 | .L4096: .word 4096 | 194 | .Lpsz: .long PAGE_SIZE |
195 | 195 | ||
196 | #endif | 196 | #endif |
197 | 197 | ||
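Note: the switch from .word/mov.w to .long/mov.l above is needed because the constant is now PAGE_SIZE, and with the new 64kB page option it no longer fits in the sign-extended 16-bit value that mov.w loads. A quick user-space check of the ranges involved (illustrative only):

	#include <stdio.h>
	#include <limits.h>

	int main(void)
	{
		long page_sizes[] = { 4096, 8192, 65536 };
		int i;

		for (i = 0; i < 3; i++) {
			long limit = page_sizes[i] - 28;	/* the .Llimit constant */
			printf("PAGE_SIZE %6ld: limit %6ld fits in 16-bit signed: %s\n",
			       page_sizes[i], limit,
			       (limit >= SHRT_MIN && limit <= SHRT_MAX) ? "yes" : "no");
		}
		return 0;
	}
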
diff --git a/arch/sh/mm/copy_page.S b/arch/sh/mm/copy_page.S index 1addffe117c3..397c94c97315 100644 --- a/arch/sh/mm/copy_page.S +++ b/arch/sh/mm/copy_page.S | |||
@@ -1,12 +1,12 @@ | |||
1 | /* $Id: copy_page.S,v 1.8 2003/08/25 17:03:10 lethal Exp $ | 1 | /* |
2 | * | ||
3 | * copy_page, __copy_user_page, __copy_user implementation of SuperH | 2 | * copy_page, __copy_user_page, __copy_user implementation of SuperH |
4 | * | 3 | * |
5 | * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima | 4 | * Copyright (C) 2001 Niibe Yutaka & Kaz Kojima |
6 | * Copyright (C) 2002 Toshinobu Sugioka | 5 | * Copyright (C) 2002 Toshinobu Sugioka |
7 | * | 6 | * Copyright (C) 2006 Paul Mundt |
8 | */ | 7 | */ |
9 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
9 | #include <asm/page.h> | ||
10 | 10 | ||
11 | /* | 11 | /* |
12 | * copy_page_slow | 12 | * copy_page_slow |
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | /* | 19 | /* |
20 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch | 20 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch |
21 | * r8 --- from + 4096 | 21 | * r8 --- from + PAGE_SIZE |
22 | * r9 --- not used | 22 | * r9 --- not used |
23 | * r10 --- to | 23 | * r10 --- to |
24 | * r11 --- from | 24 | * r11 --- from |
@@ -30,7 +30,7 @@ ENTRY(copy_page_slow) | |||
30 | mov r4,r10 | 30 | mov r4,r10 |
31 | mov r5,r11 | 31 | mov r5,r11 |
32 | mov r5,r8 | 32 | mov r5,r8 |
33 | mov.w .L4096,r0 | 33 | mov.l .Lpsz,r0 |
34 | add r0,r8 | 34 | add r0,r8 |
35 | ! | 35 | ! |
36 | 1: mov.l @r11+,r0 | 36 | 1: mov.l @r11+,r0 |
@@ -80,7 +80,7 @@ ENTRY(copy_page_slow) | |||
80 | 80 | ||
81 | /* | 81 | /* |
82 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch | 82 | * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch |
83 | * r8 --- from + 4096 | 83 | * r8 --- from + PAGE_SIZE |
84 | * r9 --- orig_to | 84 | * r9 --- orig_to |
85 | * r10 --- to | 85 | * r10 --- to |
86 | * r11 --- from | 86 | * r11 --- from |
@@ -94,7 +94,7 @@ ENTRY(__copy_user_page) | |||
94 | mov r5,r11 | 94 | mov r5,r11 |
95 | mov r6,r9 | 95 | mov r6,r9 |
96 | mov r5,r8 | 96 | mov r5,r8 |
97 | mov.w .L4096,r0 | 97 | mov.l .Lpsz,r0 |
98 | add r0,r8 | 98 | add r0,r8 |
99 | ! | 99 | ! |
100 | 1: ocbi @r9 | 100 | 1: ocbi @r9 |
@@ -129,7 +129,7 @@ ENTRY(__copy_user_page) | |||
129 | rts | 129 | rts |
130 | nop | 130 | nop |
131 | #endif | 131 | #endif |
132 | .L4096: .word 4096 | 132 | .Lpsz: .long PAGE_SIZE |
133 | /* | 133 | /* |
134 | * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); | 134 | * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); |
135 | * Return the number of bytes NOT copied | 135 | * Return the number of bytes NOT copied |
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 68663b8f99ae..716ebf568af2 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c | |||
@@ -26,13 +26,19 @@ extern void die(const char *,struct pt_regs *,long); | |||
26 | * and the problem, and then passes it off to one of the appropriate | 26 | * and the problem, and then passes it off to one of the appropriate |
27 | * routines. | 27 | * routines. |
28 | */ | 28 | */ |
29 | asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | 29 | asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, |
30 | unsigned long address) | 30 | unsigned long writeaccess, |
31 | unsigned long address) | ||
31 | { | 32 | { |
32 | struct task_struct *tsk; | 33 | struct task_struct *tsk; |
33 | struct mm_struct *mm; | 34 | struct mm_struct *mm; |
34 | struct vm_area_struct * vma; | 35 | struct vm_area_struct * vma; |
35 | unsigned long page; | 36 | unsigned long page; |
37 | int si_code; | ||
38 | siginfo_t info; | ||
39 | |||
40 | trace_hardirqs_on(); | ||
41 | local_irq_enable(); | ||
36 | 42 | ||
37 | #ifdef CONFIG_SH_KGDB | 43 | #ifdef CONFIG_SH_KGDB |
38 | if (kgdb_nofault && kgdb_bus_err_hook) | 44 | if (kgdb_nofault && kgdb_bus_err_hook) |
@@ -41,6 +47,46 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | |||
41 | 47 | ||
42 | tsk = current; | 48 | tsk = current; |
43 | mm = tsk->mm; | 49 | mm = tsk->mm; |
50 | si_code = SEGV_MAPERR; | ||
51 | |||
52 | if (unlikely(address >= TASK_SIZE)) { | ||
53 | /* | ||
54 | * Synchronize this task's top level page-table | ||
55 | * with the 'reference' page table. | ||
56 | * | ||
57 | * Do _not_ use "tsk" here. We might be inside | ||
58 | * an interrupt in the middle of a task switch.. | ||
59 | */ | ||
60 | int offset = pgd_index(address); | ||
61 | pgd_t *pgd, *pgd_k; | ||
62 | pud_t *pud, *pud_k; | ||
63 | pmd_t *pmd, *pmd_k; | ||
64 | |||
65 | pgd = get_TTB() + offset; | ||
66 | pgd_k = swapper_pg_dir + offset; | ||
67 | |||
68 | /* This will never happen with the folded page table. */ | ||
69 | if (!pgd_present(*pgd)) { | ||
70 | if (!pgd_present(*pgd_k)) | ||
71 | goto bad_area_nosemaphore; | ||
72 | set_pgd(pgd, *pgd_k); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | pud = pud_offset(pgd, address); | ||
77 | pud_k = pud_offset(pgd_k, address); | ||
78 | if (pud_present(*pud) || !pud_present(*pud_k)) | ||
79 | goto bad_area_nosemaphore; | ||
80 | set_pud(pud, *pud_k); | ||
81 | |||
82 | pmd = pmd_offset(pud, address); | ||
83 | pmd_k = pmd_offset(pud_k, address); | ||
84 | if (pmd_present(*pmd) || !pmd_present(*pmd_k)) | ||
85 | goto bad_area_nosemaphore; | ||
86 | set_pmd(pmd, *pmd_k); | ||
87 | |||
88 | return; | ||
89 | } | ||
44 | 90 | ||
45 | /* | 91 | /* |
46 | * If we're in an interrupt or have no user | 92 | * If we're in an interrupt or have no user |
@@ -65,6 +111,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, | |||
65 | * we can handle it.. | 111 | * we can handle it.. |
66 | */ | 112 | */ |
67 | good_area: | 113 | good_area: |
114 | si_code = SEGV_ACCERR; | ||
68 | if (writeaccess) { | 115 | if (writeaccess) { |
69 | if (!(vma->vm_flags & VM_WRITE)) | 116 | if (!(vma->vm_flags & VM_WRITE)) |
70 | goto bad_area; | 117 | goto bad_area; |
@@ -104,10 +151,13 @@ survive: | |||
104 | bad_area: | 151 | bad_area: |
105 | up_read(&mm->mmap_sem); | 152 | up_read(&mm->mmap_sem); |
106 | 153 | ||
154 | bad_area_nosemaphore: | ||
107 | if (user_mode(regs)) { | 155 | if (user_mode(regs)) { |
108 | tsk->thread.address = address; | 156 | info.si_signo = SIGSEGV; |
109 | tsk->thread.error_code = writeaccess; | 157 | info.si_errno = 0; |
110 | force_sig(SIGSEGV, tsk); | 158 | info.si_code = si_code; |
159 | info.si_addr = (void *) address; | ||
160 | force_sig_info(SIGSEGV, &info, tsk); | ||
111 | return; | 161 | return; |
112 | } | 162 | } |
113 | 163 | ||
@@ -127,11 +177,9 @@ no_context: | |||
127 | printk(KERN_ALERT "Unable to handle kernel paging request"); | 177 | printk(KERN_ALERT "Unable to handle kernel paging request"); |
128 | printk(" at virtual address %08lx\n", address); | 178 | printk(" at virtual address %08lx\n", address); |
129 | printk(KERN_ALERT "pc = %08lx\n", regs->pc); | 179 | printk(KERN_ALERT "pc = %08lx\n", regs->pc); |
130 | asm volatile("mov.l %1, %0" | 180 | page = (unsigned long)get_TTB(); |
131 | : "=r" (page) | ||
132 | : "m" (__m(MMU_TTB))); | ||
133 | if (page) { | 181 | if (page) { |
134 | page = ((unsigned long *) page)[address >> 22]; | 182 | page = ((unsigned long *) page)[address >> PGDIR_SHIFT]; |
135 | printk(KERN_ALERT "*pde = %08lx\n", page); | 183 | printk(KERN_ALERT "*pde = %08lx\n", page); |
136 | if (page & _PAGE_PRESENT) { | 184 | if (page & _PAGE_PRESENT) { |
137 | page &= PAGE_MASK; | 185 | page &= PAGE_MASK; |
@@ -166,98 +214,13 @@ do_sigbus: | |||
166 | * Send a sigbus, regardless of whether we were in kernel | 214 | * Send a sigbus, regardless of whether we were in kernel |
167 | * or user mode. | 215 | * or user mode. |
168 | */ | 216 | */ |
169 | tsk->thread.address = address; | 217 | info.si_signo = SIGBUS; |
170 | tsk->thread.error_code = writeaccess; | 218 | info.si_errno = 0; |
171 | tsk->thread.trap_no = 14; | 219 | info.si_code = BUS_ADRERR; |
172 | force_sig(SIGBUS, tsk); | 220 | info.si_addr = (void *)address; |
221 | force_sig_info(SIGBUS, &info, tsk); | ||
173 | 222 | ||
174 | /* Kernel mode? Handle exceptions or die */ | 223 | /* Kernel mode? Handle exceptions or die */ |
175 | if (!user_mode(regs)) | 224 | if (!user_mode(regs)) |
176 | goto no_context; | 225 | goto no_context; |
177 | } | 226 | } |
178 | |||
179 | #ifdef CONFIG_SH_STORE_QUEUES | ||
180 | /* | ||
181 | * This is a special case for the SH-4 store queues, as pages for this | ||
182 | * space still need to be faulted in before it's possible to flush the | ||
183 | * store queue cache for writeout to the remapped region. | ||
184 | */ | ||
185 | #define P3_ADDR_MAX (P4SEG_STORE_QUE + 0x04000000) | ||
186 | #else | ||
187 | #define P3_ADDR_MAX P4SEG | ||
188 | #endif | ||
189 | |||
190 | /* | ||
191 | * Called with interrupts disabled. | ||
192 | */ | ||
193 | asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs, | ||
194 | unsigned long writeaccess, | ||
195 | unsigned long address) | ||
196 | { | ||
197 | pgd_t *pgd; | ||
198 | pud_t *pud; | ||
199 | pmd_t *pmd; | ||
200 | pte_t *pte; | ||
201 | pte_t entry; | ||
202 | struct mm_struct *mm = current->mm; | ||
203 | spinlock_t *ptl; | ||
204 | int ret = 1; | ||
205 | |||
206 | #ifdef CONFIG_SH_KGDB | ||
207 | if (kgdb_nofault && kgdb_bus_err_hook) | ||
208 | kgdb_bus_err_hook(); | ||
209 | #endif | ||
210 | |||
211 | /* | ||
212 | * We don't take page faults for P1, P2, and parts of P4, these | ||
213 | * are always mapped, whether it be due to legacy behaviour in | ||
214 | * 29-bit mode, or due to PMB configuration in 32-bit mode. | ||
215 | */ | ||
216 | if (address >= P3SEG && address < P3_ADDR_MAX) { | ||
217 | pgd = pgd_offset_k(address); | ||
218 | mm = NULL; | ||
219 | } else { | ||
220 | if (unlikely(address >= TASK_SIZE || !mm)) | ||
221 | return 1; | ||
222 | |||
223 | pgd = pgd_offset(mm, address); | ||
224 | } | ||
225 | |||
226 | pud = pud_offset(pgd, address); | ||
227 | if (pud_none_or_clear_bad(pud)) | ||
228 | return 1; | ||
229 | pmd = pmd_offset(pud, address); | ||
230 | if (pmd_none_or_clear_bad(pmd)) | ||
231 | return 1; | ||
232 | |||
233 | if (mm) | ||
234 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); | ||
235 | else | ||
236 | pte = pte_offset_kernel(pmd, address); | ||
237 | |||
238 | entry = *pte; | ||
239 | if (unlikely(pte_none(entry) || pte_not_present(entry))) | ||
240 | goto unlock; | ||
241 | if (unlikely(writeaccess && !pte_write(entry))) | ||
242 | goto unlock; | ||
243 | |||
244 | if (writeaccess) | ||
245 | entry = pte_mkdirty(entry); | ||
246 | entry = pte_mkyoung(entry); | ||
247 | |||
248 | #ifdef CONFIG_CPU_SH4 | ||
249 | /* | ||
250 | * ITLB is not affected by "ldtlb" instruction. | ||
251 | * So, we need to flush the entry by ourselves. | ||
252 | */ | ||
253 | __flush_tlb_page(get_asid(), address & PAGE_MASK); | ||
254 | #endif | ||
255 | |||
256 | set_pte(pte, entry); | ||
257 | update_mmu_cache(NULL, address, entry); | ||
258 | ret = 0; | ||
259 | unlock: | ||
260 | if (mm) | ||
261 | pte_unmap_unlock(pte, ptl); | ||
262 | return ret; | ||
263 | } | ||
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 7154d1ce9785..59f4cc18235b 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -84,30 +84,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |||
84 | pmd_t *pmd; | 84 | pmd_t *pmd; |
85 | pte_t *pte; | 85 | pte_t *pte; |
86 | 86 | ||
87 | pgd = swapper_pg_dir + pgd_index(addr); | 87 | pgd = pgd_offset_k(addr); |
88 | if (pgd_none(*pgd)) { | 88 | if (pgd_none(*pgd)) { |
89 | pgd_ERROR(*pgd); | 89 | pgd_ERROR(*pgd); |
90 | return; | 90 | return; |
91 | } | 91 | } |
92 | 92 | ||
93 | pud = pud_offset(pgd, addr); | 93 | pud = pud_alloc(NULL, pgd, addr); |
94 | if (pud_none(*pud)) { | 94 | if (unlikely(!pud)) { |
95 | pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); | 95 | pud_ERROR(*pud); |
96 | set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); | 96 | return; |
97 | if (pmd != pmd_offset(pud, 0)) { | ||
98 | pud_ERROR(*pud); | ||
99 | return; | ||
100 | } | ||
101 | } | 97 | } |
102 | 98 | ||
103 | pmd = pmd_offset(pud, addr); | 99 | pmd = pmd_alloc(NULL, pud, addr); |
104 | if (pmd_none(*pmd)) { | 100 | if (unlikely(!pmd)) { |
105 | pte = (pte_t *)get_zeroed_page(GFP_ATOMIC); | 101 | pmd_ERROR(*pmd); |
106 | set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER)); | 102 | return; |
107 | if (pte != pte_offset_kernel(pmd, 0)) { | ||
108 | pmd_ERROR(*pmd); | ||
109 | return; | ||
110 | } | ||
111 | } | 103 | } |
112 | 104 | ||
113 | pte = pte_offset_kernel(pmd, addr); | 105 | pte = pte_offset_kernel(pmd, addr); |
@@ -155,9 +147,6 @@ extern char __init_begin, __init_end; | |||
155 | 147 | ||
156 | /* | 148 | /* |
157 | * paging_init() sets up the page tables | 149 | * paging_init() sets up the page tables |
158 | * | ||
159 | * This routines also unmaps the page at virtual kernel address 0, so | ||
160 | * that we can trap those pesky NULL-reference errors in the kernel. | ||
161 | */ | 150 | */ |
162 | void __init paging_init(void) | 151 | void __init paging_init(void) |
163 | { | 152 | { |
@@ -180,14 +169,11 @@ void __init paging_init(void) | |||
180 | */ | 169 | */ |
181 | { | 170 | { |
182 | unsigned long max_dma, low, start_pfn; | 171 | unsigned long max_dma, low, start_pfn; |
183 | pgd_t *pg_dir; | ||
184 | int i; | ||
185 | |||
186 | /* We don't need kernel mapping as hardware support that. */ | ||
187 | pg_dir = swapper_pg_dir; | ||
188 | 172 | ||
189 | for (i = 0; i < PTRS_PER_PGD; i++) | 173 | /* We don't need to map the kernel through the TLB, as |
190 | pgd_val(pg_dir[i]) = 0; | 174 | * it is permanently mapped using P1. So clear the |
175 | * entire pgd. */ | ||
176 | memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); | ||
191 | 177 | ||
192 | /* Turn on the MMU */ | 178 | /* Turn on the MMU */ |
193 | enable_mmu(); | 179 | enable_mmu(); |
@@ -206,6 +192,10 @@ void __init paging_init(void) | |||
206 | } | 192 | } |
207 | } | 193 | } |
208 | 194 | ||
195 | /* Set an initial value for the MMU.TTB so we don't have to | ||
196 | * check for a null value. */ | ||
197 | set_TTB(swapper_pg_dir); | ||
198 | |||
209 | #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) | 199 | #elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4) |
210 | /* | 200 | /* |
211 | * If we don't have CONFIG_MMU set and the processor in question | 201 | * If we don't have CONFIG_MMU set and the processor in question |
@@ -227,7 +217,6 @@ static struct kcore_list kcore_mem, kcore_vmalloc; | |||
227 | 217 | ||
228 | void __init mem_init(void) | 218 | void __init mem_init(void) |
229 | { | 219 | { |
230 | extern unsigned long empty_zero_page[1024]; | ||
231 | int codesize, reservedpages, datasize, initsize; | 220 | int codesize, reservedpages, datasize, initsize; |
232 | int tmp; | 221 | int tmp; |
233 | extern unsigned long memory_start; | 222 | extern unsigned long memory_start; |
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c index a9fe80cfc233..11d54c149821 100644 --- a/arch/sh/mm/ioremap.c +++ b/arch/sh/mm/ioremap.c | |||
@@ -28,9 +28,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address, | |||
28 | { | 28 | { |
29 | unsigned long end; | 29 | unsigned long end; |
30 | unsigned long pfn; | 30 | unsigned long pfn; |
31 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | | 31 | pgprot_t pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); |
32 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
33 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | flags); | ||
34 | 32 | ||
35 | address &= ~PMD_MASK; | 33 | address &= ~PMD_MASK; |
36 | end = address + size; | 34 | end = address + size; |
diff --git a/arch/sh/mm/pg-dma.c b/arch/sh/mm/pg-dma.c index 1406d2e348ca..bb23679369d6 100644 --- a/arch/sh/mm/pg-dma.c +++ b/arch/sh/mm/pg-dma.c | |||
@@ -39,8 +39,6 @@ static void copy_page_dma(void *to, void *from) | |||
39 | 39 | ||
40 | static void clear_page_dma(void *to) | 40 | static void clear_page_dma(void *to) |
41 | { | 41 | { |
42 | extern unsigned long empty_zero_page[1024]; | ||
43 | |||
44 | /* | 42 | /* |
45 | * We get invoked quite early on, if the DMAC hasn't been initialized | 43 | * We get invoked quite early on, if the DMAC hasn't been initialized |
46 | * yet, fall back on the slow manual implementation. | 44 | * yet, fall back on the slow manual implementation. |
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c index 07371ed7a313..3f98d2a4f936 100644 --- a/arch/sh/mm/pg-sh4.c +++ b/arch/sh/mm/pg-sh4.c | |||
@@ -6,22 +6,12 @@ | |||
6 | * | 6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | 7 | * Released under the terms of the GNU GPL v2.0. |
8 | */ | 8 | */ |
9 | #include <linux/init.h> | ||
10 | #include <linux/mman.h> | ||
11 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
12 | #include <linux/threads.h> | 10 | #include <linux/mutex.h> |
13 | #include <asm/addrspace.h> | ||
14 | #include <asm/page.h> | ||
15 | #include <asm/pgtable.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/cache.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | #include <asm/pgalloc.h> | ||
21 | #include <asm/mmu_context.h> | 11 | #include <asm/mmu_context.h> |
22 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
23 | 13 | ||
24 | extern struct semaphore p3map_sem[]; | 14 | extern struct mutex p3map_mutex[]; |
25 | 15 | ||
26 | #define CACHE_ALIAS (cpu_data->dcache.alias_mask) | 16 | #define CACHE_ALIAS (cpu_data->dcache.alias_mask) |
27 | 17 | ||
@@ -37,10 +27,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page) | |||
37 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | 27 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
38 | clear_page(to); | 28 | clear_page(to); |
39 | else { | 29 | else { |
40 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
41 | _PAGE_RW | _PAGE_CACHABLE | | ||
42 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
43 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
44 | unsigned long phys_addr = PHYSADDR(to); | 30 | unsigned long phys_addr = PHYSADDR(to); |
45 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | 31 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); |
46 | pgd_t *pgd = pgd_offset_k(p3_addr); | 32 | pgd_t *pgd = pgd_offset_k(p3_addr); |
@@ -50,8 +36,8 @@ void clear_user_page(void *to, unsigned long address, struct page *page) | |||
50 | pte_t entry; | 36 | pte_t entry; |
51 | unsigned long flags; | 37 | unsigned long flags; |
52 | 38 | ||
53 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | 39 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); |
54 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 40 | mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); |
55 | set_pte(pte, entry); | 41 | set_pte(pte, entry); |
56 | local_irq_save(flags); | 42 | local_irq_save(flags); |
57 | __flush_tlb_page(get_asid(), p3_addr); | 43 | __flush_tlb_page(get_asid(), p3_addr); |
@@ -59,7 +45,7 @@ void clear_user_page(void *to, unsigned long address, struct page *page) | |||
59 | update_mmu_cache(NULL, p3_addr, entry); | 45 | update_mmu_cache(NULL, p3_addr, entry); |
60 | __clear_user_page((void *)p3_addr, to); | 46 | __clear_user_page((void *)p3_addr, to); |
61 | pte_clear(&init_mm, p3_addr, pte); | 47 | pte_clear(&init_mm, p3_addr, pte); |
62 | up(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 48 | mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); |
63 | } | 49 | } |
64 | } | 50 | } |
65 | 51 | ||
@@ -77,10 +63,6 @@ void copy_user_page(void *to, void *from, unsigned long address, | |||
77 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) | 63 | if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) |
78 | copy_page(to, from); | 64 | copy_page(to, from); |
79 | else { | 65 | else { |
80 | pgprot_t pgprot = __pgprot(_PAGE_PRESENT | | ||
81 | _PAGE_RW | _PAGE_CACHABLE | | ||
82 | _PAGE_DIRTY | _PAGE_ACCESSED | | ||
83 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD); | ||
84 | unsigned long phys_addr = PHYSADDR(to); | 66 | unsigned long phys_addr = PHYSADDR(to); |
85 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); | 67 | unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS); |
86 | pgd_t *pgd = pgd_offset_k(p3_addr); | 68 | pgd_t *pgd = pgd_offset_k(p3_addr); |
@@ -90,8 +72,8 @@ void copy_user_page(void *to, void *from, unsigned long address, | |||
90 | pte_t entry; | 72 | pte_t entry; |
91 | unsigned long flags; | 73 | unsigned long flags; |
92 | 74 | ||
93 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot); | 75 | entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL); |
94 | down(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 76 | mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); |
95 | set_pte(pte, entry); | 77 | set_pte(pte, entry); |
96 | local_irq_save(flags); | 78 | local_irq_save(flags); |
97 | __flush_tlb_page(get_asid(), p3_addr); | 79 | __flush_tlb_page(get_asid(), p3_addr); |
@@ -99,7 +81,7 @@ void copy_user_page(void *to, void *from, unsigned long address, | |||
99 | update_mmu_cache(NULL, p3_addr, entry); | 81 | update_mmu_cache(NULL, p3_addr, entry); |
100 | __copy_user_page((void *)p3_addr, from, to); | 82 | __copy_user_page((void *)p3_addr, from, to); |
101 | pte_clear(&init_mm, p3_addr, pte); | 83 | pte_clear(&init_mm, p3_addr, pte); |
102 | up(&p3map_sem[(address & CACHE_ALIAS)>>12]); | 84 | mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]); |
103 | } | 85 | } |
104 | } | 86 | } |
105 | 87 | ||
@@ -122,4 +104,3 @@ inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t | |||
122 | } | 104 | } |
123 | return pte; | 105 | return pte; |
124 | } | 106 | } |
125 | |||
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types index ac57638977ee..0571755e9a84 100644 --- a/arch/sh/tools/mach-types +++ b/arch/sh/tools/mach-types | |||
@@ -30,3 +30,5 @@ R7780MP SH_R7780MP | |||
30 | TITAN SH_TITAN | 30 | TITAN SH_TITAN |
31 | SHMIN SH_SHMIN | 31 | SHMIN SH_SHMIN |
32 | 7710VOIPGW SH_7710VOIPGW | 32 | 7710VOIPGW SH_7710VOIPGW |
33 | 7206SE SH_7206_SOLUTION_ENGINE | ||
34 | 7619SE SH_7619_SOLUTION_ENGINE | ||
diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c index 3576b3cc505e..7d4190e55654 100644 --- a/arch/um/drivers/chan_kern.c +++ b/arch/um/drivers/chan_kern.c | |||
@@ -638,7 +638,7 @@ int chan_out_fd(struct list_head *chans) | |||
638 | return -1; | 638 | return -1; |
639 | } | 639 | } |
640 | 640 | ||
641 | void chan_interrupt(struct list_head *chans, struct work_struct *task, | 641 | void chan_interrupt(struct list_head *chans, struct delayed_work *task, |
642 | struct tty_struct *tty, int irq) | 642 | struct tty_struct *tty, int irq) |
643 | { | 643 | { |
644 | struct list_head *ele, *next; | 644 | struct list_head *ele, *next; |
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index 7b172160fe04..96f0189327af 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
@@ -56,7 +56,7 @@ static struct notifier_block reboot_notifier = { | |||
56 | 56 | ||
57 | static LIST_HEAD(mc_requests); | 57 | static LIST_HEAD(mc_requests); |
58 | 58 | ||
59 | static void mc_work_proc(void *unused) | 59 | static void mc_work_proc(struct work_struct *unused) |
60 | { | 60 | { |
61 | struct mconsole_entry *req; | 61 | struct mconsole_entry *req; |
62 | unsigned long flags; | 62 | unsigned long flags; |
@@ -72,7 +72,7 @@ static void mc_work_proc(void *unused) | |||
72 | } | 72 | } |
73 | } | 73 | } |
74 | 74 | ||
75 | static DECLARE_WORK(mconsole_work, mc_work_proc, NULL); | 75 | static DECLARE_WORK(mconsole_work, mc_work_proc); |
76 | 76 | ||
77 | static irqreturn_t mconsole_interrupt(int irq, void *dev_id) | 77 | static irqreturn_t mconsole_interrupt(int irq, void *dev_id) |
78 | { | 78 | { |
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index ec9eb8bd9432..286bc0b3207f 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c | |||
@@ -99,6 +99,7 @@ irqreturn_t uml_net_interrupt(int irq, void *dev_id) | |||
99 | * same device, since it tests for (dev->flags & IFF_UP). So | 99 | * same device, since it tests for (dev->flags & IFF_UP). So |
100 | * there's no harm in delaying the device shutdown. */ | 100 | * there's no harm in delaying the device shutdown. */ |
101 | schedule_work(&close_work); | 101 | schedule_work(&close_work); |
102 | #error this is not permitted - close_work will go out of scope | ||
102 | goto out; | 103 | goto out; |
103 | } | 104 | } |
104 | reactivate_fd(lp->fd, UM_ETH_IRQ); | 105 | reactivate_fd(lp->fd, UM_ETH_IRQ); |
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c index ce9f3733f73e..6dfe632f1c14 100644 --- a/arch/um/drivers/port_kern.c +++ b/arch/um/drivers/port_kern.c | |||
@@ -132,7 +132,7 @@ static int port_accept(struct port_list *port) | |||
132 | DECLARE_MUTEX(ports_sem); | 132 | DECLARE_MUTEX(ports_sem); |
133 | struct list_head ports = LIST_HEAD_INIT(ports); | 133 | struct list_head ports = LIST_HEAD_INIT(ports); |
134 | 134 | ||
135 | void port_work_proc(void *unused) | 135 | void port_work_proc(struct work_struct *unused) |
136 | { | 136 | { |
137 | struct port_list *port; | 137 | struct port_list *port; |
138 | struct list_head *ele; | 138 | struct list_head *ele; |
@@ -150,7 +150,7 @@ void port_work_proc(void *unused) | |||
150 | local_irq_restore(flags); | 150 | local_irq_restore(flags); |
151 | } | 151 | } |
152 | 152 | ||
153 | DECLARE_WORK(port_work, port_work_proc, NULL); | 153 | DECLARE_WORK(port_work, port_work_proc); |
154 | 154 | ||
155 | static irqreturn_t port_interrupt(int irq, void *data) | 155 | static irqreturn_t port_interrupt(int irq, void *data) |
156 | { | 156 | { |
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c index bbea88801d88..c7587fc39015 100644 --- a/arch/x86_64/kernel/mce.c +++ b/arch/x86_64/kernel/mce.c | |||
@@ -306,8 +306,8 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | |||
306 | */ | 306 | */ |
307 | 307 | ||
308 | static int check_interval = 5 * 60; /* 5 minutes */ | 308 | static int check_interval = 5 * 60; /* 5 minutes */ |
309 | static void mcheck_timer(void *data); | 309 | static void mcheck_timer(struct work_struct *work); |
310 | static DECLARE_WORK(mcheck_work, mcheck_timer, NULL); | 310 | static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); |
311 | 311 | ||
312 | static void mcheck_check_cpu(void *info) | 312 | static void mcheck_check_cpu(void *info) |
313 | { | 313 | { |
@@ -315,7 +315,7 @@ static void mcheck_check_cpu(void *info) | |||
315 | do_machine_check(NULL, 0); | 315 | do_machine_check(NULL, 0); |
316 | } | 316 | } |
317 | 317 | ||
318 | static void mcheck_timer(void *data) | 318 | static void mcheck_timer(struct work_struct *work) |
319 | { | 319 | { |
320 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); | 320 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); |
321 | schedule_delayed_work(&mcheck_work, check_interval * HZ); | 321 | schedule_delayed_work(&mcheck_work, check_interval * HZ); |
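Note: delayed work is split out the same way: a self-rearming poller that used to be a plain work_struct plus schedule_delayed_work() must now be a struct delayed_work. A sketch of the resulting pattern (interval and names are illustrative):

	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	#define EXAMPLE_INTERVAL (5 * 60 * HZ)	/* illustrative poll interval */

	static void example_poll(struct work_struct *work);
	static DECLARE_DELAYED_WORK(example_poller, example_poll);

	static void example_poll(struct work_struct *work)
	{
		/* ... do the periodic check ... */
		schedule_delayed_work(&example_poller, EXAMPLE_INTERVAL);	/* re-arm */
	}

	static void example_start(void)
	{
		schedule_delayed_work(&example_poller, EXAMPLE_INTERVAL);
	}
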
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c index 62c2e747af58..9800147c4c68 100644 --- a/arch/x86_64/kernel/smpboot.c +++ b/arch/x86_64/kernel/smpboot.c | |||
@@ -753,14 +753,16 @@ static int __cpuinit wakeup_secondary_via_INIT(int phys_apicid, unsigned int sta | |||
753 | } | 753 | } |
754 | 754 | ||
755 | struct create_idle { | 755 | struct create_idle { |
756 | struct work_struct work; | ||
756 | struct task_struct *idle; | 757 | struct task_struct *idle; |
757 | struct completion done; | 758 | struct completion done; |
758 | int cpu; | 759 | int cpu; |
759 | }; | 760 | }; |
760 | 761 | ||
761 | void do_fork_idle(void *_c_idle) | 762 | void do_fork_idle(struct work_struct *work) |
762 | { | 763 | { |
763 | struct create_idle *c_idle = _c_idle; | 764 | struct create_idle *c_idle = |
765 | container_of(work, struct create_idle, work); | ||
764 | 766 | ||
765 | c_idle->idle = fork_idle(c_idle->cpu); | 767 | c_idle->idle = fork_idle(c_idle->cpu); |
766 | complete(&c_idle->done); | 768 | complete(&c_idle->done); |
@@ -775,10 +777,10 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) | |||
775 | int timeout; | 777 | int timeout; |
776 | unsigned long start_rip; | 778 | unsigned long start_rip; |
777 | struct create_idle c_idle = { | 779 | struct create_idle c_idle = { |
780 | .work = __WORK_INITIALIZER(c_idle.work, do_fork_idle), | ||
778 | .cpu = cpu, | 781 | .cpu = cpu, |
779 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | 782 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), |
780 | }; | 783 | }; |
781 | DECLARE_WORK(work, do_fork_idle, &c_idle); | ||
782 | 784 | ||
783 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ | 785 | /* allocate memory for gdts of secondary cpus. Hotplug is considered */ |
784 | if (!cpu_gdt_descr[cpu].address && | 786 | if (!cpu_gdt_descr[cpu].address && |
@@ -825,9 +827,9 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid) | |||
825 | * thread. | 827 | * thread. |
826 | */ | 828 | */ |
827 | if (!keventd_up() || current_is_keventd()) | 829 | if (!keventd_up() || current_is_keventd()) |
828 | work.func(work.data); | 830 | c_idle.work.func(&c_idle.work); |
829 | else { | 831 | else { |
830 | schedule_work(&work); | 832 | schedule_work(&c_idle.work); |
831 | wait_for_completion(&c_idle.done); | 833 | wait_for_completion(&c_idle.done); |
832 | } | 834 | } |
833 | 835 | ||
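Note: when the work item is embedded in an on-stack structure, as with create_idle above, the handler recovers its context with container_of() and __WORK_INITIALIZER() replaces the old data pointer. A compact sketch of that shape (struct and field names are invented):

	#include <linux/workqueue.h>
	#include <linux/completion.h>

	struct example_ctx {
		struct work_struct work;	/* must be embedded, not pointed to */
		struct completion done;
		int value;
	};

	static void example_ctx_fn(struct work_struct *work)
	{
		struct example_ctx *ctx = container_of(work, struct example_ctx, work);

		ctx->value = 42;		/* do the real work */
		complete(&ctx->done);
	}

	static void example_run_and_wait(void)
	{
		struct example_ctx ctx = {
			.work = __WORK_INITIALIZER(ctx.work, example_ctx_fn),
			.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
		};

		schedule_work(&ctx.work);
		wait_for_completion(&ctx.done);	/* keeps ctx alive until the handler finishes */
	}
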
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c index e3ef544d2cfb..9f05bc9b2dad 100644 --- a/arch/x86_64/kernel/time.c +++ b/arch/x86_64/kernel/time.c | |||
@@ -563,7 +563,7 @@ static unsigned int cpufreq_delayed_issched = 0; | |||
563 | static unsigned int cpufreq_init = 0; | 563 | static unsigned int cpufreq_init = 0; |
564 | static struct work_struct cpufreq_delayed_get_work; | 564 | static struct work_struct cpufreq_delayed_get_work; |
565 | 565 | ||
566 | static void handle_cpufreq_delayed_get(void *v) | 566 | static void handle_cpufreq_delayed_get(struct work_struct *v) |
567 | { | 567 | { |
568 | unsigned int cpu; | 568 | unsigned int cpu; |
569 | for_each_online_cpu(cpu) { | 569 | for_each_online_cpu(cpu) { |
@@ -639,7 +639,7 @@ static struct notifier_block time_cpufreq_notifier_block = { | |||
639 | 639 | ||
640 | static int __init cpufreq_tsc(void) | 640 | static int __init cpufreq_tsc(void) |
641 | { | 641 | { |
642 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL); | 642 | INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get); |
643 | if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, | 643 | if (!cpufreq_register_notifier(&time_cpufreq_notifier_block, |
644 | CPUFREQ_TRANSITION_NOTIFIER)) | 644 | CPUFREQ_TRANSITION_NOTIFIER)) |
645 | cpufreq_init = 1; | 645 | cpufreq_init = 1; |
diff --git a/block/as-iosched.c b/block/as-iosched.c index 00242111a457..5934c4bfd52a 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c | |||
@@ -1274,9 +1274,10 @@ static void as_merged_requests(request_queue_t *q, struct request *req, | |||
1274 | * | 1274 | * |
1275 | * FIXME! dispatch queue is not a queue at all! | 1275 | * FIXME! dispatch queue is not a queue at all! |
1276 | */ | 1276 | */ |
1277 | static void as_work_handler(void *data) | 1277 | static void as_work_handler(struct work_struct *work) |
1278 | { | 1278 | { |
1279 | struct request_queue *q = data; | 1279 | struct as_data *ad = container_of(work, struct as_data, antic_work); |
1280 | struct request_queue *q = ad->q; | ||
1280 | unsigned long flags; | 1281 | unsigned long flags; |
1281 | 1282 | ||
1282 | spin_lock_irqsave(q->queue_lock, flags); | 1283 | spin_lock_irqsave(q->queue_lock, flags); |
@@ -1332,7 +1333,7 @@ static void *as_init_queue(request_queue_t *q) | |||
1332 | ad->antic_timer.function = as_antic_timeout; | 1333 | ad->antic_timer.function = as_antic_timeout; |
1333 | ad->antic_timer.data = (unsigned long)q; | 1334 | ad->antic_timer.data = (unsigned long)q; |
1334 | init_timer(&ad->antic_timer); | 1335 | init_timer(&ad->antic_timer); |
1335 | INIT_WORK(&ad->antic_work, as_work_handler, q); | 1336 | INIT_WORK(&ad->antic_work, as_work_handler); |
1336 | 1337 | ||
1337 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); | 1338 | INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]); |
1338 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); | 1339 | INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e9019ed39b73..84e9be073180 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -1840,9 +1840,11 @@ queue_fail: | |||
1840 | return 1; | 1840 | return 1; |
1841 | } | 1841 | } |
1842 | 1842 | ||
1843 | static void cfq_kick_queue(void *data) | 1843 | static void cfq_kick_queue(struct work_struct *work) |
1844 | { | 1844 | { |
1845 | request_queue_t *q = data; | 1845 | struct cfq_data *cfqd = |
1846 | container_of(work, struct cfq_data, unplug_work); | ||
1847 | request_queue_t *q = cfqd->queue; | ||
1846 | unsigned long flags; | 1848 | unsigned long flags; |
1847 | 1849 | ||
1848 | spin_lock_irqsave(q->queue_lock, flags); | 1850 | spin_lock_irqsave(q->queue_lock, flags); |
@@ -1986,7 +1988,7 @@ static void *cfq_init_queue(request_queue_t *q) | |||
1986 | cfqd->idle_class_timer.function = cfq_idle_class_timer; | 1988 | cfqd->idle_class_timer.function = cfq_idle_class_timer; |
1987 | cfqd->idle_class_timer.data = (unsigned long) cfqd; | 1989 | cfqd->idle_class_timer.data = (unsigned long) cfqd; |
1988 | 1990 | ||
1989 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); | 1991 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); |
1990 | 1992 | ||
1991 | cfqd->cfq_quantum = cfq_quantum; | 1993 | cfqd->cfq_quantum = cfq_quantum; |
1992 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 1994 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 0f82e12f7b67..cc6e95f8e5d9 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | #include <scsi/scsi_cmnd.h> | 35 | #include <scsi/scsi_cmnd.h> |
36 | 36 | ||
37 | static void blk_unplug_work(void *data); | 37 | static void blk_unplug_work(struct work_struct *work); |
38 | static void blk_unplug_timeout(unsigned long data); | 38 | static void blk_unplug_timeout(unsigned long data); |
39 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); | 39 | static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); |
40 | static void init_request_from_bio(struct request *req, struct bio *bio); | 40 | static void init_request_from_bio(struct request *req, struct bio *bio); |
@@ -227,7 +227,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) | |||
227 | if (q->unplug_delay == 0) | 227 | if (q->unplug_delay == 0) |
228 | q->unplug_delay = 1; | 228 | q->unplug_delay = 1; |
229 | 229 | ||
230 | INIT_WORK(&q->unplug_work, blk_unplug_work, q); | 230 | INIT_WORK(&q->unplug_work, blk_unplug_work); |
231 | 231 | ||
232 | q->unplug_timer.function = blk_unplug_timeout; | 232 | q->unplug_timer.function = blk_unplug_timeout; |
233 | q->unplug_timer.data = (unsigned long)q; | 233 | q->unplug_timer.data = (unsigned long)q; |
@@ -1631,9 +1631,9 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, | |||
1631 | } | 1631 | } |
1632 | } | 1632 | } |
1633 | 1633 | ||
1634 | static void blk_unplug_work(void *data) | 1634 | static void blk_unplug_work(struct work_struct *work) |
1635 | { | 1635 | { |
1636 | request_queue_t *q = data; | 1636 | request_queue_t *q = container_of(work, request_queue_t, unplug_work); |
1637 | 1637 | ||
1638 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, | 1638 | blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, |
1639 | q->rq.count[READ] + q->rq.count[WRITE]); | 1639 | q->rq.count[READ] + q->rq.count[WRITE]); |
diff --git a/crypto/cryptomgr.c b/crypto/cryptomgr.c index 9b5b15601068..2ebffb84f1d9 100644 --- a/crypto/cryptomgr.c +++ b/crypto/cryptomgr.c | |||
@@ -40,9 +40,10 @@ struct cryptomgr_param { | |||
40 | char template[CRYPTO_MAX_ALG_NAME]; | 40 | char template[CRYPTO_MAX_ALG_NAME]; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static void cryptomgr_probe(void *data) | 43 | static void cryptomgr_probe(struct work_struct *work) |
44 | { | 44 | { |
45 | struct cryptomgr_param *param = data; | 45 | struct cryptomgr_param *param = |
46 | container_of(work, struct cryptomgr_param, work); | ||
46 | struct crypto_template *tmpl; | 47 | struct crypto_template *tmpl; |
47 | struct crypto_instance *inst; | 48 | struct crypto_instance *inst; |
48 | int err; | 49 | int err; |
@@ -112,7 +113,7 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval) | |||
112 | param->larval.type = larval->alg.cra_flags; | 113 | param->larval.type = larval->alg.cra_flags; |
113 | param->larval.mask = larval->mask; | 114 | param->larval.mask = larval->mask; |
114 | 115 | ||
115 | INIT_WORK(¶m->work, cryptomgr_probe, param); | 116 | INIT_WORK(¶m->work, cryptomgr_probe); |
116 | schedule_work(¶m->work); | 117 | schedule_work(¶m->work); |
117 | 118 | ||
118 | return NOTIFY_STOP; | 119 | return NOTIFY_STOP; |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 068fe4f100b0..02b30ae6a68e 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -50,6 +50,7 @@ ACPI_MODULE_NAME("osl") | |||
50 | struct acpi_os_dpc { | 50 | struct acpi_os_dpc { |
51 | acpi_osd_exec_callback function; | 51 | acpi_osd_exec_callback function; |
52 | void *context; | 52 | void *context; |
53 | struct work_struct work; | ||
53 | }; | 54 | }; |
54 | 55 | ||
55 | #ifdef CONFIG_ACPI_CUSTOM_DSDT | 56 | #ifdef CONFIG_ACPI_CUSTOM_DSDT |
@@ -564,12 +565,9 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */ | |||
564 | acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); | 565 | acpi_os_derive_pci_id_2(rhandle, chandle, id, &is_bridge, &bus_number); |
565 | } | 566 | } |
566 | 567 | ||
567 | static void acpi_os_execute_deferred(void *context) | 568 | static void acpi_os_execute_deferred(struct work_struct *work) |
568 | { | 569 | { |
569 | struct acpi_os_dpc *dpc = NULL; | 570 | struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work); |
570 | |||
571 | |||
572 | dpc = (struct acpi_os_dpc *)context; | ||
573 | if (!dpc) { | 571 | if (!dpc) { |
574 | printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); | 572 | printk(KERN_ERR PREFIX "Invalid (NULL) context\n"); |
575 | return; | 573 | return; |
@@ -602,7 +600,6 @@ acpi_status acpi_os_execute(acpi_execute_type type, | |||
602 | { | 600 | { |
603 | acpi_status status = AE_OK; | 601 | acpi_status status = AE_OK; |
604 | struct acpi_os_dpc *dpc; | 602 | struct acpi_os_dpc *dpc; |
605 | struct work_struct *task; | ||
606 | 603 | ||
607 | ACPI_FUNCTION_TRACE("os_queue_for_execution"); | 604 | ACPI_FUNCTION_TRACE("os_queue_for_execution"); |
608 | 605 | ||
@@ -615,28 +612,22 @@ acpi_status acpi_os_execute(acpi_execute_type type, | |||
615 | 612 | ||
616 | /* | 613 | /* |
617 | * Allocate/initialize DPC structure. Note that this memory will be | 614 | * Allocate/initialize DPC structure. Note that this memory will be |
618 | * freed by the callee. The kernel handles the tq_struct list in a | 615 | * freed by the callee. The kernel handles the work_struct list in a |
619 | * way that allows us to also free its memory inside the callee. | 616 | * way that allows us to also free its memory inside the callee. |
620 | * Because we may want to schedule several tasks with different | 617 | * Because we may want to schedule several tasks with different |
621 | * parameters we can't use the approach some kernel code uses of | 618 | * parameters we can't use the approach some kernel code uses of |
622 | * having a static tq_struct. | 619 | * having a static work_struct. |
623 | * We can save time and code by allocating the DPC and tq_structs | ||
624 | * from the same memory. | ||
625 | */ | 620 | */ |
626 | 621 | ||
627 | dpc = | 622 | dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); |
628 | kmalloc(sizeof(struct acpi_os_dpc) + sizeof(struct work_struct), | ||
629 | GFP_ATOMIC); | ||
630 | if (!dpc) | 623 | if (!dpc) |
631 | return_ACPI_STATUS(AE_NO_MEMORY); | 624 | return_ACPI_STATUS(AE_NO_MEMORY); |
632 | 625 | ||
633 | dpc->function = function; | 626 | dpc->function = function; |
634 | dpc->context = context; | 627 | dpc->context = context; |
635 | 628 | ||
636 | task = (void *)(dpc + 1); | 629 | INIT_WORK(&dpc->work, acpi_os_execute_deferred); |
637 | INIT_WORK(task, acpi_os_execute_deferred, (void *)dpc); | 630 | if (!queue_work(kacpid_wq, &dpc->work)) { |
638 | |||
639 | if (!queue_work(kacpid_wq, task)) { | ||
640 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | 631 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, |
641 | "Call to queue_work() failed.\n")); | 632 | "Call to queue_work() failed.\n")); |
642 | kfree(dpc); | 633 | kfree(dpc); |
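Note: for dynamically allocated work, the ACPI change above shows the idiom this series converges on: embed the work_struct in the per-request structure instead of allocating a separate one, and let the handler recover the whole thing via container_of(). A sketch under invented names, with the handler owning the allocation:

	#include <linux/workqueue.h>
	#include <linux/slab.h>
	#include <linux/errno.h>

	struct example_req {
		struct work_struct work;
		void (*function)(void *);
		void *context;
	};

	static void example_req_fn(struct work_struct *work)
	{
		struct example_req *req = container_of(work, struct example_req, work);

		req->function(req->context);
		kfree(req);			/* handler frees the containing structure */
	}

	static int example_queue(void (*function)(void *), void *context)
	{
		struct example_req *req = kmalloc(sizeof(*req), GFP_ATOMIC);

		if (!req)
			return -ENOMEM;

		req->function = function;
		req->context = context;
		INIT_WORK(&req->work, example_req_fn);

		if (!schedule_work(&req->work)) {
			kfree(req);
			return -EBUSY;
		}
		return 0;
	}
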
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index f8ec3896b793..8816e30fb7a4 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -1081,7 +1081,7 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
1081 | * ata_port_queue_task - Queue port_task | 1081 | * ata_port_queue_task - Queue port_task |
1082 | * @ap: The ata_port to queue port_task for | 1082 | * @ap: The ata_port to queue port_task for |
1083 | * @fn: workqueue function to be scheduled | 1083 | * @fn: workqueue function to be scheduled |
1084 | * @data: data value to pass to workqueue function | 1084 | * @data: data for @fn to use |
1085 | * @delay: delay time for workqueue function | 1085 | * @delay: delay time for workqueue function |
1086 | * | 1086 | * |
1087 | * Schedule @fn(@data) for execution after @delay jiffies using | 1087 | * Schedule @fn(@data) for execution after @delay jiffies using |
@@ -1096,7 +1096,7 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
1096 | * LOCKING: | 1096 | * LOCKING: |
1097 | * Inherited from caller. | 1097 | * Inherited from caller. |
1098 | */ | 1098 | */ |
1099 | void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, | 1099 | void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data, |
1100 | unsigned long delay) | 1100 | unsigned long delay) |
1101 | { | 1101 | { |
1102 | int rc; | 1102 | int rc; |
@@ -1104,12 +1104,10 @@ void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data, | |||
1104 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) | 1104 | if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK) |
1105 | return; | 1105 | return; |
1106 | 1106 | ||
1107 | PREPARE_WORK(&ap->port_task, fn, data); | 1107 | PREPARE_DELAYED_WORK(&ap->port_task, fn); |
1108 | ap->port_task_data = data; | ||
1108 | 1109 | ||
1109 | if (!delay) | 1110 | rc = queue_delayed_work(ata_wq, &ap->port_task, delay); |
1110 | rc = queue_work(ata_wq, &ap->port_task); | ||
1111 | else | ||
1112 | rc = queue_delayed_work(ata_wq, &ap->port_task, delay); | ||
1113 | 1111 | ||
1114 | /* rc == 0 means that another user is using port task */ | 1112 | /* rc == 0 means that another user is using port task */ |
1115 | WARN_ON(rc == 0); | 1113 | WARN_ON(rc == 0); |
@@ -4588,10 +4586,11 @@ fsm_start: | |||
4588 | return poll_next; | 4586 | return poll_next; |
4589 | } | 4587 | } |
4590 | 4588 | ||
4591 | static void ata_pio_task(void *_data) | 4589 | static void ata_pio_task(struct work_struct *work) |
4592 | { | 4590 | { |
4593 | struct ata_queued_cmd *qc = _data; | 4591 | struct ata_port *ap = |
4594 | struct ata_port *ap = qc->ap; | 4592 | container_of(work, struct ata_port, port_task.work); |
4593 | struct ata_queued_cmd *qc = ap->port_task_data; | ||
4595 | u8 status; | 4594 | u8 status; |
4596 | int poll_next; | 4595 | int poll_next; |
4597 | 4596 | ||
@@ -5635,9 +5634,9 @@ void ata_port_init(struct ata_port *ap, struct ata_host *host, | |||
5635 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; | 5634 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
5636 | #endif | 5635 | #endif |
5637 | 5636 | ||
5638 | INIT_WORK(&ap->port_task, NULL, NULL); | 5637 | INIT_DELAYED_WORK(&ap->port_task, NULL); |
5639 | INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap); | 5638 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
5640 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap); | 5639 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
5641 | INIT_LIST_HEAD(&ap->eh_done_q); | 5640 | INIT_LIST_HEAD(&ap->eh_done_q); |
5642 | init_waitqueue_head(&ap->eh_wait_q); | 5641 | init_waitqueue_head(&ap->eh_wait_q); |
5643 | 5642 | ||
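Note: libata's port_task is the delayed-work variant of the container_of pattern: the handler is handed the embedded work member of a struct delayed_work, so the container_of expression names member.work, and the old void *data argument survives as an explicit field in the containing structure. A reduced sketch with invented names:

	#include <linux/workqueue.h>

	struct example_port {
		struct delayed_work task;	/* was: work_struct + data argument */
		void *task_data;		/* explicit replacement for the data pointer */
	};

	static void example_port_init(struct example_port *port)
	{
		INIT_DELAYED_WORK(&port->task, NULL);	/* function set later */
	}

	static void example_task_fn(struct work_struct *work)
	{
		/* note the ".work": work points at the member inside delayed_work */
		struct example_port *port =
			container_of(work, struct example_port, task.work);
		void *data = port->task_data;

		/* ... use data ... */
		(void)data;
	}

	static void example_queue_task(struct example_port *port, void *data,
				       unsigned long delay)
	{
		PREPARE_DELAYED_WORK(&port->task, example_task_fn);
		port->task_data = data;
		schedule_delayed_work(&port->task, delay);
	}
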
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 76a85dfb7307..08ad44b3e48f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -332,7 +332,7 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
332 | if (ap->pflags & ATA_PFLAG_LOADING) | 332 | if (ap->pflags & ATA_PFLAG_LOADING) |
333 | ap->pflags &= ~ATA_PFLAG_LOADING; | 333 | ap->pflags &= ~ATA_PFLAG_LOADING; |
334 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) | 334 | else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) |
335 | queue_work(ata_aux_wq, &ap->hotplug_task); | 335 | queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0); |
336 | 336 | ||
337 | if (ap->pflags & ATA_PFLAG_RECOVERED) | 337 | if (ap->pflags & ATA_PFLAG_RECOVERED) |
338 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); | 338 | ata_port_printk(ap, KERN_INFO, "EH complete\n"); |
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 8eaace94d963..664e1377b54c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -2963,7 +2963,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev) | |||
2963 | 2963 | ||
2964 | /** | 2964 | /** |
2965 | * ata_scsi_hotplug - SCSI part of hotplug | 2965 | * ata_scsi_hotplug - SCSI part of hotplug |
2966 | * @data: Pointer to ATA port to perform SCSI hotplug on | 2966 | * @work: Pointer to ATA port to perform SCSI hotplug on |
2967 | * | 2967 | * |
2968 | * Perform SCSI part of hotplug. It's executed from a separate | 2968 | * Perform SCSI part of hotplug. It's executed from a separate |
2969 | * workqueue after EH completes. This is necessary because SCSI | 2969 | * workqueue after EH completes. This is necessary because SCSI |
@@ -2973,9 +2973,10 @@ static void ata_scsi_remove_dev(struct ata_device *dev) | |||
2973 | * LOCKING: | 2973 | * LOCKING: |
2974 | * Kernel thread context (may sleep). | 2974 | * Kernel thread context (may sleep). |
2975 | */ | 2975 | */ |
2976 | void ata_scsi_hotplug(void *data) | 2976 | void ata_scsi_hotplug(struct work_struct *work) |
2977 | { | 2977 | { |
2978 | struct ata_port *ap = data; | 2978 | struct ata_port *ap = |
2979 | container_of(work, struct ata_port, hotplug_task.work); | ||
2979 | int i; | 2980 | int i; |
2980 | 2981 | ||
2981 | if (ap->pflags & ATA_PFLAG_UNLOADING) { | 2982 | if (ap->pflags & ATA_PFLAG_UNLOADING) { |
@@ -3076,7 +3077,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3076 | 3077 | ||
3077 | /** | 3078 | /** |
3078 | * ata_scsi_dev_rescan - initiate scsi_rescan_device() | 3079 | * ata_scsi_dev_rescan - initiate scsi_rescan_device() |
3079 | * @data: Pointer to ATA port to perform scsi_rescan_device() | 3080 | * @work: Pointer to ATA port to perform scsi_rescan_device() |
3080 | * | 3081 | * |
3081 | * After ATA pass thru (SAT) commands are executed successfully, | 3082 | * After ATA pass thru (SAT) commands are executed successfully, |
3082 | * libata need to propagate the changes to SCSI layer. This | 3083 | * libata need to propagate the changes to SCSI layer. This |
@@ -3086,9 +3087,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, | |||
3086 | * LOCKING: | 3087 | * LOCKING: |
3087 | * Kernel thread context (may sleep). | 3088 | * Kernel thread context (may sleep). |
3088 | */ | 3089 | */ |
3089 | void ata_scsi_dev_rescan(void *data) | 3090 | void ata_scsi_dev_rescan(struct work_struct *work) |
3090 | { | 3091 | { |
3091 | struct ata_port *ap = data; | 3092 | struct ata_port *ap = |
3093 | container_of(work, struct ata_port, scsi_rescan_task); | ||
3092 | unsigned long flags; | 3094 | unsigned long flags; |
3093 | unsigned int i; | 3095 | unsigned int i; |
3094 | 3096 | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 107b2b565229..81ae41d5f23f 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -94,7 +94,7 @@ extern struct scsi_transport_template ata_scsi_transport_template; | |||
94 | 94 | ||
95 | extern void ata_scsi_scan_host(struct ata_port *ap); | 95 | extern void ata_scsi_scan_host(struct ata_port *ap); |
96 | extern int ata_scsi_offline_dev(struct ata_device *dev); | 96 | extern int ata_scsi_offline_dev(struct ata_device *dev); |
97 | extern void ata_scsi_hotplug(void *data); | 97 | extern void ata_scsi_hotplug(struct work_struct *work); |
98 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, | 98 | extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, |
99 | unsigned int buflen); | 99 | unsigned int buflen); |
100 | 100 | ||
@@ -124,7 +124,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, | |||
124 | unsigned int (*actor) (struct ata_scsi_args *args, | 124 | unsigned int (*actor) (struct ata_scsi_args *args, |
125 | u8 *rbuf, unsigned int buflen)); | 125 | u8 *rbuf, unsigned int buflen)); |
126 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); | 126 | extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); |
127 | extern void ata_scsi_dev_rescan(void *data); | 127 | extern void ata_scsi_dev_rescan(struct work_struct *work); |
128 | extern int ata_bus_probe(struct ata_port *ap); | 128 | extern int ata_bus_probe(struct ata_port *ap); |
129 | 129 | ||
130 | /* libata-eh.c */ | 130 | /* libata-eh.c */ |
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 87b17c33b3f9..f40786121948 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c | |||
@@ -135,7 +135,7 @@ static int idt77252_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, | |||
135 | int flags); | 135 | int flags); |
136 | static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, | 136 | static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, |
137 | char *page); | 137 | char *page); |
138 | static void idt77252_softint(void *dev_id); | 138 | static void idt77252_softint(struct work_struct *work); |
139 | 139 | ||
140 | 140 | ||
141 | static struct atmdev_ops idt77252_ops = | 141 | static struct atmdev_ops idt77252_ops = |
@@ -2866,9 +2866,10 @@ out: | |||
2866 | } | 2866 | } |
2867 | 2867 | ||
2868 | static void | 2868 | static void |
2869 | idt77252_softint(void *dev_id) | 2869 | idt77252_softint(struct work_struct *work) |
2870 | { | 2870 | { |
2871 | struct idt77252_dev *card = dev_id; | 2871 | struct idt77252_dev *card = |
2872 | container_of(work, struct idt77252_dev, tqueue); | ||
2872 | u32 stat; | 2873 | u32 stat; |
2873 | int done; | 2874 | int done; |
2874 | 2875 | ||
@@ -3697,7 +3698,7 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) | |||
3697 | card->pcidev = pcidev; | 3698 | card->pcidev = pcidev; |
3698 | sprintf(card->name, "idt77252-%d", card->index); | 3699 | sprintf(card->name, "idt77252-%d", card->index); |
3699 | 3700 | ||
3700 | INIT_WORK(&card->tqueue, idt77252_softint, (void *)card); | 3701 | INIT_WORK(&card->tqueue, idt77252_softint); |
3701 | 3702 | ||
3702 | membase = pci_resource_start(pcidev, 1); | 3703 | membase = pci_resource_start(pcidev, 1); |
3703 | srambase = pci_resource_start(pcidev, 2); | 3704 | srambase = pci_resource_start(pcidev, 2); |
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 6d111228cfac..2308e83e5f33 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h | |||
@@ -159,7 +159,7 @@ void aoecmd_work(struct aoedev *d); | |||
159 | void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); | 159 | void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); |
160 | void aoecmd_ata_rsp(struct sk_buff *); | 160 | void aoecmd_ata_rsp(struct sk_buff *); |
161 | void aoecmd_cfg_rsp(struct sk_buff *); | 161 | void aoecmd_cfg_rsp(struct sk_buff *); |
162 | void aoecmd_sleepwork(void *vp); | 162 | void aoecmd_sleepwork(struct work_struct *); |
163 | struct sk_buff *new_skb(ulong); | 163 | struct sk_buff *new_skb(ulong); |
164 | 164 | ||
165 | int aoedev_init(void); | 165 | int aoedev_init(void); |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 8a13b1af8bab..97f7f535f412 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -408,9 +408,9 @@ rexmit_timer(ulong vp) | |||
408 | /* this function performs work that has been deferred until sleeping is OK | 408 | /* this function performs work that has been deferred until sleeping is OK |
409 | */ | 409 | */ |
410 | void | 410 | void |
411 | aoecmd_sleepwork(void *vp) | 411 | aoecmd_sleepwork(struct work_struct *work) |
412 | { | 412 | { |
413 | struct aoedev *d = (struct aoedev *) vp; | 413 | struct aoedev *d = container_of(work, struct aoedev, work); |
414 | 414 | ||
415 | if (d->flags & DEVFL_GDALLOC) | 415 | if (d->flags & DEVFL_GDALLOC) |
416 | aoeblk_gdalloc(d); | 416 | aoeblk_gdalloc(d); |
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 6125921bbec4..05a97197c918 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -88,7 +88,7 @@ aoedev_newdev(ulong nframes) | |||
88 | kfree(d); | 88 | kfree(d); |
89 | return NULL; | 89 | return NULL; |
90 | } | 90 | } |
91 | INIT_WORK(&d->work, aoecmd_sleepwork, d); | 91 | INIT_WORK(&d->work, aoecmd_sleepwork); |
92 | spin_lock_init(&d->lock); | 92 | spin_lock_init(&d->lock); |
93 | init_timer(&d->timer); | 93 | init_timer(&d->timer); |
94 | d->timer.data = (ulong) d; | 94 | d->timer.data = (ulong) d; |
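The aoe hunks show the simpler, non-delayed form of the same change: INIT_WORK() loses its data argument and the handler derives the owning device from the work pointer instead of casting a void *. A condensed sketch using the names from the hunks above (the schedule_work() call is illustrative and happens elsewhere in the driver, not in this patch):

	static void aoecmd_sleepwork(struct work_struct *work)
	{
		struct aoedev *d = container_of(work, struct aoedev, work);
		/* ... */
	}

	INIT_WORK(&d->work, aoecmd_sleepwork);	/* no third argument any more */
	schedule_work(&d->work);
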
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 9e6d3a87cbe3..3f1b38276e96 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -992,11 +992,11 @@ static void empty(void) | |||
992 | { | 992 | { |
993 | } | 993 | } |
994 | 994 | ||
995 | static DECLARE_WORK(floppy_work, NULL, NULL); | 995 | static DECLARE_WORK(floppy_work, NULL); |
996 | 996 | ||
997 | static void schedule_bh(void (*handler) (void)) | 997 | static void schedule_bh(void (*handler) (void)) |
998 | { | 998 | { |
999 | PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL); | 999 | PREPARE_WORK(&floppy_work, (work_func_t)handler); |
1000 | schedule_work(&floppy_work); | 1000 | schedule_work(&floppy_work); |
1001 | } | 1001 | } |
1002 | 1002 | ||
@@ -1008,7 +1008,7 @@ static void cancel_activity(void) | |||
1008 | 1008 | ||
1009 | spin_lock_irqsave(&floppy_lock, flags); | 1009 | spin_lock_irqsave(&floppy_lock, flags); |
1010 | do_floppy = NULL; | 1010 | do_floppy = NULL; |
1011 | PREPARE_WORK(&floppy_work, (void *)empty, NULL); | 1011 | PREPARE_WORK(&floppy_work, (work_func_t)empty); |
1012 | del_timer(&fd_timer); | 1012 | del_timer(&fd_timer); |
1013 | spin_unlock_irqrestore(&floppy_lock, flags); | 1013 | spin_unlock_irqrestore(&floppy_lock, flags); |
1014 | } | 1014 | } |
@@ -1868,7 +1868,7 @@ static void show_floppy(void) | |||
1868 | printk("fdc_busy=%lu\n", fdc_busy); | 1868 | printk("fdc_busy=%lu\n", fdc_busy); |
1869 | if (do_floppy) | 1869 | if (do_floppy) |
1870 | printk("do_floppy=%p\n", do_floppy); | 1870 | printk("do_floppy=%p\n", do_floppy); |
1871 | if (floppy_work.pending) | 1871 | if (work_pending(&floppy_work)) |
1872 | printk("floppy_work.func=%p\n", floppy_work.func); | 1872 | printk("floppy_work.func=%p\n", floppy_work.func); |
1873 | if (timer_pending(&fd_timer)) | 1873 | if (timer_pending(&fd_timer)) |
1874 | printk("fd_timer.function=%p\n", fd_timer.function); | 1874 | printk("fd_timer.function=%p\n", fd_timer.function); |
@@ -4498,7 +4498,7 @@ static void floppy_release_irq_and_dma(void) | |||
4498 | printk("floppy timer still active:%s\n", timeout_message); | 4498 | printk("floppy timer still active:%s\n", timeout_message); |
4499 | if (timer_pending(&fd_timer)) | 4499 | if (timer_pending(&fd_timer)) |
4500 | printk("auxiliary floppy timer still active\n"); | 4500 | printk("auxiliary floppy timer still active\n"); |
4501 | if (floppy_work.pending) | 4501 | if (work_pending(&floppy_work)) |
4502 | printk("work still pending\n"); | 4502 | printk("work still pending\n"); |
4503 | #endif | 4503 | #endif |
4504 | old_fdc = fdc; | 4504 | old_fdc = fdc; |
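Two side effects of the new prototype show up in floppy.c: a handler that never used its old data argument can still be installed by casting it to work_func_t (safe only because the handler ignores its argument entirely), and the pending state is no longer a plain struct field, so tests go through work_pending(). Condensed from the hunks above ("handler" is the function pointer passed to schedule_bh()):

	static DECLARE_WORK(floppy_work, NULL);

	PREPARE_WORK(&floppy_work, (work_func_t)handler);	/* handler takes no arguments */
	schedule_work(&floppy_work);

	if (work_pending(&floppy_work))		/* replaces floppy_work.pending */
		printk("work still pending\n");
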
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 40a11e567970..9d9bff23f426 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -352,19 +352,19 @@ static enum action (*phase)(void); | |||
352 | 352 | ||
353 | static void run_fsm(void); | 353 | static void run_fsm(void); |
354 | 354 | ||
355 | static void ps_tq_int( void *data); | 355 | static void ps_tq_int(struct work_struct *work); |
356 | 356 | ||
357 | static DECLARE_WORK(fsm_tq, ps_tq_int, NULL); | 357 | static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int); |
358 | 358 | ||
359 | static void schedule_fsm(void) | 359 | static void schedule_fsm(void) |
360 | { | 360 | { |
361 | if (!nice) | 361 | if (!nice) |
362 | schedule_work(&fsm_tq); | 362 | schedule_delayed_work(&fsm_tq, 0); |
363 | else | 363 | else |
364 | schedule_delayed_work(&fsm_tq, nice-1); | 364 | schedule_delayed_work(&fsm_tq, nice-1); |
365 | } | 365 | } |
366 | 366 | ||
367 | static void ps_tq_int(void *data) | 367 | static void ps_tq_int(struct work_struct *work) |
368 | { | 368 | { |
369 | run_fsm(); | 369 | run_fsm(); |
370 | } | 370 | } |
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h index 932342d7a8eb..bc3703294143 100644 --- a/drivers/block/paride/pseudo.h +++ b/drivers/block/paride/pseudo.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/sched.h> | 35 | #include <linux/sched.h> |
36 | #include <linux/workqueue.h> | 36 | #include <linux/workqueue.h> |
37 | 37 | ||
38 | static void ps_tq_int( void *data); | 38 | static void ps_tq_int(struct work_struct *work); |
39 | 39 | ||
40 | static void (* ps_continuation)(void); | 40 | static void (* ps_continuation)(void); |
41 | static int (* ps_ready)(void); | 41 | static int (* ps_ready)(void); |
@@ -45,7 +45,7 @@ static int ps_nice = 0; | |||
45 | 45 | ||
46 | static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); | 46 | static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused))); |
47 | 47 | ||
48 | static DECLARE_WORK(ps_tq, ps_tq_int, NULL); | 48 | static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int); |
49 | 49 | ||
50 | static void ps_set_intr(void (*continuation)(void), | 50 | static void ps_set_intr(void (*continuation)(void), |
51 | int (*ready)(void), | 51 | int (*ready)(void), |
@@ -63,14 +63,14 @@ static void ps_set_intr(void (*continuation)(void), | |||
63 | if (!ps_tq_active) { | 63 | if (!ps_tq_active) { |
64 | ps_tq_active = 1; | 64 | ps_tq_active = 1; |
65 | if (!ps_nice) | 65 | if (!ps_nice) |
66 | schedule_work(&ps_tq); | 66 | schedule_delayed_work(&ps_tq, 0); |
67 | else | 67 | else |
68 | schedule_delayed_work(&ps_tq, ps_nice-1); | 68 | schedule_delayed_work(&ps_tq, ps_nice-1); |
69 | } | 69 | } |
70 | spin_unlock_irqrestore(&ps_spinlock,flags); | 70 | spin_unlock_irqrestore(&ps_spinlock,flags); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void ps_tq_int(void *data) | 73 | static void ps_tq_int(struct work_struct *work) |
74 | { | 74 | { |
75 | void (*con)(void); | 75 | void (*con)(void); |
76 | unsigned long flags; | 76 | unsigned long flags; |
@@ -92,7 +92,7 @@ static void ps_tq_int(void *data) | |||
92 | } | 92 | } |
93 | ps_tq_active = 1; | 93 | ps_tq_active = 1; |
94 | if (!ps_nice) | 94 | if (!ps_nice) |
95 | schedule_work(&ps_tq); | 95 | schedule_delayed_work(&ps_tq, 0); |
96 | else | 96 | else |
97 | schedule_delayed_work(&ps_tq, ps_nice-1); | 97 | schedule_delayed_work(&ps_tq, ps_nice-1); |
98 | spin_unlock_irqrestore(&ps_spinlock,flags); | 98 | spin_unlock_irqrestore(&ps_spinlock,flags); |
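In the paride drivers the work item itself becomes a delayed_work, so the old schedule_work() fast path no longer type-checks against it; both the immediate and the delayed cases now go through schedule_delayed_work(), with a delay of 0 standing in for "run now". From the hunks above:

	static void ps_tq_int(struct work_struct *work);
	static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);

	if (!ps_nice)
		schedule_delayed_work(&ps_tq, 0);	/* was schedule_work(&ps_tq) */
	else
		schedule_delayed_work(&ps_tq, ps_nice - 1);
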
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 47d6975268ff..54509eb3391b 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -1244,9 +1244,10 @@ out: | |||
1244 | return IRQ_RETVAL(handled); | 1244 | return IRQ_RETVAL(handled); |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | static void carm_fsm_task (void *_data) | 1247 | static void carm_fsm_task (struct work_struct *work) |
1248 | { | 1248 | { |
1249 | struct carm_host *host = _data; | 1249 | struct carm_host *host = |
1250 | container_of(work, struct carm_host, fsm_task); | ||
1250 | unsigned long flags; | 1251 | unsigned long flags; |
1251 | unsigned int state; | 1252 | unsigned int state; |
1252 | int rc, i, next_dev; | 1253 | int rc, i, next_dev; |
@@ -1619,7 +1620,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1619 | host->pdev = pdev; | 1620 | host->pdev = pdev; |
1620 | host->flags = pci_dac ? FL_DAC : 0; | 1621 | host->flags = pci_dac ? FL_DAC : 0; |
1621 | spin_lock_init(&host->lock); | 1622 | spin_lock_init(&host->lock); |
1622 | INIT_WORK(&host->fsm_task, carm_fsm_task, host); | 1623 | INIT_WORK(&host->fsm_task, carm_fsm_task); |
1623 | init_completion(&host->probe_comp); | 1624 | init_completion(&host->probe_comp); |
1624 | 1625 | ||
1625 | for (i = 0; i < ARRAY_SIZE(host->req); i++) | 1626 | for (i = 0; i < ARRAY_SIZE(host->req); i++) |
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 0d5c73f07265..2098eff91e14 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
@@ -376,7 +376,7 @@ static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |||
376 | int stalled_pipe); | 376 | int stalled_pipe); |
377 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); | 377 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); |
378 | static void ub_reset_enter(struct ub_dev *sc, int try); | 378 | static void ub_reset_enter(struct ub_dev *sc, int try); |
379 | static void ub_reset_task(void *arg); | 379 | static void ub_reset_task(struct work_struct *work); |
380 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); | 380 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); |
381 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, | 381 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
382 | struct ub_capacity *ret); | 382 | struct ub_capacity *ret); |
@@ -1558,9 +1558,9 @@ static void ub_reset_enter(struct ub_dev *sc, int try) | |||
1558 | schedule_work(&sc->reset_work); | 1558 | schedule_work(&sc->reset_work); |
1559 | } | 1559 | } |
1560 | 1560 | ||
1561 | static void ub_reset_task(void *arg) | 1561 | static void ub_reset_task(struct work_struct *work) |
1562 | { | 1562 | { |
1563 | struct ub_dev *sc = arg; | 1563 | struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); |
1564 | unsigned long flags; | 1564 | unsigned long flags; |
1565 | struct list_head *p; | 1565 | struct list_head *p; |
1566 | struct ub_lun *lun; | 1566 | struct ub_lun *lun; |
@@ -2179,7 +2179,7 @@ static int ub_probe(struct usb_interface *intf, | |||
2179 | usb_init_urb(&sc->work_urb); | 2179 | usb_init_urb(&sc->work_urb); |
2180 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | 2180 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); |
2181 | atomic_set(&sc->poison, 0); | 2181 | atomic_set(&sc->poison, 0); |
2182 | INIT_WORK(&sc->reset_work, ub_reset_task, sc); | 2182 | INIT_WORK(&sc->reset_work, ub_reset_task); |
2183 | init_waitqueue_head(&sc->reset_wait); | 2183 | init_waitqueue_head(&sc->reset_wait); |
2184 | 2184 | ||
2185 | init_timer(&sc->work_timer); | 2185 | init_timer(&sc->work_timer); |
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index 516751754aa9..9256985cbe36 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c | |||
@@ -157,9 +157,10 @@ static void bcm203x_complete(struct urb *urb) | |||
157 | } | 157 | } |
158 | } | 158 | } |
159 | 159 | ||
160 | static void bcm203x_work(void *user_data) | 160 | static void bcm203x_work(struct work_struct *work) |
161 | { | 161 | { |
162 | struct bcm203x_data *data = user_data; | 162 | struct bcm203x_data *data = |
163 | container_of(work, struct bcm203x_data, work); | ||
163 | 164 | ||
164 | if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) | 165 | if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) |
165 | BT_ERR("Can't submit URB"); | 166 | BT_ERR("Can't submit URB"); |
@@ -246,7 +247,7 @@ static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id | |||
246 | 247 | ||
247 | release_firmware(firmware); | 248 | release_firmware(firmware); |
248 | 249 | ||
249 | INIT_WORK(&data->work, bcm203x_work, (void *) data); | 250 | INIT_WORK(&data->work, bcm203x_work); |
250 | 251 | ||
251 | usb_set_intfdata(intf, data); | 252 | usb_set_intfdata(intf, data); |
252 | 253 | ||
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index e608dadece2f..acb2de5e3a98 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
@@ -926,9 +926,10 @@ cy_sched_event(struct cyclades_port *info, int event) | |||
926 | * had to poll every port to see if that port needed servicing. | 926 | * had to poll every port to see if that port needed servicing. |
927 | */ | 927 | */ |
928 | static void | 928 | static void |
929 | do_softint(void *private_) | 929 | do_softint(struct work_struct *work) |
930 | { | 930 | { |
931 | struct cyclades_port *info = (struct cyclades_port *) private_; | 931 | struct cyclades_port *info = |
932 | container_of(work, struct cyclades_port, tqueue); | ||
932 | struct tty_struct *tty; | 933 | struct tty_struct *tty; |
933 | 934 | ||
934 | tty = info->tty; | 935 | tty = info->tty; |
@@ -5328,7 +5329,7 @@ cy_init(void) | |||
5328 | info->blocked_open = 0; | 5329 | info->blocked_open = 0; |
5329 | info->default_threshold = 0; | 5330 | info->default_threshold = 0; |
5330 | info->default_timeout = 0; | 5331 | info->default_timeout = 0; |
5331 | INIT_WORK(&info->tqueue, do_softint, info); | 5332 | INIT_WORK(&info->tqueue, do_softint); |
5332 | init_waitqueue_head(&info->open_wait); | 5333 | init_waitqueue_head(&info->open_wait); |
5333 | init_waitqueue_head(&info->close_wait); | 5334 | init_waitqueue_head(&info->close_wait); |
5334 | init_waitqueue_head(&info->shutdown_wait); | 5335 | init_waitqueue_head(&info->shutdown_wait); |
@@ -5403,7 +5404,7 @@ cy_init(void) | |||
5403 | info->blocked_open = 0; | 5404 | info->blocked_open = 0; |
5404 | info->default_threshold = 0; | 5405 | info->default_threshold = 0; |
5405 | info->default_timeout = 0; | 5406 | info->default_timeout = 0; |
5406 | INIT_WORK(&info->tqueue, do_softint, info); | 5407 | INIT_WORK(&info->tqueue, do_softint); |
5407 | init_waitqueue_head(&info->open_wait); | 5408 | init_waitqueue_head(&info->open_wait); |
5408 | init_waitqueue_head(&info->close_wait); | 5409 | init_waitqueue_head(&info->close_wait); |
5409 | init_waitqueue_head(&info->shutdown_wait); | 5410 | init_waitqueue_head(&info->shutdown_wait); |
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c index 60c1695db300..806f9ce5f47b 100644 --- a/drivers/char/drm/via_dmablit.c +++ b/drivers/char/drm/via_dmablit.c | |||
@@ -500,9 +500,9 @@ via_dmablit_timer(unsigned long data) | |||
500 | 500 | ||
501 | 501 | ||
502 | static void | 502 | static void |
503 | via_dmablit_workqueue(void *data) | 503 | via_dmablit_workqueue(struct work_struct *work) |
504 | { | 504 | { |
505 | drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; | 505 | drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); |
506 | drm_device_t *dev = blitq->dev; | 506 | drm_device_t *dev = blitq->dev; |
507 | unsigned long irqsave; | 507 | unsigned long irqsave; |
508 | drm_via_sg_info_t *cur_sg; | 508 | drm_via_sg_info_t *cur_sg; |
@@ -571,7 +571,7 @@ via_init_dmablit(drm_device_t *dev) | |||
571 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); | 571 | DRM_INIT_WAITQUEUE(blitq->blit_queue + j); |
572 | } | 572 | } |
573 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); | 573 | DRM_INIT_WAITQUEUE(&blitq->busy_queue); |
574 | INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); | 574 | INIT_WORK(&blitq->wq, via_dmablit_workqueue); |
575 | init_timer(&blitq->poll_timer); | 575 | init_timer(&blitq->poll_timer); |
576 | blitq->poll_timer.function = &via_dmablit_timer; | 576 | blitq->poll_timer.function = &via_dmablit_timer; |
577 | blitq->poll_timer.data = (unsigned long) blitq; | 577 | blitq->poll_timer.data = (unsigned long) blitq; |
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index 706733c0b36a..7c71eb779802 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
@@ -200,7 +200,7 @@ static int pc_ioctl(struct tty_struct *, struct file *, | |||
200 | static int info_ioctl(struct tty_struct *, struct file *, | 200 | static int info_ioctl(struct tty_struct *, struct file *, |
201 | unsigned int, unsigned long); | 201 | unsigned int, unsigned long); |
202 | static void pc_set_termios(struct tty_struct *, struct termios *); | 202 | static void pc_set_termios(struct tty_struct *, struct termios *); |
203 | static void do_softint(void *); | 203 | static void do_softint(struct work_struct *work); |
204 | static void pc_stop(struct tty_struct *); | 204 | static void pc_stop(struct tty_struct *); |
205 | static void pc_start(struct tty_struct *); | 205 | static void pc_start(struct tty_struct *); |
206 | static void pc_throttle(struct tty_struct * tty); | 206 | static void pc_throttle(struct tty_struct * tty); |
@@ -1505,7 +1505,7 @@ static void post_fep_init(unsigned int crd) | |||
1505 | 1505 | ||
1506 | ch->brdchan = bc; | 1506 | ch->brdchan = bc; |
1507 | ch->mailbox = gd; | 1507 | ch->mailbox = gd; |
1508 | INIT_WORK(&ch->tqueue, do_softint, ch); | 1508 | INIT_WORK(&ch->tqueue, do_softint); |
1509 | ch->board = &boards[crd]; | 1509 | ch->board = &boards[crd]; |
1510 | 1510 | ||
1511 | spin_lock_irqsave(&epca_lock, flags); | 1511 | spin_lock_irqsave(&epca_lock, flags); |
@@ -2566,9 +2566,9 @@ static void pc_set_termios(struct tty_struct *tty, struct termios *old_termios) | |||
2566 | 2566 | ||
2567 | /* --------------------- Begin do_softint ----------------------- */ | 2567 | /* --------------------- Begin do_softint ----------------------- */ |
2568 | 2568 | ||
2569 | static void do_softint(void *private_) | 2569 | static void do_softint(struct work_struct *work) |
2570 | { /* Begin do_softint */ | 2570 | { /* Begin do_softint */ |
2571 | struct channel *ch = (struct channel *) private_; | 2571 | struct channel *ch = container_of(work, struct channel, tqueue); |
2572 | /* Called in response to a modem change event */ | 2572 | /* Called in response to a modem change event */ |
2573 | if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ | 2573 | if (ch && ch->magic == EPCA_MAGIC) { /* Begin EPCA_MAGIC */ |
2574 | struct tty_struct *tty = ch->tty; | 2574 | struct tty_struct *tty = ch->tty; |
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index 15a4ea896328..93b551962513 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
@@ -723,9 +723,10 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
723 | * ------------------------------------------------------------------- | 723 | * ------------------------------------------------------------------- |
724 | */ | 724 | */ |
725 | 725 | ||
726 | static void do_softint(void *private_) | 726 | static void do_softint(struct work_struct *work) |
727 | { | 727 | { |
728 | struct esp_struct *info = (struct esp_struct *) private_; | 728 | struct esp_struct *info = |
729 | container_of(work, struct esp_struct, tqueue); | ||
729 | struct tty_struct *tty; | 730 | struct tty_struct *tty; |
730 | 731 | ||
731 | tty = info->tty; | 732 | tty = info->tty; |
@@ -746,9 +747,10 @@ static void do_softint(void *private_) | |||
746 | * do_serial_hangup() -> tty->hangup() -> esp_hangup() | 747 | * do_serial_hangup() -> tty->hangup() -> esp_hangup() |
747 | * | 748 | * |
748 | */ | 749 | */ |
749 | static void do_serial_hangup(void *private_) | 750 | static void do_serial_hangup(struct work_struct *work) |
750 | { | 751 | { |
751 | struct esp_struct *info = (struct esp_struct *) private_; | 752 | struct esp_struct *info = |
753 | container_of(work, struct esp_struct, tqueue_hangup); | ||
752 | struct tty_struct *tty; | 754 | struct tty_struct *tty; |
753 | 755 | ||
754 | tty = info->tty; | 756 | tty = info->tty; |
@@ -2501,8 +2503,8 @@ static int __init espserial_init(void) | |||
2501 | info->magic = ESP_MAGIC; | 2503 | info->magic = ESP_MAGIC; |
2502 | info->close_delay = 5*HZ/10; | 2504 | info->close_delay = 5*HZ/10; |
2503 | info->closing_wait = 30*HZ; | 2505 | info->closing_wait = 30*HZ; |
2504 | INIT_WORK(&info->tqueue, do_softint, info); | 2506 | INIT_WORK(&info->tqueue, do_softint); |
2505 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup, info); | 2507 | INIT_WORK(&info->tqueue_hangup, do_serial_hangup); |
2506 | info->config.rx_timeout = rx_timeout; | 2508 | info->config.rx_timeout = rx_timeout; |
2507 | info->config.flow_on = flow_on; | 2509 | info->config.flow_on = flow_on; |
2508 | info->config.flow_off = flow_off; | 2510 | info->config.flow_off = flow_off; |
diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index 817dc409ac20..23b25ada65ea 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c | |||
@@ -102,7 +102,7 @@ static void gen_rtc_interrupt(unsigned long arg); | |||
102 | * Routine to poll RTC seconds field for change as often as possible, | 102 | * Routine to poll RTC seconds field for change as often as possible, |
103 | * after first RTC_UIE use timer to reduce polling | 103 | * after first RTC_UIE use timer to reduce polling |
104 | */ | 104 | */ |
105 | static void genrtc_troutine(void *data) | 105 | static void genrtc_troutine(struct work_struct *work) |
106 | { | 106 | { |
107 | unsigned int tmp = get_rtc_ss(); | 107 | unsigned int tmp = get_rtc_ss(); |
108 | 108 | ||
@@ -255,7 +255,7 @@ static inline int gen_set_rtc_irq_bit(unsigned char bit) | |||
255 | irq_active = 1; | 255 | irq_active = 1; |
256 | stop_rtc_timers = 0; | 256 | stop_rtc_timers = 0; |
257 | lostint = 0; | 257 | lostint = 0; |
258 | INIT_WORK(&genrtc_task, genrtc_troutine, NULL); | 258 | INIT_WORK(&genrtc_task, genrtc_troutine); |
259 | oldsecs = get_rtc_ss(); | 259 | oldsecs = get_rtc_ss(); |
260 | init_timer(&timer_task); | 260 | init_timer(&timer_task); |
261 | 261 | ||
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index 2cf63e7305a3..82a41d5b4ed0 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) | 69 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) |
70 | 70 | ||
71 | struct hvsi_struct { | 71 | struct hvsi_struct { |
72 | struct work_struct writer; | 72 | struct delayed_work writer; |
73 | struct work_struct handshaker; | 73 | struct work_struct handshaker; |
74 | wait_queue_head_t emptyq; /* woken when outbuf is emptied */ | 74 | wait_queue_head_t emptyq; /* woken when outbuf is emptied */ |
75 | wait_queue_head_t stateq; /* woken when HVSI state changes */ | 75 | wait_queue_head_t stateq; /* woken when HVSI state changes */ |
@@ -744,9 +744,10 @@ static int hvsi_handshake(struct hvsi_struct *hp) | |||
744 | return 0; | 744 | return 0; |
745 | } | 745 | } |
746 | 746 | ||
747 | static void hvsi_handshaker(void *arg) | 747 | static void hvsi_handshaker(struct work_struct *work) |
748 | { | 748 | { |
749 | struct hvsi_struct *hp = (struct hvsi_struct *)arg; | 749 | struct hvsi_struct *hp = |
750 | container_of(work, struct hvsi_struct, handshaker); | ||
750 | 751 | ||
751 | if (hvsi_handshake(hp) >= 0) | 752 | if (hvsi_handshake(hp) >= 0) |
752 | return; | 753 | return; |
@@ -951,9 +952,10 @@ static void hvsi_push(struct hvsi_struct *hp) | |||
951 | } | 952 | } |
952 | 953 | ||
953 | /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ | 954 | /* hvsi_write_worker will keep rescheduling itself until outbuf is empty */ |
954 | static void hvsi_write_worker(void *arg) | 955 | static void hvsi_write_worker(struct work_struct *work) |
955 | { | 956 | { |
956 | struct hvsi_struct *hp = (struct hvsi_struct *)arg; | 957 | struct hvsi_struct *hp = |
958 | container_of(work, struct hvsi_struct, writer.work); | ||
957 | unsigned long flags; | 959 | unsigned long flags; |
958 | #ifdef DEBUG | 960 | #ifdef DEBUG |
959 | static long start_j = 0; | 961 | static long start_j = 0; |
@@ -1287,8 +1289,8 @@ static int __init hvsi_console_init(void) | |||
1287 | } | 1289 | } |
1288 | 1290 | ||
1289 | hp = &hvsi_ports[hvsi_count]; | 1291 | hp = &hvsi_ports[hvsi_count]; |
1290 | INIT_WORK(&hp->writer, hvsi_write_worker, hp); | 1292 | INIT_DELAYED_WORK(&hp->writer, hvsi_write_worker); |
1291 | INIT_WORK(&hp->handshaker, hvsi_handshaker, hp); | 1293 | INIT_WORK(&hp->handshaker, hvsi_handshaker); |
1292 | init_waitqueue_head(&hp->emptyq); | 1294 | init_waitqueue_head(&hp->emptyq); |
1293 | init_waitqueue_head(&hp->stateq); | 1295 | init_waitqueue_head(&hp->stateq); |
1294 | spin_lock_init(&hp->lock); | 1296 | spin_lock_init(&hp->lock); |
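The hvsi hunks illustrate how container_of() differs between the two member types: a delayed_work member is reached through its embedded .work field, while a plain work_struct member is named directly. Condensed from the code above:

	struct hvsi_struct {
		struct delayed_work writer;	/* reschedules itself with a delay */
		struct work_struct handshaker;
		/* ... */
	};

	static void hvsi_write_worker(struct work_struct *work)
	{
		/* delayed_work member: note the two-level writer.work */
		struct hvsi_struct *hp =
			container_of(work, struct hvsi_struct, writer.work);
		/* ... */
	}

	static void hvsi_handshaker(struct work_struct *work)
	{
		/* plain work_struct member is named directly */
		struct hvsi_struct *hp =
			container_of(work, struct hvsi_struct, handshaker);
		/* ... */
	}
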
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c index 54d93f0345e8..c213fdbdb2b0 100644 --- a/drivers/char/ip2/i2lib.c +++ b/drivers/char/ip2/i2lib.c | |||
@@ -84,8 +84,8 @@ static void iiSendPendingMail(i2eBordStrPtr); | |||
84 | static void serviceOutgoingFifo(i2eBordStrPtr); | 84 | static void serviceOutgoingFifo(i2eBordStrPtr); |
85 | 85 | ||
86 | // Functions defined in ip2.c as part of interrupt handling | 86 | // Functions defined in ip2.c as part of interrupt handling |
87 | static void do_input(void *); | 87 | static void do_input(struct work_struct *); |
88 | static void do_status(void *); | 88 | static void do_status(struct work_struct *); |
89 | 89 | ||
90 | //*************** | 90 | //*************** |
91 | //* Debug Data * | 91 | //* Debug Data * |
@@ -331,8 +331,8 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) | |||
331 | pCh->ClosingWaitTime = 30*HZ; | 331 | pCh->ClosingWaitTime = 30*HZ; |
332 | 332 | ||
333 | // Initialize task queue objects | 333 | // Initialize task queue objects |
334 | INIT_WORK(&pCh->tqueue_input, do_input, pCh); | 334 | INIT_WORK(&pCh->tqueue_input, do_input); |
335 | INIT_WORK(&pCh->tqueue_status, do_status, pCh); | 335 | INIT_WORK(&pCh->tqueue_status, do_status); |
336 | 336 | ||
337 | #ifdef IP2DEBUG_TRACE | 337 | #ifdef IP2DEBUG_TRACE |
338 | pCh->trace = ip2trace; | 338 | pCh->trace = ip2trace; |
@@ -1573,7 +1573,7 @@ i2StripFifo(i2eBordStrPtr pB) | |||
1573 | #ifdef USE_IQ | 1573 | #ifdef USE_IQ |
1574 | schedule_work(&pCh->tqueue_input); | 1574 | schedule_work(&pCh->tqueue_input); |
1575 | #else | 1575 | #else |
1576 | do_input(pCh); | 1576 | do_input(&pCh->tqueue_input); |
1577 | #endif | 1577 | #endif |
1578 | 1578 | ||
1579 | // Note we do not need to maintain any flow-control credits at this | 1579 | // Note we do not need to maintain any flow-control credits at this |
@@ -1810,7 +1810,7 @@ i2StripFifo(i2eBordStrPtr pB) | |||
1810 | #ifdef USE_IQ | 1810 | #ifdef USE_IQ |
1811 | schedule_work(&pCh->tqueue_status); | 1811 | schedule_work(&pCh->tqueue_status); |
1812 | #else | 1812 | #else |
1813 | do_status(pCh); | 1813 | do_status(&pCh->tqueue_status); |
1814 | #endif | 1814 | #endif |
1815 | } | 1815 | } |
1816 | } | 1816 | } |
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index a3f32d46d2f8..cda2459c1d60 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
@@ -189,12 +189,12 @@ static int ip2_tiocmset(struct tty_struct *tty, struct file *file, | |||
189 | unsigned int set, unsigned int clear); | 189 | unsigned int set, unsigned int clear); |
190 | 190 | ||
191 | static void set_irq(int, int); | 191 | static void set_irq(int, int); |
192 | static void ip2_interrupt_bh(i2eBordStrPtr pB); | 192 | static void ip2_interrupt_bh(struct work_struct *work); |
193 | static irqreturn_t ip2_interrupt(int irq, void *dev_id); | 193 | static irqreturn_t ip2_interrupt(int irq, void *dev_id); |
194 | static void ip2_poll(unsigned long arg); | 194 | static void ip2_poll(unsigned long arg); |
195 | static inline void service_all_boards(void); | 195 | static inline void service_all_boards(void); |
196 | static void do_input(void *p); | 196 | static void do_input(struct work_struct *); |
197 | static void do_status(void *p); | 197 | static void do_status(struct work_struct *); |
198 | 198 | ||
199 | static void ip2_wait_until_sent(PTTY,int); | 199 | static void ip2_wait_until_sent(PTTY,int); |
200 | 200 | ||
@@ -918,7 +918,7 @@ ip2_init_board( int boardnum ) | |||
918 | pCh++; | 918 | pCh++; |
919 | } | 919 | } |
920 | ex_exit: | 920 | ex_exit: |
921 | INIT_WORK(&pB->tqueue_interrupt, (void(*)(void*)) ip2_interrupt_bh, pB); | 921 | INIT_WORK(&pB->tqueue_interrupt, ip2_interrupt_bh); |
922 | return; | 922 | return; |
923 | 923 | ||
924 | err_release_region: | 924 | err_release_region: |
@@ -1125,8 +1125,8 @@ service_all_boards(void) | |||
1125 | 1125 | ||
1126 | 1126 | ||
1127 | /******************************************************************************/ | 1127 | /******************************************************************************/ |
1128 | /* Function: ip2_interrupt_bh(pB) */ | 1128 | /* Function: ip2_interrupt_bh(work) */ |
1129 | /* Parameters: pB - pointer to the board structure */ | 1129 | /* Parameters: work - pointer to the board structure */ |
1130 | /* Returns: Nothing */ | 1130 | /* Returns: Nothing */ |
1131 | /* */ | 1131 | /* */ |
1132 | /* Description: */ | 1132 | /* Description: */ |
@@ -1135,8 +1135,9 @@ service_all_boards(void) | |||
1135 | /* */ | 1135 | /* */ |
1136 | /******************************************************************************/ | 1136 | /******************************************************************************/ |
1137 | static void | 1137 | static void |
1138 | ip2_interrupt_bh(i2eBordStrPtr pB) | 1138 | ip2_interrupt_bh(struct work_struct *work) |
1139 | { | 1139 | { |
1140 | i2eBordStrPtr pB = container_of(work, i2eBordStr, tqueue_interrupt); | ||
1140 | // pB better well be set or we have a problem! We can only get | 1141 | // pB better well be set or we have a problem! We can only get |
1141 | // here from the IMMEDIATE queue. Here, we process the boards. | 1142 | // here from the IMMEDIATE queue. Here, we process the boards. |
1142 | // Checking pB doesn't cost much and it saves us from the sanity checkers. | 1143 | // Checking pB doesn't cost much and it saves us from the sanity checkers. |
@@ -1245,9 +1246,9 @@ ip2_poll(unsigned long arg) | |||
1245 | ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); | 1246 | ip2trace (ITRC_NO_PORT, ITRC_INTR, ITRC_RETURN, 0 ); |
1246 | } | 1247 | } |
1247 | 1248 | ||
1248 | static void do_input(void *p) | 1249 | static void do_input(struct work_struct *work) |
1249 | { | 1250 | { |
1250 | i2ChanStrPtr pCh = p; | 1251 | i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_input); |
1251 | unsigned long flags; | 1252 | unsigned long flags; |
1252 | 1253 | ||
1253 | ip2trace(CHANN, ITRC_INPUT, 21, 0 ); | 1254 | ip2trace(CHANN, ITRC_INPUT, 21, 0 ); |
@@ -1279,9 +1280,9 @@ static inline void isig(int sig, struct tty_struct *tty, int flush) | |||
1279 | } | 1280 | } |
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | static void do_status(void *p) | 1283 | static void do_status(struct work_struct *work) |
1283 | { | 1284 | { |
1284 | i2ChanStrPtr pCh = p; | 1285 | i2ChanStrPtr pCh = container_of(work, i2ChanStr, tqueue_status); |
1285 | int status; | 1286 | int status; |
1286 | 1287 | ||
1287 | status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); | 1288 | status = i2GetStatus( pCh, (I2_BRK|I2_PAR|I2_FRA|I2_OVR) ); |
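One more wrinkle from the ip2 hunks: when a handler is also called synchronously (the !USE_IQ path in i2lib.c), the caller now passes the address of the embedded work item rather than the channel pointer, so the container_of() inside the handler still resolves the owning channel. From the hunks above:

#ifdef USE_IQ
	schedule_work(&pCh->tqueue_input);
#else
	do_input(&pCh->tqueue_input);	/* was do_input(pCh) */
#endif
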
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 58c955e390b3..1637c1d9a4ba 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
@@ -530,9 +530,9 @@ sched_again: | |||
530 | /* Interrupt handlers */ | 530 | /* Interrupt handlers */ |
531 | 531 | ||
532 | 532 | ||
533 | static void isicom_bottomhalf(void *data) | 533 | static void isicom_bottomhalf(struct work_struct *work) |
534 | { | 534 | { |
535 | struct isi_port *port = (struct isi_port *) data; | 535 | struct isi_port *port = container_of(work, struct isi_port, bh_tqueue); |
536 | struct tty_struct *tty = port->tty; | 536 | struct tty_struct *tty = port->tty; |
537 | 537 | ||
538 | if (!tty) | 538 | if (!tty) |
@@ -1474,9 +1474,9 @@ static void isicom_start(struct tty_struct *tty) | |||
1474 | } | 1474 | } |
1475 | 1475 | ||
1476 | /* hangup et all */ | 1476 | /* hangup et all */ |
1477 | static void do_isicom_hangup(void *data) | 1477 | static void do_isicom_hangup(struct work_struct *work) |
1478 | { | 1478 | { |
1479 | struct isi_port *port = data; | 1479 | struct isi_port *port = container_of(work, struct isi_port, hangup_tq); |
1480 | struct tty_struct *tty; | 1480 | struct tty_struct *tty; |
1481 | 1481 | ||
1482 | tty = port->tty; | 1482 | tty = port->tty; |
@@ -1966,8 +1966,8 @@ static int __devinit isicom_setup(void) | |||
1966 | port->channel = channel; | 1966 | port->channel = channel; |
1967 | port->close_delay = 50 * HZ/100; | 1967 | port->close_delay = 50 * HZ/100; |
1968 | port->closing_wait = 3000 * HZ/100; | 1968 | port->closing_wait = 3000 * HZ/100; |
1969 | INIT_WORK(&port->hangup_tq, do_isicom_hangup, port); | 1969 | INIT_WORK(&port->hangup_tq, do_isicom_hangup); |
1970 | INIT_WORK(&port->bh_tqueue, isicom_bottomhalf, port); | 1970 | INIT_WORK(&port->bh_tqueue, isicom_bottomhalf); |
1971 | port->status = 0; | 1971 | port->status = 0; |
1972 | init_waitqueue_head(&port->open_wait); | 1972 | init_waitqueue_head(&port->open_wait); |
1973 | init_waitqueue_head(&port->close_wait); | 1973 | init_waitqueue_head(&port->close_wait); |
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index ffdf9df1a67a..bd9195e17956 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
@@ -663,7 +663,7 @@ static int stli_initopen(stlibrd_t *brdp, stliport_t *portp); | |||
663 | static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); | 663 | static int stli_rawopen(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); |
664 | static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); | 664 | static int stli_rawclose(stlibrd_t *brdp, stliport_t *portp, unsigned long arg, int wait); |
665 | static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp); | 665 | static int stli_waitcarrier(stlibrd_t *brdp, stliport_t *portp, struct file *filp); |
666 | static void stli_dohangup(void *arg); | 666 | static void stli_dohangup(struct work_struct *); |
667 | static int stli_setport(stliport_t *portp); | 667 | static int stli_setport(stliport_t *portp); |
668 | static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); | 668 | static int stli_cmdwait(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); |
669 | static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); | 669 | static void stli_sendcmd(stlibrd_t *brdp, stliport_t *portp, unsigned long cmd, void *arg, int size, int copyback); |
@@ -1990,9 +1990,9 @@ static void stli_start(struct tty_struct *tty) | |||
1990 | * aren't that time critical). | 1990 | * aren't that time critical). |
1991 | */ | 1991 | */ |
1992 | 1992 | ||
1993 | static void stli_dohangup(void *arg) | 1993 | static void stli_dohangup(struct work_struct *ugly_api) |
1994 | { | 1994 | { |
1995 | stliport_t *portp = (stliport_t *) arg; | 1995 | stliport_t *portp = container_of(ugly_api, stliport_t, tqhangup); |
1996 | if (portp->tty != NULL) { | 1996 | if (portp->tty != NULL) { |
1997 | tty_hangup(portp->tty); | 1997 | tty_hangup(portp->tty); |
1998 | } | 1998 | } |
@@ -2898,7 +2898,7 @@ static int stli_initports(stlibrd_t *brdp) | |||
2898 | portp->baud_base = STL_BAUDBASE; | 2898 | portp->baud_base = STL_BAUDBASE; |
2899 | portp->close_delay = STL_CLOSEDELAY; | 2899 | portp->close_delay = STL_CLOSEDELAY; |
2900 | portp->closing_wait = 30 * HZ; | 2900 | portp->closing_wait = 30 * HZ; |
2901 | INIT_WORK(&portp->tqhangup, stli_dohangup, portp); | 2901 | INIT_WORK(&portp->tqhangup, stli_dohangup); |
2902 | init_waitqueue_head(&portp->open_wait); | 2902 | init_waitqueue_head(&portp->open_wait); |
2903 | init_waitqueue_head(&portp->close_wait); | 2903 | init_waitqueue_head(&portp->close_wait); |
2904 | init_waitqueue_head(&portp->raw_wait); | 2904 | init_waitqueue_head(&portp->raw_wait); |
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 96cb1f07332b..2d025a9fd14d 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -222,7 +222,7 @@ static struct semaphore moxaBuffSem; | |||
222 | /* | 222 | /* |
223 | * static functions: | 223 | * static functions: |
224 | */ | 224 | */ |
225 | static void do_moxa_softint(void *); | 225 | static void do_moxa_softint(struct work_struct *); |
226 | static int moxa_open(struct tty_struct *, struct file *); | 226 | static int moxa_open(struct tty_struct *, struct file *); |
227 | static void moxa_close(struct tty_struct *, struct file *); | 227 | static void moxa_close(struct tty_struct *, struct file *); |
228 | static int moxa_write(struct tty_struct *, const unsigned char *, int); | 228 | static int moxa_write(struct tty_struct *, const unsigned char *, int); |
@@ -363,7 +363,7 @@ static int __init moxa_init(void) | |||
363 | for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { | 363 | for (i = 0, ch = moxaChannels; i < MAX_PORTS; i++, ch++) { |
364 | ch->type = PORT_16550A; | 364 | ch->type = PORT_16550A; |
365 | ch->port = i; | 365 | ch->port = i; |
366 | INIT_WORK(&ch->tqueue, do_moxa_softint, ch); | 366 | INIT_WORK(&ch->tqueue, do_moxa_softint); |
367 | ch->tty = NULL; | 367 | ch->tty = NULL; |
368 | ch->close_delay = 5 * HZ / 10; | 368 | ch->close_delay = 5 * HZ / 10; |
369 | ch->closing_wait = 30 * HZ; | 369 | ch->closing_wait = 30 * HZ; |
@@ -509,9 +509,9 @@ static void __exit moxa_exit(void) | |||
509 | module_init(moxa_init); | 509 | module_init(moxa_init); |
510 | module_exit(moxa_exit); | 510 | module_exit(moxa_exit); |
511 | 511 | ||
512 | static void do_moxa_softint(void *private_) | 512 | static void do_moxa_softint(struct work_struct *work) |
513 | { | 513 | { |
514 | struct moxa_str *ch = (struct moxa_str *) private_; | 514 | struct moxa_str *ch = container_of(work, struct moxa_str, tqueue); |
515 | struct tty_struct *tty; | 515 | struct tty_struct *tty; |
516 | 516 | ||
517 | if (ch && (tty = ch->tty)) { | 517 | if (ch && (tty = ch->tty)) { |
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 048d91142c17..5ed2486b7581 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
@@ -389,7 +389,7 @@ static int mxser_init(void); | |||
389 | /* static void mxser_poll(unsigned long); */ | 389 | /* static void mxser_poll(unsigned long); */ |
390 | static int mxser_get_ISA_conf(int, struct mxser_hwconf *); | 390 | static int mxser_get_ISA_conf(int, struct mxser_hwconf *); |
391 | static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); | 391 | static int mxser_get_PCI_conf(int, int, int, struct mxser_hwconf *); |
392 | static void mxser_do_softint(void *); | 392 | static void mxser_do_softint(struct work_struct *); |
393 | static int mxser_open(struct tty_struct *, struct file *); | 393 | static int mxser_open(struct tty_struct *, struct file *); |
394 | static void mxser_close(struct tty_struct *, struct file *); | 394 | static void mxser_close(struct tty_struct *, struct file *); |
395 | static int mxser_write(struct tty_struct *, const unsigned char *, int); | 395 | static int mxser_write(struct tty_struct *, const unsigned char *, int); |
@@ -590,7 +590,7 @@ static int mxser_initbrd(int board, struct mxser_hwconf *hwconf) | |||
590 | info->custom_divisor = hwconf->baud_base[i] * 16; | 590 | info->custom_divisor = hwconf->baud_base[i] * 16; |
591 | info->close_delay = 5 * HZ / 10; | 591 | info->close_delay = 5 * HZ / 10; |
592 | info->closing_wait = 30 * HZ; | 592 | info->closing_wait = 30 * HZ; |
593 | INIT_WORK(&info->tqueue, mxser_do_softint, info); | 593 | INIT_WORK(&info->tqueue, mxser_do_softint); |
594 | info->normal_termios = mxvar_sdriver->init_termios; | 594 | info->normal_termios = mxvar_sdriver->init_termios; |
595 | init_waitqueue_head(&info->open_wait); | 595 | init_waitqueue_head(&info->open_wait); |
596 | init_waitqueue_head(&info->close_wait); | 596 | init_waitqueue_head(&info->close_wait); |
@@ -917,9 +917,10 @@ static int mxser_init(void) | |||
917 | return 0; | 917 | return 0; |
918 | } | 918 | } |
919 | 919 | ||
920 | static void mxser_do_softint(void *private_) | 920 | static void mxser_do_softint(struct work_struct *work) |
921 | { | 921 | { |
922 | struct mxser_struct *info = private_; | 922 | struct mxser_struct *info = |
923 | container_of(work, struct mxser_struct, tqueue); | ||
923 | struct tty_struct *tty; | 924 | struct tty_struct *tty; |
924 | 925 | ||
925 | tty = info->tty; | 926 | tty = info->tty; |
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index f9f72500ea5d..1bd12296dca5 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
@@ -421,7 +421,7 @@ static irqreturn_t mgslpc_isr(int irq, void *dev_id); | |||
421 | /* | 421 | /* |
422 | * Bottom half interrupt handlers | 422 | * Bottom half interrupt handlers |
423 | */ | 423 | */ |
424 | static void bh_handler(void* Context); | 424 | static void bh_handler(struct work_struct *work); |
425 | static void bh_transmit(MGSLPC_INFO *info); | 425 | static void bh_transmit(MGSLPC_INFO *info); |
426 | static void bh_status(MGSLPC_INFO *info); | 426 | static void bh_status(MGSLPC_INFO *info); |
427 | 427 | ||
@@ -547,7 +547,7 @@ static int mgslpc_probe(struct pcmcia_device *link) | |||
547 | 547 | ||
548 | memset(info, 0, sizeof(MGSLPC_INFO)); | 548 | memset(info, 0, sizeof(MGSLPC_INFO)); |
549 | info->magic = MGSLPC_MAGIC; | 549 | info->magic = MGSLPC_MAGIC; |
550 | INIT_WORK(&info->task, bh_handler, info); | 550 | INIT_WORK(&info->task, bh_handler); |
551 | info->max_frame_size = 4096; | 551 | info->max_frame_size = 4096; |
552 | info->close_delay = 5*HZ/10; | 552 | info->close_delay = 5*HZ/10; |
553 | info->closing_wait = 30*HZ; | 553 | info->closing_wait = 30*HZ; |
@@ -835,9 +835,9 @@ static int bh_action(MGSLPC_INFO *info) | |||
835 | return rc; | 835 | return rc; |
836 | } | 836 | } |
837 | 837 | ||
838 | static void bh_handler(void* Context) | 838 | static void bh_handler(struct work_struct *work) |
839 | { | 839 | { |
840 | MGSLPC_INFO *info = (MGSLPC_INFO*)Context; | 840 | MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); |
841 | int action; | 841 | int action; |
842 | 842 | ||
843 | if (!info) | 843 | if (!info) |
diff --git a/drivers/char/random.c b/drivers/char/random.c index d40df30c2b10..4c6782a1ecdb 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1422,9 +1422,9 @@ static struct keydata { | |||
1422 | 1422 | ||
1423 | static unsigned int ip_cnt; | 1423 | static unsigned int ip_cnt; |
1424 | 1424 | ||
1425 | static void rekey_seq_generator(void *private_); | 1425 | static void rekey_seq_generator(struct work_struct *work); |
1426 | 1426 | ||
1427 | static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); | 1427 | static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator); |
1428 | 1428 | ||
1429 | /* | 1429 | /* |
1430 | * Lock avoidance: | 1430 | * Lock avoidance: |
@@ -1438,7 +1438,7 @@ static DECLARE_WORK(rekey_work, rekey_seq_generator, NULL); | |||
1438 | * happen, and even if that happens only a not perfectly compliant | 1438 | * happen, and even if that happens only a not perfectly compliant |
1439 | * ISN is generated, nothing fatal. | 1439 | * ISN is generated, nothing fatal. |
1440 | */ | 1440 | */ |
1441 | static void rekey_seq_generator(void *private_) | 1441 | static void rekey_seq_generator(struct work_struct *work) |
1442 | { | 1442 | { |
1443 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; | 1443 | struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)]; |
1444 | 1444 | ||
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 5ab32b38f45a..722dd3e74185 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
@@ -1516,9 +1516,9 @@ static void rc_start(struct tty_struct * tty) | |||
1516 | * do_rc_hangup() -> tty->hangup() -> rc_hangup() | 1516 | * do_rc_hangup() -> tty->hangup() -> rc_hangup() |
1517 | * | 1517 | * |
1518 | */ | 1518 | */ |
1519 | static void do_rc_hangup(void *private_) | 1519 | static void do_rc_hangup(struct work_struct *ugly_api) |
1520 | { | 1520 | { |
1521 | struct riscom_port *port = (struct riscom_port *) private_; | 1521 | struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue_hangup); |
1522 | struct tty_struct *tty; | 1522 | struct tty_struct *tty; |
1523 | 1523 | ||
1524 | tty = port->tty; | 1524 | tty = port->tty; |
@@ -1567,9 +1567,9 @@ static void rc_set_termios(struct tty_struct * tty, struct termios * old_termios | |||
1567 | } | 1567 | } |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | static void do_softint(void *private_) | 1570 | static void do_softint(struct work_struct *ugly_api) |
1571 | { | 1571 | { |
1572 | struct riscom_port *port = (struct riscom_port *) private_; | 1572 | struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue); |
1573 | struct tty_struct *tty; | 1573 | struct tty_struct *tty; |
1574 | 1574 | ||
1575 | if(!(tty = port->tty)) | 1575 | if(!(tty = port->tty)) |
@@ -1632,8 +1632,8 @@ static inline int rc_init_drivers(void) | |||
1632 | memset(rc_port, 0, sizeof(rc_port)); | 1632 | memset(rc_port, 0, sizeof(rc_port)); |
1633 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { | 1633 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { |
1634 | rc_port[i].magic = RISCOM8_MAGIC; | 1634 | rc_port[i].magic = RISCOM8_MAGIC; |
1635 | INIT_WORK(&rc_port[i].tqueue, do_softint, &rc_port[i]); | 1635 | INIT_WORK(&rc_port[i].tqueue, do_softint); |
1636 | INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup, &rc_port[i]); | 1636 | INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup); |
1637 | rc_port[i].close_delay = 50 * HZ/100; | 1637 | rc_port[i].close_delay = 50 * HZ/100; |
1638 | rc_port[i].closing_wait = 3000 * HZ/100; | 1638 | rc_port[i].closing_wait = 3000 * HZ/100; |
1639 | init_waitqueue_head(&rc_port[i].open_wait); | 1639 | init_waitqueue_head(&rc_port[i].open_wait); |
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 3af7f0958c5d..9ba13af234be 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
@@ -706,9 +706,9 @@ cd2401_rx_interrupt(int irq, void *dev_id) | |||
706 | * had to poll every port to see if that port needed servicing. | 706 | * had to poll every port to see if that port needed servicing. |
707 | */ | 707 | */ |
708 | static void | 708 | static void |
709 | do_softint(void *private_) | 709 | do_softint(struct work_struct *ugly_api) |
710 | { | 710 | { |
711 | struct cyclades_port *info = (struct cyclades_port *) private_; | 711 | struct cyclades_port *info = container_of(ugly_api, struct cyclades_port, tqueue); |
712 | struct tty_struct *tty; | 712 | struct tty_struct *tty; |
713 | 713 | ||
714 | tty = info->tty; | 714 | tty = info->tty; |
@@ -2273,7 +2273,7 @@ scrn[1] = '\0'; | |||
2273 | info->blocked_open = 0; | 2273 | info->blocked_open = 0; |
2274 | info->default_threshold = 0; | 2274 | info->default_threshold = 0; |
2275 | info->default_timeout = 0; | 2275 | info->default_timeout = 0; |
2276 | INIT_WORK(&info->tqueue, do_softint, info); | 2276 | INIT_WORK(&info->tqueue, do_softint); |
2277 | init_waitqueue_head(&info->open_wait); | 2277 | init_waitqueue_head(&info->open_wait); |
2278 | init_waitqueue_head(&info->close_wait); | 2278 | init_waitqueue_head(&info->close_wait); |
2279 | /* info->session */ | 2279 | /* info->session */ |
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index c084149153de..fc87070f1866 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c | |||
@@ -765,7 +765,7 @@ static void sonypi_setbluetoothpower(u8 state) | |||
765 | sonypi_device.bluetooth_power = state; | 765 | sonypi_device.bluetooth_power = state; |
766 | } | 766 | } |
767 | 767 | ||
768 | static void input_keyrelease(void *data) | 768 | static void input_keyrelease(struct work_struct *work) |
769 | { | 769 | { |
770 | struct sonypi_keypress kp; | 770 | struct sonypi_keypress kp; |
771 | 771 | ||
@@ -1412,7 +1412,7 @@ static int __devinit sonypi_probe(struct platform_device *dev) | |||
1412 | goto err_inpdev_unregister; | 1412 | goto err_inpdev_unregister; |
1413 | } | 1413 | } |
1414 | 1414 | ||
1415 | INIT_WORK(&sonypi_device.input_work, input_keyrelease, NULL); | 1415 | INIT_WORK(&sonypi_device.input_work, input_keyrelease); |
1416 | } | 1416 | } |
1417 | 1417 | ||
1418 | sonypi_enable(0); | 1418 | sonypi_enable(0); |
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 7e1bd9562c2a..99137ab66b62 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
@@ -2261,9 +2261,10 @@ static void sx_start(struct tty_struct * tty) | |||
2261 | * do_sx_hangup() -> tty->hangup() -> sx_hangup() | 2261 | * do_sx_hangup() -> tty->hangup() -> sx_hangup() |
2262 | * | 2262 | * |
2263 | */ | 2263 | */ |
2264 | static void do_sx_hangup(void *private_) | 2264 | static void do_sx_hangup(struct work_struct *work) |
2265 | { | 2265 | { |
2266 | struct specialix_port *port = (struct specialix_port *) private_; | 2266 | struct specialix_port *port = |
2267 | container_of(work, struct specialix_port, tqueue_hangup); | ||
2267 | struct tty_struct *tty; | 2268 | struct tty_struct *tty; |
2268 | 2269 | ||
2269 | func_enter(); | 2270 | func_enter(); |
@@ -2336,9 +2337,10 @@ static void sx_set_termios(struct tty_struct * tty, struct termios * old_termios | |||
2336 | } | 2337 | } |
2337 | 2338 | ||
2338 | 2339 | ||
2339 | static void do_softint(void *private_) | 2340 | static void do_softint(struct work_struct *work) |
2340 | { | 2341 | { |
2341 | struct specialix_port *port = (struct specialix_port *) private_; | 2342 | struct specialix_port *port = |
2343 | container_of(work, struct specialix_port, tqueue); | ||
2342 | struct tty_struct *tty; | 2344 | struct tty_struct *tty; |
2343 | 2345 | ||
2344 | func_enter(); | 2346 | func_enter(); |
@@ -2411,8 +2413,8 @@ static int sx_init_drivers(void) | |||
2411 | memset(sx_port, 0, sizeof(sx_port)); | 2413 | memset(sx_port, 0, sizeof(sx_port)); |
2412 | for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { | 2414 | for (i = 0; i < SX_NPORT * SX_NBOARD; i++) { |
2413 | sx_port[i].magic = SPECIALIX_MAGIC; | 2415 | sx_port[i].magic = SPECIALIX_MAGIC; |
2414 | INIT_WORK(&sx_port[i].tqueue, do_softint, &sx_port[i]); | 2416 | INIT_WORK(&sx_port[i].tqueue, do_softint); |
2415 | INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup, &sx_port[i]); | 2417 | INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup); |
2416 | sx_port[i].close_delay = 50 * HZ/100; | 2418 | sx_port[i].close_delay = 50 * HZ/100; |
2417 | sx_port[i].closing_wait = 3000 * HZ/100; | 2419 | sx_port[i].closing_wait = 3000 * HZ/100; |
2418 | init_waitqueue_head(&sx_port[i].open_wait); | 2420 | init_waitqueue_head(&sx_port[i].open_wait); |
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 522e88e395cc..5e2de62bce70 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
@@ -500,7 +500,7 @@ static int stl_echatintr(stlbrd_t *brdp); | |||
500 | static int stl_echmcaintr(stlbrd_t *brdp); | 500 | static int stl_echmcaintr(stlbrd_t *brdp); |
501 | static int stl_echpciintr(stlbrd_t *brdp); | 501 | static int stl_echpciintr(stlbrd_t *brdp); |
502 | static int stl_echpci64intr(stlbrd_t *brdp); | 502 | static int stl_echpci64intr(stlbrd_t *brdp); |
503 | static void stl_offintr(void *private); | 503 | static void stl_offintr(struct work_struct *); |
504 | static stlbrd_t *stl_allocbrd(void); | 504 | static stlbrd_t *stl_allocbrd(void); |
505 | static stlport_t *stl_getport(int brdnr, int panelnr, int portnr); | 505 | static stlport_t *stl_getport(int brdnr, int panelnr, int portnr); |
506 | 506 | ||
@@ -2081,14 +2081,12 @@ static int stl_echpci64intr(stlbrd_t *brdp) | |||
2081 | /* | 2081 | /* |
2082 | * Service an off-level request for some channel. | 2082 | * Service an off-level request for some channel. |
2083 | */ | 2083 | */ |
2084 | static void stl_offintr(void *private) | 2084 | static void stl_offintr(struct work_struct *work) |
2085 | { | 2085 | { |
2086 | stlport_t *portp; | 2086 | stlport_t *portp = container_of(work, stlport_t, tqueue); |
2087 | struct tty_struct *tty; | 2087 | struct tty_struct *tty; |
2088 | unsigned int oldsigs; | 2088 | unsigned int oldsigs; |
2089 | 2089 | ||
2090 | portp = private; | ||
2091 | |||
2092 | #ifdef DEBUG | 2090 | #ifdef DEBUG |
2093 | printk("stl_offintr(portp=%x)\n", (int) portp); | 2091 | printk("stl_offintr(portp=%x)\n", (int) portp); |
2094 | #endif | 2092 | #endif |
@@ -2156,7 +2154,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp) | |||
2156 | portp->baud_base = STL_BAUDBASE; | 2154 | portp->baud_base = STL_BAUDBASE; |
2157 | portp->close_delay = STL_CLOSEDELAY; | 2155 | portp->close_delay = STL_CLOSEDELAY; |
2158 | portp->closing_wait = 30 * HZ; | 2156 | portp->closing_wait = 30 * HZ; |
2159 | INIT_WORK(&portp->tqueue, stl_offintr, portp); | 2157 | INIT_WORK(&portp->tqueue, stl_offintr); |
2160 | init_waitqueue_head(&portp->open_wait); | 2158 | init_waitqueue_head(&portp->open_wait); |
2161 | init_waitqueue_head(&portp->close_wait); | 2159 | init_waitqueue_head(&portp->close_wait); |
2162 | portp->stats.brd = portp->brdnr; | 2160 | portp->stats.brd = portp->brdnr; |
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 06784adcc35c..147c30da81ea 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
@@ -802,7 +802,7 @@ static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, u | |||
802 | /* | 802 | /* |
803 | * Bottom half interrupt handlers | 803 | * Bottom half interrupt handlers |
804 | */ | 804 | */ |
805 | static void mgsl_bh_handler(void* Context); | 805 | static void mgsl_bh_handler(struct work_struct *work); |
806 | static void mgsl_bh_receive(struct mgsl_struct *info); | 806 | static void mgsl_bh_receive(struct mgsl_struct *info); |
807 | static void mgsl_bh_transmit(struct mgsl_struct *info); | 807 | static void mgsl_bh_transmit(struct mgsl_struct *info); |
808 | static void mgsl_bh_status(struct mgsl_struct *info); | 808 | static void mgsl_bh_status(struct mgsl_struct *info); |
@@ -1071,9 +1071,10 @@ static int mgsl_bh_action(struct mgsl_struct *info) | |||
1071 | /* | 1071 | /* |
1072 | * Perform bottom half processing of work items queued by ISR. | 1072 | * Perform bottom half processing of work items queued by ISR. |
1073 | */ | 1073 | */ |
1074 | static void mgsl_bh_handler(void* Context) | 1074 | static void mgsl_bh_handler(struct work_struct *work) |
1075 | { | 1075 | { |
1076 | struct mgsl_struct *info = (struct mgsl_struct*)Context; | 1076 | struct mgsl_struct *info = |
1077 | container_of(work, struct mgsl_struct, task); | ||
1077 | int action; | 1078 | int action; |
1078 | 1079 | ||
1079 | if (!info) | 1080 | if (!info) |
@@ -4337,7 +4338,7 @@ static struct mgsl_struct* mgsl_allocate_device(void) | |||
4337 | } else { | 4338 | } else { |
4338 | memset(info, 0, sizeof(struct mgsl_struct)); | 4339 | memset(info, 0, sizeof(struct mgsl_struct)); |
4339 | info->magic = MGSL_MAGIC; | 4340 | info->magic = MGSL_MAGIC; |
4340 | INIT_WORK(&info->task, mgsl_bh_handler, info); | 4341 | INIT_WORK(&info->task, mgsl_bh_handler); |
4341 | info->max_frame_size = 4096; | 4342 | info->max_frame_size = 4096; |
4342 | info->close_delay = 5*HZ/10; | 4343 | info->close_delay = 5*HZ/10; |
4343 | info->closing_wait = 30*HZ; | 4344 | info->closing_wait = 30*HZ; |
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index d4334c79f8d4..07f34d43dc7f 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
@@ -485,7 +485,7 @@ static void enable_loopback(struct slgt_info *info); | |||
485 | static void set_rate(struct slgt_info *info, u32 data_rate); | 485 | static void set_rate(struct slgt_info *info, u32 data_rate); |
486 | 486 | ||
487 | static int bh_action(struct slgt_info *info); | 487 | static int bh_action(struct slgt_info *info); |
488 | static void bh_handler(void* context); | 488 | static void bh_handler(struct work_struct *work); |
489 | static void bh_transmit(struct slgt_info *info); | 489 | static void bh_transmit(struct slgt_info *info); |
490 | static void isr_serial(struct slgt_info *info); | 490 | static void isr_serial(struct slgt_info *info); |
491 | static void isr_rdma(struct slgt_info *info); | 491 | static void isr_rdma(struct slgt_info *info); |
@@ -1878,9 +1878,9 @@ static int bh_action(struct slgt_info *info) | |||
1878 | /* | 1878 | /* |
1879 | * perform bottom half processing | 1879 | * perform bottom half processing |
1880 | */ | 1880 | */ |
1881 | static void bh_handler(void* context) | 1881 | static void bh_handler(struct work_struct *work) |
1882 | { | 1882 | { |
1883 | struct slgt_info *info = context; | 1883 | struct slgt_info *info = container_of(work, struct slgt_info, task); |
1884 | int action; | 1884 | int action; |
1885 | 1885 | ||
1886 | if (!info) | 1886 | if (!info) |
@@ -3326,7 +3326,7 @@ static struct slgt_info *alloc_dev(int adapter_num, int port_num, struct pci_dev | |||
3326 | } else { | 3326 | } else { |
3327 | memset(info, 0, sizeof(struct slgt_info)); | 3327 | memset(info, 0, sizeof(struct slgt_info)); |
3328 | info->magic = MGSL_MAGIC; | 3328 | info->magic = MGSL_MAGIC; |
3329 | INIT_WORK(&info->task, bh_handler, info); | 3329 | INIT_WORK(&info->task, bh_handler); |
3330 | info->max_frame_size = 4096; | 3330 | info->max_frame_size = 4096; |
3331 | info->raw_rx_size = DMABUFSIZE; | 3331 | info->raw_rx_size = DMABUFSIZE; |
3332 | info->close_delay = 5*HZ/10; | 3332 | info->close_delay = 5*HZ/10; |
@@ -4799,6 +4799,6 @@ static void rx_timeout(unsigned long context) | |||
4799 | spin_lock_irqsave(&info->lock, flags); | 4799 | spin_lock_irqsave(&info->lock, flags); |
4800 | info->pending_bh |= BH_RECEIVE; | 4800 | info->pending_bh |= BH_RECEIVE; |
4801 | spin_unlock_irqrestore(&info->lock, flags); | 4801 | spin_unlock_irqrestore(&info->lock, flags); |
4802 | bh_handler(info); | 4802 | bh_handler(&info->task); |
4803 | } | 4803 | } |
4804 | 4804 | ||
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index 3e932b681371..13a57245cf2e 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
@@ -602,7 +602,7 @@ static void enable_loopback(SLMP_INFO *info, int enable); | |||
602 | static void set_rate(SLMP_INFO *info, u32 data_rate); | 602 | static void set_rate(SLMP_INFO *info, u32 data_rate); |
603 | 603 | ||
604 | static int bh_action(SLMP_INFO *info); | 604 | static int bh_action(SLMP_INFO *info); |
605 | static void bh_handler(void* Context); | 605 | static void bh_handler(struct work_struct *work); |
606 | static void bh_receive(SLMP_INFO *info); | 606 | static void bh_receive(SLMP_INFO *info); |
607 | static void bh_transmit(SLMP_INFO *info); | 607 | static void bh_transmit(SLMP_INFO *info); |
608 | static void bh_status(SLMP_INFO *info); | 608 | static void bh_status(SLMP_INFO *info); |
@@ -2063,9 +2063,9 @@ int bh_action(SLMP_INFO *info) | |||
2063 | 2063 | ||
2064 | /* Perform bottom half processing of work items queued by ISR. | 2064 | /* Perform bottom half processing of work items queued by ISR. |
2065 | */ | 2065 | */ |
2066 | void bh_handler(void* Context) | 2066 | void bh_handler(struct work_struct *work) |
2067 | { | 2067 | { |
2068 | SLMP_INFO *info = (SLMP_INFO*)Context; | 2068 | SLMP_INFO *info = container_of(work, SLMP_INFO, task); |
2069 | int action; | 2069 | int action; |
2070 | 2070 | ||
2071 | if (!info) | 2071 | if (!info) |
@@ -3805,7 +3805,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev) | |||
3805 | } else { | 3805 | } else { |
3806 | memset(info, 0, sizeof(SLMP_INFO)); | 3806 | memset(info, 0, sizeof(SLMP_INFO)); |
3807 | info->magic = MGSL_MAGIC; | 3807 | info->magic = MGSL_MAGIC; |
3808 | INIT_WORK(&info->task, bh_handler, info); | 3808 | INIT_WORK(&info->task, bh_handler); |
3809 | info->max_frame_size = 4096; | 3809 | info->max_frame_size = 4096; |
3810 | info->close_delay = 5*HZ/10; | 3810 | info->close_delay = 5*HZ/10; |
3811 | info->closing_wait = 30*HZ; | 3811 | info->closing_wait = 30*HZ; |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 5f49280779fb..c64f5bcff947 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -219,13 +219,13 @@ static struct sysrq_key_op sysrq_term_op = { | |||
219 | .enable_mask = SYSRQ_ENABLE_SIGNAL, | 219 | .enable_mask = SYSRQ_ENABLE_SIGNAL, |
220 | }; | 220 | }; |
221 | 221 | ||
222 | static void moom_callback(void *ignored) | 222 | static void moom_callback(struct work_struct *ignored) |
223 | { | 223 | { |
224 | out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], | 224 | out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], |
225 | GFP_KERNEL, 0); | 225 | GFP_KERNEL, 0); |
226 | } | 226 | } |
227 | 227 | ||
228 | static DECLARE_WORK(moom_work, moom_callback, NULL); | 228 | static DECLARE_WORK(moom_work, moom_callback); |
229 | 229 | ||
230 | static void sysrq_handle_moom(int key, struct tty_struct *tty) | 230 | static void sysrq_handle_moom(int key, struct tty_struct *tty) |
231 | { | 231 | { |
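
The sysrq.c hunk shows the statically declared case: DECLARE_WORK() now takes only the name and the handler, and a handler that never used its data pointer simply ignores the work_struct * argument. A hedged sketch with made-up names:

    #include <linux/workqueue.h>

    static void foo_event(struct work_struct *ignored)
    {
            /* runs in process context via the shared kernel workqueue */
    }

    /* Old API: DECLARE_WORK(foo_work, foo_event, NULL); */
    static DECLARE_WORK(foo_work, foo_event);

    /* Callers are unchanged: schedule_work(&foo_work); */
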
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 6e1329d404d2..774fa861169a 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -325,9 +325,9 @@ static void user_reader_timeout(unsigned long ptr) | |||
325 | schedule_work(&chip->work); | 325 | schedule_work(&chip->work); |
326 | } | 326 | } |
327 | 327 | ||
328 | static void timeout_work(void *ptr) | 328 | static void timeout_work(struct work_struct *work) |
329 | { | 329 | { |
330 | struct tpm_chip *chip = ptr; | 330 | struct tpm_chip *chip = container_of(work, struct tpm_chip, work); |
331 | 331 | ||
332 | down(&chip->buffer_mutex); | 332 | down(&chip->buffer_mutex); |
333 | atomic_set(&chip->data_pending, 0); | 333 | atomic_set(&chip->data_pending, 0); |
@@ -1105,7 +1105,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend | |||
1105 | init_MUTEX(&chip->tpm_mutex); | 1105 | init_MUTEX(&chip->tpm_mutex); |
1106 | INIT_LIST_HEAD(&chip->list); | 1106 | INIT_LIST_HEAD(&chip->list); |
1107 | 1107 | ||
1108 | INIT_WORK(&chip->work, timeout_work, chip); | 1108 | INIT_WORK(&chip->work, timeout_work); |
1109 | 1109 | ||
1110 | init_timer(&chip->user_read_timer); | 1110 | init_timer(&chip->user_read_timer); |
1111 | chip->user_read_timer.function = user_reader_timeout; | 1111 | chip->user_read_timer.function = user_reader_timeout; |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 50dc49205a23..b3cfc8bc613c 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -1254,7 +1254,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); | |||
1254 | 1254 | ||
1255 | /** | 1255 | /** |
1256 | * do_tty_hangup - actual handler for hangup events | 1256 | * do_tty_hangup - actual handler for hangup events |
1257 | * @data: tty device | 1257 | * @work: tty device |
1258 | * | 1258 | * |
1259 | * This can be called by the "eventd" kernel thread. That is process | 1259 | * This can be called by the "eventd" kernel thread. That is process |
1260 | * synchronous but doesn't hold any locks, so we need to make sure we | 1260 | * synchronous but doesn't hold any locks, so we need to make sure we |
@@ -1274,9 +1274,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush); | |||
1274 | * tasklist_lock to walk task list for hangup event | 1274 | * tasklist_lock to walk task list for hangup event |
1275 | * | 1275 | * |
1276 | */ | 1276 | */ |
1277 | static void do_tty_hangup(void *data) | 1277 | static void do_tty_hangup(struct work_struct *work) |
1278 | { | 1278 | { |
1279 | struct tty_struct *tty = (struct tty_struct *) data; | 1279 | struct tty_struct *tty = |
1280 | container_of(work, struct tty_struct, hangup_work); | ||
1280 | struct file * cons_filp = NULL; | 1281 | struct file * cons_filp = NULL; |
1281 | struct file *filp, *f = NULL; | 1282 | struct file *filp, *f = NULL; |
1282 | struct task_struct *p; | 1283 | struct task_struct *p; |
@@ -1433,7 +1434,7 @@ void tty_vhangup(struct tty_struct * tty) | |||
1433 | 1434 | ||
1434 | printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); | 1435 | printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf)); |
1435 | #endif | 1436 | #endif |
1436 | do_tty_hangup((void *) tty); | 1437 | do_tty_hangup(&tty->hangup_work); |
1437 | } | 1438 | } |
1438 | EXPORT_SYMBOL(tty_vhangup); | 1439 | EXPORT_SYMBOL(tty_vhangup); |
1439 | 1440 | ||
@@ -3304,12 +3305,13 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3304 | * Nasty bug: do_SAK is being called in interrupt context. This can | 3305 | * Nasty bug: do_SAK is being called in interrupt context. This can |
3305 | * deadlock. We punt it up to process context. AKPM - 16Mar2001 | 3306 | * deadlock. We punt it up to process context. AKPM - 16Mar2001 |
3306 | */ | 3307 | */ |
3307 | static void __do_SAK(void *arg) | 3308 | static void __do_SAK(struct work_struct *work) |
3308 | { | 3309 | { |
3310 | struct tty_struct *tty = | ||
3311 | container_of(work, struct tty_struct, SAK_work); | ||
3309 | #ifdef TTY_SOFT_SAK | 3312 | #ifdef TTY_SOFT_SAK |
3310 | tty_hangup(tty); | 3313 | tty_hangup(tty); |
3311 | #else | 3314 | #else |
3312 | struct tty_struct *tty = arg; | ||
3313 | struct task_struct *g, *p; | 3315 | struct task_struct *g, *p; |
3314 | int session; | 3316 | int session; |
3315 | int i; | 3317 | int i; |
@@ -3388,7 +3390,7 @@ void do_SAK(struct tty_struct *tty) | |||
3388 | { | 3390 | { |
3389 | if (!tty) | 3391 | if (!tty) |
3390 | return; | 3392 | return; |
3391 | PREPARE_WORK(&tty->SAK_work, __do_SAK, tty); | 3393 | PREPARE_WORK(&tty->SAK_work, __do_SAK); |
3392 | schedule_work(&tty->SAK_work); | 3394 | schedule_work(&tty->SAK_work); |
3393 | } | 3395 | } |
3394 | 3396 | ||
@@ -3396,7 +3398,7 @@ EXPORT_SYMBOL(do_SAK); | |||
3396 | 3398 | ||
3397 | /** | 3399 | /** |
3398 | * flush_to_ldisc | 3400 | * flush_to_ldisc |
3399 | * @private_: tty structure passed from work queue. | 3401 | * @work: tty structure passed from work queue. |
3400 | * | 3402 | * |
3401 | * This routine is called out of the software interrupt to flush data | 3403 | * This routine is called out of the software interrupt to flush data |
3402 | * from the buffer chain to the line discipline. | 3404 | * from the buffer chain to the line discipline. |
@@ -3406,9 +3408,10 @@ EXPORT_SYMBOL(do_SAK); | |||
3406 | * receive_buf method is single threaded for each tty instance. | 3408 | * receive_buf method is single threaded for each tty instance. |
3407 | */ | 3409 | */ |
3408 | 3410 | ||
3409 | static void flush_to_ldisc(void *private_) | 3411 | static void flush_to_ldisc(struct work_struct *work) |
3410 | { | 3412 | { |
3411 | struct tty_struct *tty = (struct tty_struct *) private_; | 3413 | struct tty_struct *tty = |
3414 | container_of(work, struct tty_struct, buf.work.work); | ||
3412 | unsigned long flags; | 3415 | unsigned long flags; |
3413 | struct tty_ldisc *disc; | 3416 | struct tty_ldisc *disc; |
3414 | struct tty_buffer *tbuf, *head; | 3417 | struct tty_buffer *tbuf, *head; |
@@ -3553,7 +3556,7 @@ void tty_flip_buffer_push(struct tty_struct *tty) | |||
3553 | spin_unlock_irqrestore(&tty->buf.lock, flags); | 3556 | spin_unlock_irqrestore(&tty->buf.lock, flags); |
3554 | 3557 | ||
3555 | if (tty->low_latency) | 3558 | if (tty->low_latency) |
3556 | flush_to_ldisc((void *) tty); | 3559 | flush_to_ldisc(&tty->buf.work.work); |
3557 | else | 3560 | else |
3558 | schedule_delayed_work(&tty->buf.work, 1); | 3561 | schedule_delayed_work(&tty->buf.work, 1); |
3559 | } | 3562 | } |
@@ -3580,17 +3583,17 @@ static void initialize_tty_struct(struct tty_struct *tty) | |||
3580 | tty->overrun_time = jiffies; | 3583 | tty->overrun_time = jiffies; |
3581 | tty->buf.head = tty->buf.tail = NULL; | 3584 | tty->buf.head = tty->buf.tail = NULL; |
3582 | tty_buffer_init(tty); | 3585 | tty_buffer_init(tty); |
3583 | INIT_WORK(&tty->buf.work, flush_to_ldisc, tty); | 3586 | INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc); |
3584 | init_MUTEX(&tty->buf.pty_sem); | 3587 | init_MUTEX(&tty->buf.pty_sem); |
3585 | mutex_init(&tty->termios_mutex); | 3588 | mutex_init(&tty->termios_mutex); |
3586 | init_waitqueue_head(&tty->write_wait); | 3589 | init_waitqueue_head(&tty->write_wait); |
3587 | init_waitqueue_head(&tty->read_wait); | 3590 | init_waitqueue_head(&tty->read_wait); |
3588 | INIT_WORK(&tty->hangup_work, do_tty_hangup, tty); | 3591 | INIT_WORK(&tty->hangup_work, do_tty_hangup); |
3589 | mutex_init(&tty->atomic_read_lock); | 3592 | mutex_init(&tty->atomic_read_lock); |
3590 | mutex_init(&tty->atomic_write_lock); | 3593 | mutex_init(&tty->atomic_write_lock); |
3591 | spin_lock_init(&tty->read_lock); | 3594 | spin_lock_init(&tty->read_lock); |
3592 | INIT_LIST_HEAD(&tty->tty_files); | 3595 | INIT_LIST_HEAD(&tty->tty_files); |
3593 | INIT_WORK(&tty->SAK_work, NULL, NULL); | 3596 | INIT_WORK(&tty->SAK_work, NULL); |
3594 | } | 3597 | } |
3595 | 3598 | ||
3596 | /* | 3599 | /* |
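
The tty_io.c hunks also cover the delayed-work side of the change: tty->buf.work becomes a struct delayed_work set up with INIT_DELAYED_WORK(), and because delayed_work embeds a work_struct named "work", a handler recovers its container via member.work — and any direct call passes &obj->member.work instead of the object pointer. Roughly, with a hypothetical foo_tty:

    #include <linux/workqueue.h>

    struct foo_tty {
            struct delayed_work flip;       /* was a plain work_struct */
    };

    static void foo_flush(struct work_struct *work)
    {
            /* delayed_work wraps the work_struct in a field called .work */
            struct foo_tty *t = container_of(work, struct foo_tty, flip.work);

            /* ... push buffered data to the line discipline for t ... */
            (void)t;
    }

    static void foo_tty_init(struct foo_tty *t)
    {
            INIT_DELAYED_WORK(&t->flip, foo_flush);
    }

    static void foo_push(struct foo_tty *t, int low_latency)
    {
            if (low_latency)
                    foo_flush(&t->flip.work);       /* synchronous call */
            else
                    schedule_delayed_work(&t->flip, 1);
    }
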
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 87587b4385ab..75ff0286e1ad 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -155,7 +155,7 @@ static void con_flush_chars(struct tty_struct *tty); | |||
155 | static void set_vesa_blanking(char __user *p); | 155 | static void set_vesa_blanking(char __user *p); |
156 | static void set_cursor(struct vc_data *vc); | 156 | static void set_cursor(struct vc_data *vc); |
157 | static void hide_cursor(struct vc_data *vc); | 157 | static void hide_cursor(struct vc_data *vc); |
158 | static void console_callback(void *ignored); | 158 | static void console_callback(struct work_struct *ignored); |
159 | static void blank_screen_t(unsigned long dummy); | 159 | static void blank_screen_t(unsigned long dummy); |
160 | static void set_palette(struct vc_data *vc); | 160 | static void set_palette(struct vc_data *vc); |
161 | 161 | ||
@@ -174,7 +174,7 @@ static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */ | |||
174 | static int blankinterval = 10*60*HZ; | 174 | static int blankinterval = 10*60*HZ; |
175 | static int vesa_off_interval; | 175 | static int vesa_off_interval; |
176 | 176 | ||
177 | static DECLARE_WORK(console_work, console_callback, NULL); | 177 | static DECLARE_WORK(console_work, console_callback); |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * fg_console is the current virtual console, | 180 | * fg_console is the current virtual console, |
@@ -2154,7 +2154,7 @@ out: | |||
2154 | * with other console code and prevention of re-entrancy is | 2154 | * with other console code and prevention of re-entrancy is |
2155 | * ensured with console_sem. | 2155 | * ensured with console_sem. |
2156 | */ | 2156 | */ |
2157 | static void console_callback(void *ignored) | 2157 | static void console_callback(struct work_struct *ignored) |
2158 | { | 2158 | { |
2159 | acquire_console_sem(); | 2159 | acquire_console_sem(); |
2160 | 2160 | ||
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c index 05f8ce2cfb4a..b418b16e910e 100644 --- a/drivers/connector/cn_queue.c +++ b/drivers/connector/cn_queue.c | |||
@@ -31,9 +31,11 @@ | |||
31 | #include <linux/connector.h> | 31 | #include <linux/connector.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | 33 | ||
34 | void cn_queue_wrapper(void *data) | 34 | void cn_queue_wrapper(struct work_struct *work) |
35 | { | 35 | { |
36 | struct cn_callback_data *d = data; | 36 | struct cn_callback_entry *cbq = |
37 | container_of(work, struct cn_callback_entry, work.work); | ||
38 | struct cn_callback_data *d = &cbq->data; | ||
37 | 39 | ||
38 | d->callback(d->callback_priv); | 40 | d->callback(d->callback_priv); |
39 | 41 | ||
@@ -57,7 +59,7 @@ static struct cn_callback_entry *cn_queue_alloc_callback_entry(char *name, struc | |||
57 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); | 59 | memcpy(&cbq->id.id, id, sizeof(struct cb_id)); |
58 | cbq->data.callback = callback; | 60 | cbq->data.callback = callback; |
59 | 61 | ||
60 | INIT_WORK(&cbq->work, &cn_queue_wrapper, &cbq->data); | 62 | INIT_DELAYED_WORK(&cbq->work, &cn_queue_wrapper); |
61 | return cbq; | 63 | return cbq; |
62 | } | 64 | } |
63 | 65 | ||
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index b49bacfd8de8..5e7cd45d10ee 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c | |||
@@ -135,40 +135,39 @@ static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), v | |||
135 | spin_lock_bh(&dev->cbdev->queue_lock); | 135 | spin_lock_bh(&dev->cbdev->queue_lock); |
136 | list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { | 136 | list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) { |
137 | if (cn_cb_equal(&__cbq->id.id, &msg->id)) { | 137 | if (cn_cb_equal(&__cbq->id.id, &msg->id)) { |
138 | if (likely(!test_bit(0, &__cbq->work.pending) && | 138 | if (likely(!test_bit(WORK_STRUCT_PENDING, |
139 | &__cbq->work.work.management) && | ||
139 | __cbq->data.ddata == NULL)) { | 140 | __cbq->data.ddata == NULL)) { |
140 | __cbq->data.callback_priv = msg; | 141 | __cbq->data.callback_priv = msg; |
141 | 142 | ||
142 | __cbq->data.ddata = data; | 143 | __cbq->data.ddata = data; |
143 | __cbq->data.destruct_data = destruct_data; | 144 | __cbq->data.destruct_data = destruct_data; |
144 | 145 | ||
145 | if (queue_work(dev->cbdev->cn_queue, | 146 | if (queue_delayed_work( |
146 | &__cbq->work)) | 147 | dev->cbdev->cn_queue, |
148 | &__cbq->work, 0)) | ||
147 | err = 0; | 149 | err = 0; |
148 | } else { | 150 | } else { |
149 | struct work_struct *w; | ||
150 | struct cn_callback_data *d; | 151 | struct cn_callback_data *d; |
151 | 152 | ||
152 | w = kzalloc(sizeof(*w) + sizeof(*d), GFP_ATOMIC); | 153 | __cbq = kzalloc(sizeof(*__cbq), GFP_ATOMIC); |
153 | if (w) { | 154 | if (__cbq) { |
154 | d = (struct cn_callback_data *)(w+1); | 155 | d = &__cbq->data; |
155 | |||
156 | d->callback_priv = msg; | 156 | d->callback_priv = msg; |
157 | d->callback = __cbq->data.callback; | 157 | d->callback = __cbq->data.callback; |
158 | d->ddata = data; | 158 | d->ddata = data; |
159 | d->destruct_data = destruct_data; | 159 | d->destruct_data = destruct_data; |
160 | d->free = w; | 160 | d->free = __cbq; |
161 | 161 | ||
162 | INIT_LIST_HEAD(&w->entry); | 162 | INIT_DELAYED_WORK(&__cbq->work, |
163 | w->pending = 0; | 163 | &cn_queue_wrapper); |
164 | w->func = &cn_queue_wrapper; | ||
165 | w->data = d; | ||
166 | init_timer(&w->timer); | ||
167 | 164 | ||
168 | if (queue_work(dev->cbdev->cn_queue, w)) | 165 | if (queue_delayed_work( |
166 | dev->cbdev->cn_queue, | ||
167 | &__cbq->work, 0)) | ||
169 | err = 0; | 168 | err = 0; |
170 | else { | 169 | else { |
171 | kfree(w); | 170 | kfree(__cbq); |
172 | err = -EINVAL; | 171 | err = -EINVAL; |
173 | } | 172 | } |
174 | } else | 173 | } else |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index dd0c2623e27b..7a7c6e6dfe4f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock); | |||
42 | 42 | ||
43 | /* internal prototypes */ | 43 | /* internal prototypes */ |
44 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); | 44 | static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event); |
45 | static void handle_update(void *data); | 45 | static void handle_update(struct work_struct *work); |
46 | 46 | ||
47 | /** | 47 | /** |
48 | * Two notifier lists: the "policy" list is involved in the | 48 | * Two notifier lists: the "policy" list is involved in the |
@@ -665,7 +665,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev) | |||
665 | mutex_init(&policy->lock); | 665 | mutex_init(&policy->lock); |
666 | mutex_lock(&policy->lock); | 666 | mutex_lock(&policy->lock); |
667 | init_completion(&policy->kobj_unregister); | 667 | init_completion(&policy->kobj_unregister); |
668 | INIT_WORK(&policy->update, handle_update, (void *)(long)cpu); | 668 | INIT_WORK(&policy->update, handle_update); |
669 | 669 | ||
670 | /* call driver. From then on the cpufreq must be able | 670 | /* call driver. From then on the cpufreq must be able |
671 | * to accept all calls to ->verify and ->setpolicy for this CPU | 671 | * to accept all calls to ->verify and ->setpolicy for this CPU |
@@ -895,9 +895,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev) | |||
895 | } | 895 | } |
896 | 896 | ||
897 | 897 | ||
898 | static void handle_update(void *data) | 898 | static void handle_update(struct work_struct *work) |
899 | { | 899 | { |
900 | unsigned int cpu = (unsigned int)(long)data; | 900 | struct cpufreq_policy *policy = |
901 | container_of(work, struct cpufreq_policy, update); | ||
902 | unsigned int cpu = policy->cpu; | ||
901 | dprintk("handle_update for cpu %u called\n", cpu); | 903 | dprintk("handle_update for cpu %u called\n", cpu); |
902 | cpufreq_update_policy(cpu); | 904 | cpufreq_update_policy(cpu); |
903 | } | 905 | } |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index c4c578defabf..5ef5ede5b884 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -59,7 +59,7 @@ static unsigned int def_sampling_rate; | |||
59 | #define MAX_SAMPLING_DOWN_FACTOR (10) | 59 | #define MAX_SAMPLING_DOWN_FACTOR (10) |
60 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) | 60 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) |
61 | 61 | ||
62 | static void do_dbs_timer(void *data); | 62 | static void do_dbs_timer(struct work_struct *work); |
63 | 63 | ||
64 | struct cpu_dbs_info_s { | 64 | struct cpu_dbs_info_s { |
65 | struct cpufreq_policy *cur_policy; | 65 | struct cpufreq_policy *cur_policy; |
@@ -82,7 +82,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */ | |||
82 | * is recursive for the same process. -Venki | 82 | * is recursive for the same process. -Venki |
83 | */ | 83 | */ |
84 | static DEFINE_MUTEX (dbs_mutex); | 84 | static DEFINE_MUTEX (dbs_mutex); |
85 | static DECLARE_WORK (dbs_work, do_dbs_timer, NULL); | 85 | static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer); |
86 | 86 | ||
87 | struct dbs_tuners { | 87 | struct dbs_tuners { |
88 | unsigned int sampling_rate; | 88 | unsigned int sampling_rate; |
@@ -420,7 +420,7 @@ static void dbs_check_cpu(int cpu) | |||
420 | } | 420 | } |
421 | } | 421 | } |
422 | 422 | ||
423 | static void do_dbs_timer(void *data) | 423 | static void do_dbs_timer(struct work_struct *work) |
424 | { | 424 | { |
425 | int i; | 425 | int i; |
426 | lock_cpu_hotplug(); | 426 | lock_cpu_hotplug(); |
@@ -435,7 +435,6 @@ static void do_dbs_timer(void *data) | |||
435 | 435 | ||
436 | static inline void dbs_timer_init(void) | 436 | static inline void dbs_timer_init(void) |
437 | { | 437 | { |
438 | INIT_WORK(&dbs_work, do_dbs_timer, NULL); | ||
439 | schedule_delayed_work(&dbs_work, | 438 | schedule_delayed_work(&dbs_work, |
440 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); | 439 | usecs_to_jiffies(dbs_tuners_ins.sampling_rate)); |
441 | return; | 440 | return; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index bf8aa45d4f01..e1cc5113c2ae 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -47,13 +47,17 @@ static unsigned int def_sampling_rate; | |||
47 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) | 47 | #define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000) |
48 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) | 48 | #define TRANSITION_LATENCY_LIMIT (10 * 1000) |
49 | 49 | ||
50 | static void do_dbs_timer(void *data); | 50 | static void do_dbs_timer(struct work_struct *work); |
51 | |||
52 | /* Sampling types */ | ||
53 | enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
51 | 54 | ||
52 | struct cpu_dbs_info_s { | 55 | struct cpu_dbs_info_s { |
53 | cputime64_t prev_cpu_idle; | 56 | cputime64_t prev_cpu_idle; |
54 | cputime64_t prev_cpu_wall; | 57 | cputime64_t prev_cpu_wall; |
55 | struct cpufreq_policy *cur_policy; | 58 | struct cpufreq_policy *cur_policy; |
56 | struct work_struct work; | 59 | struct delayed_work work; |
60 | enum dbs_sample sample_type; | ||
57 | unsigned int enable; | 61 | unsigned int enable; |
58 | struct cpufreq_frequency_table *freq_table; | 62 | struct cpufreq_frequency_table *freq_table; |
59 | unsigned int freq_lo; | 63 | unsigned int freq_lo; |
@@ -407,30 +411,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) | |||
407 | } | 411 | } |
408 | } | 412 | } |
409 | 413 | ||
410 | /* Sampling types */ | 414 | static void do_dbs_timer(struct work_struct *work) |
411 | enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE}; | ||
412 | |||
413 | static void do_dbs_timer(void *data) | ||
414 | { | 415 | { |
415 | unsigned int cpu = smp_processor_id(); | 416 | unsigned int cpu = smp_processor_id(); |
416 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); | 417 | struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu); |
418 | enum dbs_sample sample_type = dbs_info->sample_type; | ||
417 | /* We want all CPUs to do sampling nearly on same jiffy */ | 419 | /* We want all CPUs to do sampling nearly on same jiffy */ |
418 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); | 420 | int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate); |
421 | |||
422 | /* Permit rescheduling of this work item */ | ||
423 | work_release(work); | ||
424 | |||
419 | delay -= jiffies % delay; | 425 | delay -= jiffies % delay; |
420 | 426 | ||
421 | if (!dbs_info->enable) | 427 | if (!dbs_info->enable) |
422 | return; | 428 | return; |
423 | /* Common NORMAL_SAMPLE setup */ | 429 | /* Common NORMAL_SAMPLE setup */ |
424 | INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE); | 430 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; |
425 | if (!dbs_tuners_ins.powersave_bias || | 431 | if (!dbs_tuners_ins.powersave_bias || |
426 | (unsigned long) data == DBS_NORMAL_SAMPLE) { | 432 | sample_type == DBS_NORMAL_SAMPLE) { |
427 | lock_cpu_hotplug(); | 433 | lock_cpu_hotplug(); |
428 | dbs_check_cpu(dbs_info); | 434 | dbs_check_cpu(dbs_info); |
429 | unlock_cpu_hotplug(); | 435 | unlock_cpu_hotplug(); |
430 | if (dbs_info->freq_lo) { | 436 | if (dbs_info->freq_lo) { |
431 | /* Setup timer for SUB_SAMPLE */ | 437 | /* Setup timer for SUB_SAMPLE */ |
432 | INIT_WORK(&dbs_info->work, do_dbs_timer, | 438 | dbs_info->sample_type = DBS_SUB_SAMPLE; |
433 | (void *)DBS_SUB_SAMPLE); | ||
434 | delay = dbs_info->freq_hi_jiffies; | 439 | delay = dbs_info->freq_hi_jiffies; |
435 | } | 440 | } |
436 | } else { | 441 | } else { |
@@ -449,7 +454,8 @@ static inline void dbs_timer_init(unsigned int cpu) | |||
449 | delay -= jiffies % delay; | 454 | delay -= jiffies % delay; |
450 | 455 | ||
451 | ondemand_powersave_bias_init(); | 456 | ondemand_powersave_bias_init(); |
452 | INIT_WORK(&dbs_info->work, do_dbs_timer, NULL); | 457 | INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer); |
458 | dbs_info->sample_type = DBS_NORMAL_SAMPLE; | ||
453 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); | 459 | queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay); |
454 | } | 460 | } |
455 | 461 | ||
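
The ondemand governor hunks show what happens to per-invocation data that used to ride in the void * argument: with that pointer gone, the sample type is stored in the per-CPU structure before the work is queued and read back (and reset) inside the handler. A simplified sketch with hypothetical names, using plain INIT_DELAYED_WORK and schedule_delayed_work rather than the _NAR variant and per-CPU queueing seen in the hunk:

    #include <linux/workqueue.h>

    enum foo_sample { FOO_NORMAL, FOO_SUB };

    struct foo_dbs_info {
            struct delayed_work work;
            enum foo_sample sample_type;    /* replaces the old data argument */
    };

    static void foo_dbs_timer(struct work_struct *work)
    {
            struct foo_dbs_info *info =
                    container_of(work, struct foo_dbs_info, work.work);
            enum foo_sample type = info->sample_type;

            /* default the next run; a SUB sample overrides this later */
            info->sample_type = FOO_NORMAL;

            if (type == FOO_SUB) {
                    /* ... finish the split sample ... */
            }
    }

    static void foo_dbs_arm(struct foo_dbs_info *info, unsigned long delay)
    {
            info->sample_type = FOO_NORMAL;
            INIT_DELAYED_WORK(&info->work, foo_dbs_timer);
            schedule_delayed_work(&info->work, delay);
    }
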
diff --git a/drivers/i2c/chips/ds1374.c b/drivers/i2c/chips/ds1374.c index 4630f1969a09..15edf40828b4 100644 --- a/drivers/i2c/chips/ds1374.c +++ b/drivers/i2c/chips/ds1374.c | |||
@@ -140,12 +140,14 @@ ulong ds1374_get_rtc_time(void) | |||
140 | return t1; | 140 | return t1; |
141 | } | 141 | } |
142 | 142 | ||
143 | static void ds1374_set_work(void *arg) | 143 | static ulong new_time; |
144 | |||
145 | static void ds1374_set_work(struct work_struct *work) | ||
144 | { | 146 | { |
145 | ulong t1, t2; | 147 | ulong t1, t2; |
146 | int limit = 10; /* arbitrary retry limit */ | 148 | int limit = 10; /* arbitrary retry limit */ |
147 | 149 | ||
148 | t1 = *(ulong *) arg; | 150 | t1 = new_time; |
149 | 151 | ||
150 | mutex_lock(&ds1374_mutex); | 152 | mutex_lock(&ds1374_mutex); |
151 | 153 | ||
@@ -167,11 +169,9 @@ static void ds1374_set_work(void *arg) | |||
167 | "can't confirm time set from rtc chip\n"); | 169 | "can't confirm time set from rtc chip\n"); |
168 | } | 170 | } |
169 | 171 | ||
170 | static ulong new_time; | ||
171 | |||
172 | static struct workqueue_struct *ds1374_workqueue; | 172 | static struct workqueue_struct *ds1374_workqueue; |
173 | 173 | ||
174 | static DECLARE_WORK(ds1374_work, ds1374_set_work, &new_time); | 174 | static DECLARE_WORK(ds1374_work, ds1374_set_work); |
175 | 175 | ||
176 | int ds1374_set_rtc_time(ulong nowtime) | 176 | int ds1374_set_rtc_time(ulong nowtime) |
177 | { | 177 | { |
@@ -180,7 +180,7 @@ int ds1374_set_rtc_time(ulong nowtime) | |||
180 | if (in_interrupt()) | 180 | if (in_interrupt()) |
181 | queue_work(ds1374_workqueue, &ds1374_work); | 181 | queue_work(ds1374_workqueue, &ds1374_work); |
182 | else | 182 | else |
183 | ds1374_set_work(&new_time); | 183 | ds1374_set_work(NULL); |
184 | 184 | ||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
diff --git a/drivers/i2c/chips/m41t00.c b/drivers/i2c/chips/m41t00.c index 2dd0a34d9472..420377c86422 100644 --- a/drivers/i2c/chips/m41t00.c +++ b/drivers/i2c/chips/m41t00.c | |||
@@ -215,8 +215,15 @@ m41t00_set(void *arg) | |||
215 | } | 215 | } |
216 | 216 | ||
217 | static ulong new_time; | 217 | static ulong new_time; |
218 | /* well, isn't this API just _lovely_? */ | ||
219 | static void | ||
220 | m41t00_barf(struct work_struct *unusable) | ||
221 | { | ||
222 | m41t00_set(&new_time); | ||
223 | } | ||
224 | |||
218 | static struct workqueue_struct *m41t00_wq; | 225 | static struct workqueue_struct *m41t00_wq; |
219 | static DECLARE_WORK(m41t00_work, m41t00_set, &new_time); | 226 | static DECLARE_WORK(m41t00_work, m41t00_barf); |
220 | 227 | ||
221 | int | 228 | int |
222 | m41t00_set_rtc_time(ulong nowtime) | 229 | m41t00_set_rtc_time(ulong nowtime) |
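
The m41t00 hunk takes a different route: instead of converting the existing void * helper, the patch keeps it and queues a thin adapter that ignores its work_struct * argument and forwards the file-scope value. A rough sketch of that approach, with invented names:

    #include <linux/workqueue.h>

    static unsigned long foo_new_time;

    /* legacy helper kept with its old signature */
    static void foo_set(void *arg)
    {
            unsigned long t = *(unsigned long *)arg;

            /* ... program the RTC with t ... */
            (void)t;
    }

    /* thin adapter matching the new work handler prototype */
    static void foo_set_work(struct work_struct *unused)
    {
            foo_set(&foo_new_time);
    }

    static DECLARE_WORK(foo_work, foo_set_work);
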
diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c index d90a3a1898c0..8f4378a1631c 100644 --- a/drivers/ieee1394/hosts.c +++ b/drivers/ieee1394/hosts.c | |||
@@ -31,9 +31,10 @@ | |||
31 | #include "config_roms.h" | 31 | #include "config_roms.h" |
32 | 32 | ||
33 | 33 | ||
34 | static void delayed_reset_bus(void * __reset_info) | 34 | static void delayed_reset_bus(struct work_struct *work) |
35 | { | 35 | { |
36 | struct hpsb_host *host = (struct hpsb_host*)__reset_info; | 36 | struct hpsb_host *host = |
37 | container_of(work, struct hpsb_host, delayed_reset.work); | ||
37 | int generation = host->csr.generation + 1; | 38 | int generation = host->csr.generation + 1; |
38 | 39 | ||
39 | /* The generation field rolls over to 2 rather than 0 per IEEE | 40 | /* The generation field rolls over to 2 rather than 0 per IEEE |
@@ -145,7 +146,7 @@ struct hpsb_host *hpsb_alloc_host(struct hpsb_host_driver *drv, size_t extra, | |||
145 | 146 | ||
146 | atomic_set(&h->generation, 0); | 147 | atomic_set(&h->generation, 0); |
147 | 148 | ||
148 | INIT_WORK(&h->delayed_reset, delayed_reset_bus, h); | 149 | INIT_DELAYED_WORK(&h->delayed_reset, delayed_reset_bus); |
149 | 150 | ||
150 | init_timer(&h->timeout); | 151 | init_timer(&h->timeout); |
151 | h->timeout.data = (unsigned long) h; | 152 | h->timeout.data = (unsigned long) h; |
@@ -234,7 +235,7 @@ int hpsb_update_config_rom_image(struct hpsb_host *host) | |||
234 | * Config ROM in the near future. */ | 235 | * Config ROM in the near future. */ |
235 | reset_delay = HZ; | 236 | reset_delay = HZ; |
236 | 237 | ||
237 | PREPARE_WORK(&host->delayed_reset, delayed_reset_bus, host); | 238 | PREPARE_DELAYED_WORK(&host->delayed_reset, delayed_reset_bus); |
238 | schedule_delayed_work(&host->delayed_reset, reset_delay); | 239 | schedule_delayed_work(&host->delayed_reset, reset_delay); |
239 | 240 | ||
240 | return 0; | 241 | return 0; |
diff --git a/drivers/ieee1394/hosts.h b/drivers/ieee1394/hosts.h index bc6dbfadb891..d553e38c9543 100644 --- a/drivers/ieee1394/hosts.h +++ b/drivers/ieee1394/hosts.h | |||
@@ -62,7 +62,7 @@ struct hpsb_host { | |||
62 | struct class_device class_dev; | 62 | struct class_device class_dev; |
63 | 63 | ||
64 | int update_config_rom; | 64 | int update_config_rom; |
65 | struct work_struct delayed_reset; | 65 | struct delayed_work delayed_reset; |
66 | unsigned int config_roms; | 66 | unsigned int config_roms; |
67 | 67 | ||
68 | struct list_head addr_space; | 68 | struct list_head addr_space; |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 6986ac188281..cd156d4e779e 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -493,20 +493,25 @@ static void sbp2util_notify_fetch_agent(struct scsi_id_instance_data *scsi_id, | |||
493 | scsi_unblock_requests(scsi_id->scsi_host); | 493 | scsi_unblock_requests(scsi_id->scsi_host); |
494 | } | 494 | } |
495 | 495 | ||
496 | static void sbp2util_write_orb_pointer(void *p) | 496 | static void sbp2util_write_orb_pointer(struct work_struct *work) |
497 | { | 497 | { |
498 | struct scsi_id_instance_data *scsi_id = | ||
499 | container_of(work, struct scsi_id_instance_data, | ||
500 | protocol_work.work); | ||
498 | quadlet_t data[2]; | 501 | quadlet_t data[2]; |
499 | 502 | ||
500 | data[0] = ORB_SET_NODE_ID( | 503 | data[0] = ORB_SET_NODE_ID(scsi_id->hi->host->node_id); |
501 | ((struct scsi_id_instance_data *)p)->hi->host->node_id); | 504 | data[1] = scsi_id->last_orb_dma; |
502 | data[1] = ((struct scsi_id_instance_data *)p)->last_orb_dma; | ||
503 | sbp2util_cpu_to_be32_buffer(data, 8); | 505 | sbp2util_cpu_to_be32_buffer(data, 8); |
504 | sbp2util_notify_fetch_agent(p, SBP2_ORB_POINTER_OFFSET, data, 8); | 506 | sbp2util_notify_fetch_agent(scsi_id, SBP2_ORB_POINTER_OFFSET, data, 8); |
505 | } | 507 | } |
506 | 508 | ||
507 | static void sbp2util_write_doorbell(void *p) | 509 | static void sbp2util_write_doorbell(struct work_struct *work) |
508 | { | 510 | { |
509 | sbp2util_notify_fetch_agent(p, SBP2_DOORBELL_OFFSET, NULL, 4); | 511 | struct scsi_id_instance_data *scsi_id = |
512 | container_of(work, struct scsi_id_instance_data, | ||
513 | protocol_work.work); | ||
514 | sbp2util_notify_fetch_agent(scsi_id, SBP2_DOORBELL_OFFSET, NULL, 4); | ||
510 | } | 515 | } |
511 | 516 | ||
512 | /* | 517 | /* |
@@ -843,7 +848,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud | |||
843 | INIT_LIST_HEAD(&scsi_id->scsi_list); | 848 | INIT_LIST_HEAD(&scsi_id->scsi_list); |
844 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); | 849 | spin_lock_init(&scsi_id->sbp2_command_orb_lock); |
845 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); | 850 | atomic_set(&scsi_id->state, SBP2LU_STATE_RUNNING); |
846 | INIT_WORK(&scsi_id->protocol_work, NULL, NULL); | 851 | INIT_DELAYED_WORK(&scsi_id->protocol_work, NULL); |
847 | 852 | ||
848 | ud->device.driver_data = scsi_id; | 853 | ud->device.driver_data = scsi_id; |
849 | 854 | ||
@@ -2047,11 +2052,10 @@ static void sbp2_link_orb_command(struct scsi_id_instance_data *scsi_id, | |||
2047 | * We do not accept new commands until the job is over. | 2052 | * We do not accept new commands until the job is over. |
2048 | */ | 2053 | */ |
2049 | scsi_block_requests(scsi_id->scsi_host); | 2054 | scsi_block_requests(scsi_id->scsi_host); |
2050 | PREPARE_WORK(&scsi_id->protocol_work, | 2055 | PREPARE_DELAYED_WORK(&scsi_id->protocol_work, |
2051 | last_orb ? sbp2util_write_doorbell: | 2056 | last_orb ? sbp2util_write_doorbell: |
2052 | sbp2util_write_orb_pointer, | 2057 | sbp2util_write_orb_pointer); |
2053 | scsi_id); | 2058 | schedule_delayed_work(&scsi_id->protocol_work, 0); |
2054 | schedule_work(&scsi_id->protocol_work); | ||
2055 | } | 2059 | } |
2056 | } | 2060 | } |
2057 | 2061 | ||
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index abbe48e646c3..1b16d6b9cf11 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -348,7 +348,7 @@ struct scsi_id_instance_data { | |||
348 | unsigned workarounds; | 348 | unsigned workarounds; |
349 | 349 | ||
350 | atomic_t state; | 350 | atomic_t state; |
351 | struct work_struct protocol_work; | 351 | struct delayed_work protocol_work; |
352 | }; | 352 | }; |
353 | 353 | ||
354 | /* For use in scsi_id_instance_data.state */ | 354 | /* For use in scsi_id_instance_data.state */ |
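
The sbp2 hunks add the remaining piece: a work_struct field whose handler is chosen at queue time becomes a delayed_work in the header, PREPARE_WORK() becomes PREPARE_DELAYED_WORK(), and schedule_work() becomes schedule_delayed_work(..., 0). A sketch under those assumptions (PREPARE_DELAYED_WORK exists in this tree; later kernels removed it):

    #include <linux/workqueue.h>

    struct foo_lu {
            struct delayed_work protocol_work;      /* was a work_struct */
    };

    static void foo_write_doorbell(struct work_struct *work)
    {
            struct foo_lu *lu =
                    container_of(work, struct foo_lu, protocol_work.work);

            /* ... ring the doorbell register for lu ... */
            (void)lu;
    }

    static void foo_kick(struct foo_lu *lu)
    {
            PREPARE_DELAYED_WORK(&lu->protocol_work, foo_write_doorbell);
            schedule_delayed_work(&lu->protocol_work, 0);
    }
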
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 7767a11b6890..af939796750d 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -55,11 +55,11 @@ struct addr_req { | |||
55 | int status; | 55 | int status; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static void process_req(void *data); | 58 | static void process_req(struct work_struct *work); |
59 | 59 | ||
60 | static DEFINE_MUTEX(lock); | 60 | static DEFINE_MUTEX(lock); |
61 | static LIST_HEAD(req_list); | 61 | static LIST_HEAD(req_list); |
62 | static DECLARE_WORK(work, process_req, NULL); | 62 | static DECLARE_DELAYED_WORK(work, process_req); |
63 | static struct workqueue_struct *addr_wq; | 63 | static struct workqueue_struct *addr_wq; |
64 | 64 | ||
65 | void rdma_addr_register_client(struct rdma_addr_client *client) | 65 | void rdma_addr_register_client(struct rdma_addr_client *client) |
@@ -215,7 +215,7 @@ out: | |||
215 | return ret; | 215 | return ret; |
216 | } | 216 | } |
217 | 217 | ||
218 | static void process_req(void *data) | 218 | static void process_req(struct work_struct *work) |
219 | { | 219 | { |
220 | struct addr_req *req, *temp_req; | 220 | struct addr_req *req, *temp_req; |
221 | struct sockaddr_in *src_in, *dst_in; | 221 | struct sockaddr_in *src_in, *dst_in; |
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 20e9f64e67a6..98272fbbfb31 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
@@ -285,9 +285,10 @@ err: | |||
285 | kfree(tprops); | 285 | kfree(tprops); |
286 | } | 286 | } |
287 | 287 | ||
288 | static void ib_cache_task(void *work_ptr) | 288 | static void ib_cache_task(struct work_struct *_work) |
289 | { | 289 | { |
290 | struct ib_update_work *work = work_ptr; | 290 | struct ib_update_work *work = |
291 | container_of(_work, struct ib_update_work, work); | ||
291 | 292 | ||
292 | ib_cache_update(work->device, work->port_num); | 293 | ib_cache_update(work->device, work->port_num); |
293 | kfree(work); | 294 | kfree(work); |
@@ -306,7 +307,7 @@ static void ib_cache_event(struct ib_event_handler *handler, | |||
306 | event->event == IB_EVENT_CLIENT_REREGISTER) { | 307 | event->event == IB_EVENT_CLIENT_REREGISTER) { |
307 | work = kmalloc(sizeof *work, GFP_ATOMIC); | 308 | work = kmalloc(sizeof *work, GFP_ATOMIC); |
308 | if (work) { | 309 | if (work) { |
309 | INIT_WORK(&work->work, ib_cache_task, work); | 310 | INIT_WORK(&work->work, ib_cache_task); |
310 | work->device = event->device; | 311 | work->device = event->device; |
311 | work->port_num = event->element.port_num; | 312 | work->port_num = event->element.port_num; |
312 | schedule_work(&work->work); | 313 | schedule_work(&work->work); |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index e5dc4530808a..79c937bf6962 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -101,7 +101,7 @@ struct cm_av { | |||
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct cm_work { | 103 | struct cm_work { |
104 | struct work_struct work; | 104 | struct delayed_work work; |
105 | struct list_head list; | 105 | struct list_head list; |
106 | struct cm_port *port; | 106 | struct cm_port *port; |
107 | struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ | 107 | struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */ |
@@ -161,7 +161,7 @@ struct cm_id_private { | |||
161 | atomic_t work_count; | 161 | atomic_t work_count; |
162 | }; | 162 | }; |
163 | 163 | ||
164 | static void cm_work_handler(void *data); | 164 | static void cm_work_handler(struct work_struct *work); |
165 | 165 | ||
166 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) | 166 | static inline void cm_deref_id(struct cm_id_private *cm_id_priv) |
167 | { | 167 | { |
@@ -668,8 +668,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id) | |||
668 | return ERR_PTR(-ENOMEM); | 668 | return ERR_PTR(-ENOMEM); |
669 | 669 | ||
670 | timewait_info->work.local_id = local_id; | 670 | timewait_info->work.local_id = local_id; |
671 | INIT_WORK(&timewait_info->work.work, cm_work_handler, | 671 | INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler); |
672 | &timewait_info->work); | ||
673 | timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; | 672 | timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT; |
674 | return timewait_info; | 673 | return timewait_info; |
675 | } | 674 | } |
@@ -2995,9 +2994,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent, | |||
2995 | } | 2994 | } |
2996 | } | 2995 | } |
2997 | 2996 | ||
2998 | static void cm_work_handler(void *data) | 2997 | static void cm_work_handler(struct work_struct *_work) |
2999 | { | 2998 | { |
3000 | struct cm_work *work = data; | 2999 | struct cm_work *work = container_of(_work, struct cm_work, work.work); |
3001 | int ret; | 3000 | int ret; |
3002 | 3001 | ||
3003 | switch (work->cm_event.event) { | 3002 | switch (work->cm_event.event) { |
@@ -3087,12 +3086,12 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
3087 | * we need to find the cm_id once we're in the context of the | 3086 | * we need to find the cm_id once we're in the context of the |
3088 | * worker thread, rather than holding a reference on it. | 3087 | * worker thread, rather than holding a reference on it. |
3089 | */ | 3088 | */ |
3090 | INIT_WORK(&work->work, cm_work_handler, work); | 3089 | INIT_DELAYED_WORK(&work->work, cm_work_handler); |
3091 | work->local_id = cm_id->local_id; | 3090 | work->local_id = cm_id->local_id; |
3092 | work->remote_id = cm_id->remote_id; | 3091 | work->remote_id = cm_id->remote_id; |
3093 | work->mad_recv_wc = NULL; | 3092 | work->mad_recv_wc = NULL; |
3094 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3093 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
3095 | queue_work(cm.wq, &work->work); | 3094 | queue_delayed_work(cm.wq, &work->work, 0); |
3096 | out: | 3095 | out: |
3097 | return ret; | 3096 | return ret; |
3098 | } | 3097 | } |
@@ -3191,11 +3190,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent, | |||
3191 | return; | 3190 | return; |
3192 | } | 3191 | } |
3193 | 3192 | ||
3194 | INIT_WORK(&work->work, cm_work_handler, work); | 3193 | INIT_DELAYED_WORK(&work->work, cm_work_handler); |
3195 | work->cm_event.event = event; | 3194 | work->cm_event.event = event; |
3196 | work->mad_recv_wc = mad_recv_wc; | 3195 | work->mad_recv_wc = mad_recv_wc; |
3197 | work->port = (struct cm_port *)mad_agent->context; | 3196 | work->port = (struct cm_port *)mad_agent->context; |
3198 | queue_work(cm.wq, &work->work); | 3197 | queue_delayed_work(cm.wq, &work->work, 0); |
3199 | } | 3198 | } |
3200 | 3199 | ||
3201 | static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, | 3200 | static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv, |
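
The cm.c hunks illustrate the same change for work items allocated per event: the delayed_work lives inside the allocated request, the handler digs the container out of work.work, and queue_work() on the private workqueue becomes queue_delayed_work(..., 0). A rough sketch with invented names:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *foo_wq;

    struct foo_event {
            struct delayed_work work;
            int kind;
    };

    static void foo_event_handler(struct work_struct *_work)
    {
            struct foo_event *ev =
                    container_of(_work, struct foo_event, work.work);

            /* ... dispatch on ev->kind ... */
            kfree(ev);
    }

    static int foo_post_event(int kind)
    {
            struct foo_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

            if (!ev)
                    return -ENOMEM;
            ev->kind = kind;
            INIT_DELAYED_WORK(&ev->work, foo_event_handler);
            queue_delayed_work(foo_wq, &ev->work, 0);
            return 0;
    }
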
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cf48f2697434..985a6b564d8f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1340,9 +1340,9 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, | |||
1340 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; | 1340 | return (id_priv->query_id < 0) ? id_priv->query_id : 0; |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | static void cma_work_handler(void *data) | 1343 | static void cma_work_handler(struct work_struct *_work) |
1344 | { | 1344 | { |
1345 | struct cma_work *work = data; | 1345 | struct cma_work *work = container_of(_work, struct cma_work, work); |
1346 | struct rdma_id_private *id_priv = work->id; | 1346 | struct rdma_id_private *id_priv = work->id; |
1347 | int destroy = 0; | 1347 | int destroy = 0; |
1348 | 1348 | ||
@@ -1373,7 +1373,7 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1373 | return -ENOMEM; | 1373 | return -ENOMEM; |
1374 | 1374 | ||
1375 | work->id = id_priv; | 1375 | work->id = id_priv; |
1376 | INIT_WORK(&work->work, cma_work_handler, work); | 1376 | INIT_WORK(&work->work, cma_work_handler); |
1377 | work->old_state = CMA_ROUTE_QUERY; | 1377 | work->old_state = CMA_ROUTE_QUERY; |
1378 | work->new_state = CMA_ROUTE_RESOLVED; | 1378 | work->new_state = CMA_ROUTE_RESOLVED; |
1379 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1379 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
@@ -1430,7 +1430,7 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1430 | return -ENOMEM; | 1430 | return -ENOMEM; |
1431 | 1431 | ||
1432 | work->id = id_priv; | 1432 | work->id = id_priv; |
1433 | INIT_WORK(&work->work, cma_work_handler, work); | 1433 | INIT_WORK(&work->work, cma_work_handler); |
1434 | work->old_state = CMA_ROUTE_QUERY; | 1434 | work->old_state = CMA_ROUTE_QUERY; |
1435 | work->new_state = CMA_ROUTE_RESOLVED; | 1435 | work->new_state = CMA_ROUTE_RESOLVED; |
1436 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1436 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
@@ -1583,7 +1583,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) | |||
1583 | } | 1583 | } |
1584 | 1584 | ||
1585 | work->id = id_priv; | 1585 | work->id = id_priv; |
1586 | INIT_WORK(&work->work, cma_work_handler, work); | 1586 | INIT_WORK(&work->work, cma_work_handler); |
1587 | work->old_state = CMA_ADDR_QUERY; | 1587 | work->old_state = CMA_ADDR_QUERY; |
1588 | work->new_state = CMA_ADDR_RESOLVED; | 1588 | work->new_state = CMA_ADDR_RESOLVED; |
1589 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; | 1589 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index cf797d7aea09..1039ad57d53b 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -828,9 +828,9 @@ static int process_event(struct iwcm_id_private *cm_id_priv, | |||
828 | * thread asleep on the destroy_comp list vs. an object destroyed | 828 | * thread asleep on the destroy_comp list vs. an object destroyed |
829 | * here synchronously when the last reference is removed. | 829 | * here synchronously when the last reference is removed. |
830 | */ | 830 | */ |
831 | static void cm_work_handler(void *arg) | 831 | static void cm_work_handler(struct work_struct *_work) |
832 | { | 832 | { |
833 | struct iwcm_work *work = arg; | 833 | struct iwcm_work *work = container_of(_work, struct iwcm_work, work); |
834 | struct iw_cm_event levent; | 834 | struct iw_cm_event levent; |
835 | struct iwcm_id_private *cm_id_priv = work->cm_id; | 835 | struct iwcm_id_private *cm_id_priv = work->cm_id; |
836 | unsigned long flags; | 836 | unsigned long flags; |
@@ -900,7 +900,7 @@ static int cm_event_handler(struct iw_cm_id *cm_id, | |||
900 | goto out; | 900 | goto out; |
901 | } | 901 | } |
902 | 902 | ||
903 | INIT_WORK(&work->work, cm_work_handler, work); | 903 | INIT_WORK(&work->work, cm_work_handler); |
904 | work->cm_id = cm_id_priv; | 904 | work->cm_id = cm_id_priv; |
905 | work->event = *iw_event; | 905 | work->event = *iw_event; |
906 | 906 | ||
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 3f9c16232c4d..15f38d94b3a8 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -65,8 +65,8 @@ static struct ib_mad_agent_private *find_mad_agent( | |||
65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
66 | struct ib_mad_private *mad); | 66 | struct ib_mad_private *mad); |
67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | 67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); |
68 | static void timeout_sends(void *data); | 68 | static void timeout_sends(struct work_struct *work); |
69 | static void local_completions(void *data); | 69 | static void local_completions(struct work_struct *work); |
70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
71 | struct ib_mad_agent_private *agent_priv, | 71 | struct ib_mad_agent_private *agent_priv, |
72 | u8 mgmt_class); | 72 | u8 mgmt_class); |
@@ -356,10 +356,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | 356 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); |
357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | 357 | INIT_LIST_HEAD(&mad_agent_priv->done_list); |
358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | 358 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); |
359 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); | 359 | INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends); |
360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | 360 | INIT_LIST_HEAD(&mad_agent_priv->local_list); |
361 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 361 | INIT_WORK(&mad_agent_priv->local_work, local_completions); |
362 | mad_agent_priv); | ||
363 | atomic_set(&mad_agent_priv->refcount, 1); | 362 | atomic_set(&mad_agent_priv->refcount, 1); |
364 | init_completion(&mad_agent_priv->comp); | 363 | init_completion(&mad_agent_priv->comp); |
365 | 364 | ||
@@ -2198,12 +2197,12 @@ static void mad_error_handler(struct ib_mad_port_private *port_priv, | |||
2198 | /* | 2197 | /* |
2199 | * IB MAD completion callback | 2198 | * IB MAD completion callback |
2200 | */ | 2199 | */ |
2201 | static void ib_mad_completion_handler(void *data) | 2200 | static void ib_mad_completion_handler(struct work_struct *work) |
2202 | { | 2201 | { |
2203 | struct ib_mad_port_private *port_priv; | 2202 | struct ib_mad_port_private *port_priv; |
2204 | struct ib_wc wc; | 2203 | struct ib_wc wc; |
2205 | 2204 | ||
2206 | port_priv = (struct ib_mad_port_private *)data; | 2205 | port_priv = container_of(work, struct ib_mad_port_private, work); |
2207 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); | 2206 | ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); |
2208 | 2207 | ||
2209 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { | 2208 | while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { |
@@ -2324,7 +2323,7 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent, | |||
2324 | } | 2323 | } |
2325 | EXPORT_SYMBOL(ib_cancel_mad); | 2324 | EXPORT_SYMBOL(ib_cancel_mad); |
2326 | 2325 | ||
2327 | static void local_completions(void *data) | 2326 | static void local_completions(struct work_struct *work) |
2328 | { | 2327 | { |
2329 | struct ib_mad_agent_private *mad_agent_priv; | 2328 | struct ib_mad_agent_private *mad_agent_priv; |
2330 | struct ib_mad_local_private *local; | 2329 | struct ib_mad_local_private *local; |
@@ -2334,7 +2333,8 @@ static void local_completions(void *data) | |||
2334 | struct ib_wc wc; | 2333 | struct ib_wc wc; |
2335 | struct ib_mad_send_wc mad_send_wc; | 2334 | struct ib_mad_send_wc mad_send_wc; |
2336 | 2335 | ||
2337 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2336 | mad_agent_priv = |
2337 | container_of(work, struct ib_mad_agent_private, local_work); | ||
2338 | 2338 | ||
2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2339 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
2340 | while (!list_empty(&mad_agent_priv->local_list)) { | 2340 | while (!list_empty(&mad_agent_priv->local_list)) { |
@@ -2434,14 +2434,15 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | |||
2434 | return ret; | 2434 | return ret; |
2435 | } | 2435 | } |
2436 | 2436 | ||
2437 | static void timeout_sends(void *data) | 2437 | static void timeout_sends(struct work_struct *work) |
2438 | { | 2438 | { |
2439 | struct ib_mad_agent_private *mad_agent_priv; | 2439 | struct ib_mad_agent_private *mad_agent_priv; |
2440 | struct ib_mad_send_wr_private *mad_send_wr; | 2440 | struct ib_mad_send_wr_private *mad_send_wr; |
2441 | struct ib_mad_send_wc mad_send_wc; | 2441 | struct ib_mad_send_wc mad_send_wc; |
2442 | unsigned long flags, delay; | 2442 | unsigned long flags, delay; |
2443 | 2443 | ||
2444 | mad_agent_priv = (struct ib_mad_agent_private *)data; | 2444 | mad_agent_priv = container_of(work, struct ib_mad_agent_private, |
2445 | timed_work.work); | ||
2445 | mad_send_wc.vendor_err = 0; | 2446 | mad_send_wc.vendor_err = 0; |
2446 | 2447 | ||
2447 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 2448 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
@@ -2799,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device, | |||
2799 | ret = -ENOMEM; | 2800 | ret = -ENOMEM; |
2800 | goto error8; | 2801 | goto error8; |
2801 | } | 2802 | } |
2802 | INIT_WORK(&port_priv->work, ib_mad_completion_handler, port_priv); | 2803 | INIT_WORK(&port_priv->work, ib_mad_completion_handler); |
2803 | 2804 | ||
2804 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); | 2805 | spin_lock_irqsave(&ib_mad_port_list_lock, flags); |
2805 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); | 2806 | list_add_tail(&port_priv->port_list, &ib_mad_port_list); |
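Every hunk in mad.c above follows the same conversion: the work callback no longer receives a caller-supplied void *, so the third argument to INIT_WORK() is gone and each handler recovers its private structure from the work_struct pointer with container_of(). A minimal, self-contained sketch of the new-style pattern; all names here (my_port, my_handler) are invented for illustration, not code from this patch:

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_port {
            int                 id;
            struct work_struct  work;       /* embedded work item */
    };

    static struct my_port port = { .id = 1 };

    /* New-style handler: it is handed the work_struct itself ... */
    static void my_handler(struct work_struct *work)
    {
            /* ... and steps back to the enclosing structure. */
            struct my_port *p = container_of(work, struct my_port, work);

            printk(KERN_INFO "servicing port %d\n", p->id);
    }

    static int __init my_init(void)
    {
            INIT_WORK(&port.work, my_handler);      /* no context argument any more */
            schedule_work(&port.work);
            return 0;
    }

    static void __exit my_exit(void)
    {
            flush_scheduled_work();
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");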
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d06b59083f6e..d5548e73e068 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h | |||
@@ -102,7 +102,7 @@ struct ib_mad_agent_private { | |||
102 | struct list_head send_list; | 102 | struct list_head send_list; |
103 | struct list_head wait_list; | 103 | struct list_head wait_list; |
104 | struct list_head done_list; | 104 | struct list_head done_list; |
105 | struct work_struct timed_work; | 105 | struct delayed_work timed_work; |
106 | unsigned long timeout; | 106 | unsigned long timeout; |
107 | struct list_head local_list; | 107 | struct list_head local_list; |
108 | struct work_struct local_work; | 108 | struct work_struct local_work; |
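The mad_priv.h hunk shows the other half of the conversion: work items that are queued with a timeout become struct delayed_work, which wraps a work_struct together with a timer. Because the callback is handed the inner work_struct, the container_of() call has to name the .work member of the delayed_work, exactly as timeout_sends() above does with timed_work.work. A hedged sketch of that shape, with illustrative names only:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_agent {
            unsigned long        timeout;
            struct delayed_work  timed_work;        /* was: struct work_struct */
    };

    static void my_timeout_handler(struct work_struct *work)
    {
            /* note the trailing ".work": we are given the inner work_struct */
            struct my_agent *agent =
                    container_of(work, struct my_agent, timed_work.work);

            agent->timeout = 0;
    }

    static void my_agent_init(struct my_agent *agent)
    {
            INIT_DELAYED_WORK(&agent->timed_work, my_timeout_handler);
            schedule_delayed_work(&agent->timed_work, msecs_to_jiffies(100));
    }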
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c index 1ef79d015a1e..3663fd7022be 100644 --- a/drivers/infiniband/core/mad_rmpp.c +++ b/drivers/infiniband/core/mad_rmpp.c | |||
@@ -45,8 +45,8 @@ enum rmpp_state { | |||
45 | struct mad_rmpp_recv { | 45 | struct mad_rmpp_recv { |
46 | struct ib_mad_agent_private *agent; | 46 | struct ib_mad_agent_private *agent; |
47 | struct list_head list; | 47 | struct list_head list; |
48 | struct work_struct timeout_work; | 48 | struct delayed_work timeout_work; |
49 | struct work_struct cleanup_work; | 49 | struct delayed_work cleanup_work; |
50 | struct completion comp; | 50 | struct completion comp; |
51 | enum rmpp_state state; | 51 | enum rmpp_state state; |
52 | spinlock_t lock; | 52 | spinlock_t lock; |
@@ -233,9 +233,10 @@ static void nack_recv(struct ib_mad_agent_private *agent, | |||
233 | } | 233 | } |
234 | } | 234 | } |
235 | 235 | ||
236 | static void recv_timeout_handler(void *data) | 236 | static void recv_timeout_handler(struct work_struct *work) |
237 | { | 237 | { |
238 | struct mad_rmpp_recv *rmpp_recv = data; | 238 | struct mad_rmpp_recv *rmpp_recv = |
239 | container_of(work, struct mad_rmpp_recv, timeout_work.work); | ||
239 | struct ib_mad_recv_wc *rmpp_wc; | 240 | struct ib_mad_recv_wc *rmpp_wc; |
240 | unsigned long flags; | 241 | unsigned long flags; |
241 | 242 | ||
@@ -254,9 +255,10 @@ static void recv_timeout_handler(void *data) | |||
254 | ib_free_recv_mad(rmpp_wc); | 255 | ib_free_recv_mad(rmpp_wc); |
255 | } | 256 | } |
256 | 257 | ||
257 | static void recv_cleanup_handler(void *data) | 258 | static void recv_cleanup_handler(struct work_struct *work) |
258 | { | 259 | { |
259 | struct mad_rmpp_recv *rmpp_recv = data; | 260 | struct mad_rmpp_recv *rmpp_recv = |
261 | container_of(work, struct mad_rmpp_recv, cleanup_work.work); | ||
260 | unsigned long flags; | 262 | unsigned long flags; |
261 | 263 | ||
262 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); | 264 | spin_lock_irqsave(&rmpp_recv->agent->lock, flags); |
@@ -285,8 +287,8 @@ create_rmpp_recv(struct ib_mad_agent_private *agent, | |||
285 | 287 | ||
286 | rmpp_recv->agent = agent; | 288 | rmpp_recv->agent = agent; |
287 | init_completion(&rmpp_recv->comp); | 289 | init_completion(&rmpp_recv->comp); |
288 | INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv); | 290 | INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler); |
289 | INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv); | 291 | INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler); |
290 | spin_lock_init(&rmpp_recv->lock); | 292 | spin_lock_init(&rmpp_recv->lock); |
291 | rmpp_recv->state = RMPP_STATE_ACTIVE; | 293 | rmpp_recv->state = RMPP_STATE_ACTIVE; |
292 | atomic_set(&rmpp_recv->refcount, 1); | 294 | atomic_set(&rmpp_recv->refcount, 1); |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 1706d3c7e95e..e45afba75341 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -360,9 +360,10 @@ static void free_sm_ah(struct kref *kref) | |||
360 | kfree(sm_ah); | 360 | kfree(sm_ah); |
361 | } | 361 | } |
362 | 362 | ||
363 | static void update_sm_ah(void *port_ptr) | 363 | static void update_sm_ah(struct work_struct *work) |
364 | { | 364 | { |
365 | struct ib_sa_port *port = port_ptr; | 365 | struct ib_sa_port *port = |
366 | container_of(work, struct ib_sa_port, update_task); | ||
366 | struct ib_sa_sm_ah *new_ah, *old_ah; | 367 | struct ib_sa_sm_ah *new_ah, *old_ah; |
367 | struct ib_port_attr port_attr; | 368 | struct ib_port_attr port_attr; |
368 | struct ib_ah_attr ah_attr; | 369 | struct ib_ah_attr ah_attr; |
@@ -992,8 +993,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
992 | if (IS_ERR(sa_dev->port[i].agent)) | 993 | if (IS_ERR(sa_dev->port[i].agent)) |
993 | goto err; | 994 | goto err; |
994 | 995 | ||
995 | INIT_WORK(&sa_dev->port[i].update_task, | 996 | INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah); |
996 | update_sm_ah, &sa_dev->port[i]); | ||
997 | } | 997 | } |
998 | 998 | ||
999 | ib_set_client_data(device, &sa_client, sa_dev); | 999 | ib_set_client_data(device, &sa_client, sa_dev); |
@@ -1010,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device) | |||
1010 | goto err; | 1010 | goto err; |
1011 | 1011 | ||
1012 | for (i = 0; i <= e - s; ++i) | 1012 | for (i = 0; i <= e - s; ++i) |
1013 | update_sm_ah(&sa_dev->port[i]); | 1013 | update_sm_ah(&sa_dev->port[i].update_task); |
1014 | 1014 | ||
1015 | return; | 1015 | return; |
1016 | 1016 | ||
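sa_query.c also shows what happens when a former handler was called directly as well as queued: since update_sm_ah() now only accepts a work_struct pointer, the synchronous call in ib_sa_add_one() passes the address of the embedded update_task member instead of the port itself, and container_of() inside the handler gets back to the same object either way. A short sketch of that calling convention, with assumed names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_port {
            int                 port_num;
            struct work_struct  update_task;
    };

    static void update_handler(struct work_struct *work)
    {
            struct my_port *port =
                    container_of(work, struct my_port, update_task);

            port->port_num++;       /* stand-in for the real update */
    }

    static void refresh_port_now(struct my_port *port)
    {
            /* run the handler synchronously: hand it the work member,
             * just as update_sm_ah(&sa_dev->port[i].update_task) does above */
            update_handler(&port->update_task);
    }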
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index efe147dbeb42..db12cc0841df 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c | |||
@@ -179,9 +179,10 @@ void ib_umem_release(struct ib_device *dev, struct ib_umem *umem) | |||
179 | up_write(¤t->mm->mmap_sem); | 179 | up_write(¤t->mm->mmap_sem); |
180 | } | 180 | } |
181 | 181 | ||
182 | static void ib_umem_account(void *work_ptr) | 182 | static void ib_umem_account(struct work_struct *_work) |
183 | { | 183 | { |
184 | struct ib_umem_account_work *work = work_ptr; | 184 | struct ib_umem_account_work *work = |
185 | container_of(_work, struct ib_umem_account_work, work); | ||
185 | 186 | ||
186 | down_write(&work->mm->mmap_sem); | 187 | down_write(&work->mm->mmap_sem); |
187 | work->mm->locked_vm -= work->diff; | 188 | work->mm->locked_vm -= work->diff; |
@@ -216,7 +217,7 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
216 | return; | 217 | return; |
217 | } | 218 | } |
218 | 219 | ||
219 | INIT_WORK(&work->work, ib_umem_account, work); | 220 | INIT_WORK(&work->work, ib_umem_account); |
220 | work->mm = mm; | 221 | work->mm = mm; |
221 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; | 222 | work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT; |
222 | 223 | ||
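uverbs_mem.c illustrates a third recurring shape: a small, dynamically allocated wrapper that carries both the work item and its payload. The handler gets the wrapper back through container_of() and frees it when done, so nothing has to outlive the queue. A self-contained sketch under the same conventions; the names are invented:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct account_work {
            struct work_struct  work;
            long                diff;       /* payload carried to the handler */
    };

    static void account_handler(struct work_struct *work)
    {
            struct account_work *aw =
                    container_of(work, struct account_work, work);

            /* ... consume aw->diff ... */
            kfree(aw);                      /* the handler owns the wrapper */
    }

    static int queue_accounting(long diff)
    {
            struct account_work *aw = kmalloc(sizeof(*aw), GFP_KERNEL);

            if (!aw)
                    return -ENOMEM;

            INIT_WORK(&aw->work, account_handler);
            aw->diff = diff;
            schedule_work(&aw->work);
            return 0;
    }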
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 413754b1d8a2..8536aeb96af8 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c | |||
@@ -214,9 +214,10 @@ struct ipath_user_pages_work { | |||
214 | unsigned long num_pages; | 214 | unsigned long num_pages; |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static void user_pages_account(void *ptr) | 217 | static void user_pages_account(struct work_struct *_work) |
218 | { | 218 | { |
219 | struct ipath_user_pages_work *work = ptr; | 219 | struct ipath_user_pages_work *work = |
220 | container_of(_work, struct ipath_user_pages_work, work); | ||
220 | 221 | ||
221 | down_write(&work->mm->mmap_sem); | 222 | down_write(&work->mm->mmap_sem); |
222 | work->mm->locked_vm -= work->num_pages; | 223 | work->mm->locked_vm -= work->num_pages; |
@@ -242,7 +243,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages) | |||
242 | 243 | ||
243 | goto bail; | 244 | goto bail; |
244 | 245 | ||
245 | INIT_WORK(&work->work, user_pages_account, work); | 246 | INIT_WORK(&work->work, user_pages_account); |
246 | work->mm = mm; | 247 | work->mm = mm; |
247 | work->num_pages = num_pages; | 248 | work->num_pages = num_pages; |
248 | 249 | ||
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c index cd044ea2dfa4..e948158a28d9 100644 --- a/drivers/infiniband/hw/mthca/mthca_catas.c +++ b/drivers/infiniband/hw/mthca/mthca_catas.c | |||
@@ -57,7 +57,7 @@ static int catas_reset_disable; | |||
57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); | 57 | module_param_named(catas_reset_disable, catas_reset_disable, int, 0644); |
58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); | 58 | MODULE_PARM_DESC(catas_reset_disable, "disable reset on catastrophic event if nonzero"); |
59 | 59 | ||
60 | static void catas_reset(void *work_ptr) | 60 | static void catas_reset(struct work_struct *work) |
61 | { | 61 | { |
62 | struct mthca_dev *dev, *tmpdev; | 62 | struct mthca_dev *dev, *tmpdev; |
63 | LIST_HEAD(tlist); | 63 | LIST_HEAD(tlist); |
@@ -203,7 +203,7 @@ void mthca_stop_catas_poll(struct mthca_dev *dev) | |||
203 | 203 | ||
204 | int __init mthca_catas_init(void) | 204 | int __init mthca_catas_init(void) |
205 | { | 205 | { |
206 | INIT_WORK(&catas_work, catas_reset, NULL); | 206 | INIT_WORK(&catas_work, catas_reset); |
207 | 207 | ||
208 | catas_wq = create_singlethread_workqueue("mthca_catas"); | 208 | catas_wq = create_singlethread_workqueue("mthca_catas"); |
209 | if (!catas_wq) | 209 | if (!catas_wq) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index f2b61851a49c..99547996aba2 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -136,11 +136,11 @@ struct ipoib_dev_priv { | |||
136 | struct list_head multicast_list; | 136 | struct list_head multicast_list; |
137 | struct rb_root multicast_tree; | 137 | struct rb_root multicast_tree; |
138 | 138 | ||
139 | struct work_struct pkey_task; | 139 | struct delayed_work pkey_task; |
140 | struct work_struct mcast_task; | 140 | struct delayed_work mcast_task; |
141 | struct work_struct flush_task; | 141 | struct work_struct flush_task; |
142 | struct work_struct restart_task; | 142 | struct work_struct restart_task; |
143 | struct work_struct ah_reap_task; | 143 | struct delayed_work ah_reap_task; |
144 | 144 | ||
145 | struct ib_device *ca; | 145 | struct ib_device *ca; |
146 | u8 port; | 146 | u8 port; |
@@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev); | |||
254 | 254 | ||
255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, | 255 | void ipoib_send(struct net_device *dev, struct sk_buff *skb, |
256 | struct ipoib_ah *address, u32 qpn); | 256 | struct ipoib_ah *address, u32 qpn); |
257 | void ipoib_reap_ah(void *dev_ptr); | 257 | void ipoib_reap_ah(struct work_struct *work); |
258 | 258 | ||
259 | void ipoib_flush_paths(struct net_device *dev); | 259 | void ipoib_flush_paths(struct net_device *dev); |
260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); | 260 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); |
261 | 261 | ||
262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 262 | int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
263 | void ipoib_ib_dev_flush(void *dev); | 263 | void ipoib_ib_dev_flush(struct work_struct *work); |
264 | void ipoib_ib_dev_cleanup(struct net_device *dev); | 264 | void ipoib_ib_dev_cleanup(struct net_device *dev); |
265 | 265 | ||
266 | int ipoib_ib_dev_open(struct net_device *dev); | 266 | int ipoib_ib_dev_open(struct net_device *dev); |
@@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev); | |||
271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); | 271 | int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); |
272 | void ipoib_dev_cleanup(struct net_device *dev); | 272 | void ipoib_dev_cleanup(struct net_device *dev); |
273 | 273 | ||
274 | void ipoib_mcast_join_task(void *dev_ptr); | 274 | void ipoib_mcast_join_task(struct work_struct *work); |
275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); | 275 | void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); |
276 | 276 | ||
277 | void ipoib_mcast_restart_task(void *dev_ptr); | 277 | void ipoib_mcast_restart_task(struct work_struct *work); |
278 | int ipoib_mcast_start_thread(struct net_device *dev); | 278 | int ipoib_mcast_start_thread(struct net_device *dev); |
279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); | 279 | int ipoib_mcast_stop_thread(struct net_device *dev, int flush); |
280 | 280 | ||
@@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler, | |||
312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); | 312 | int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); |
313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); | 313 | int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); |
314 | 314 | ||
315 | void ipoib_pkey_poll(void *dev); | 315 | void ipoib_pkey_poll(struct work_struct *work); |
316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); | 316 | int ipoib_pkey_dev_delay_open(struct net_device *dev); |
317 | 317 | ||
318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG | 318 | #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 8bf5e9ec7c95..f10fba5d3265 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -400,10 +400,11 @@ static void __ipoib_reap_ah(struct net_device *dev) | |||
400 | spin_unlock_irq(&priv->tx_lock); | 400 | spin_unlock_irq(&priv->tx_lock); |
401 | } | 401 | } |
402 | 402 | ||
403 | void ipoib_reap_ah(void *dev_ptr) | 403 | void ipoib_reap_ah(struct work_struct *work) |
404 | { | 404 | { |
405 | struct net_device *dev = dev_ptr; | 405 | struct ipoib_dev_priv *priv = |
406 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 406 | container_of(work, struct ipoib_dev_priv, ah_reap_task.work); |
407 | struct net_device *dev = priv->dev; | ||
407 | 408 | ||
408 | __ipoib_reap_ah(dev); | 409 | __ipoib_reap_ah(dev); |
409 | 410 | ||
@@ -613,10 +614,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) | |||
613 | return 0; | 614 | return 0; |
614 | } | 615 | } |
615 | 616 | ||
616 | void ipoib_ib_dev_flush(void *_dev) | 617 | void ipoib_ib_dev_flush(struct work_struct *work) |
617 | { | 618 | { |
618 | struct net_device *dev = (struct net_device *)_dev; | 619 | struct ipoib_dev_priv *cpriv, *priv = |
619 | struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; | 620 | container_of(work, struct ipoib_dev_priv, flush_task); |
621 | struct net_device *dev = priv->dev; | ||
620 | 622 | ||
621 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { | 623 | if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { |
622 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); | 624 | ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); |
@@ -638,14 +640,14 @@ void ipoib_ib_dev_flush(void *_dev) | |||
638 | */ | 640 | */ |
639 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { | 641 | if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { |
640 | ipoib_ib_dev_up(dev); | 642 | ipoib_ib_dev_up(dev); |
641 | ipoib_mcast_restart_task(dev); | 643 | ipoib_mcast_restart_task(&priv->restart_task); |
642 | } | 644 | } |
643 | 645 | ||
644 | mutex_lock(&priv->vlan_mutex); | 646 | mutex_lock(&priv->vlan_mutex); |
645 | 647 | ||
646 | /* Flush any child interfaces too */ | 648 | /* Flush any child interfaces too */ |
647 | list_for_each_entry(cpriv, &priv->child_intfs, list) | 649 | list_for_each_entry(cpriv, &priv->child_intfs, list) |
648 | ipoib_ib_dev_flush(cpriv->dev); | 650 | ipoib_ib_dev_flush(&cpriv->flush_task); |
649 | 651 | ||
650 | mutex_unlock(&priv->vlan_mutex); | 652 | mutex_unlock(&priv->vlan_mutex); |
651 | } | 653 | } |
@@ -672,10 +674,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) | |||
672 | * change async notification is available. | 674 | * change async notification is available. |
673 | */ | 675 | */ |
674 | 676 | ||
675 | void ipoib_pkey_poll(void *dev_ptr) | 677 | void ipoib_pkey_poll(struct work_struct *work) |
676 | { | 678 | { |
677 | struct net_device *dev = dev_ptr; | 679 | struct ipoib_dev_priv *priv = |
678 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 680 | container_of(work, struct ipoib_dev_priv, pkey_task.work); |
681 | struct net_device *dev = priv->dev; | ||
679 | 682 | ||
680 | ipoib_pkey_dev_check_presence(dev); | 683 | ipoib_pkey_dev_check_presence(dev); |
681 | 684 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5ba3154320b4..c09280243726 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -940,11 +940,11 @@ static void ipoib_setup(struct net_device *dev) | |||
940 | INIT_LIST_HEAD(&priv->dead_ahs); | 940 | INIT_LIST_HEAD(&priv->dead_ahs); |
941 | INIT_LIST_HEAD(&priv->multicast_list); | 941 | INIT_LIST_HEAD(&priv->multicast_list); |
942 | 942 | ||
943 | INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); | 943 | INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); |
944 | INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); | 944 | INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); |
945 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); | 945 | INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); |
946 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); | 946 | INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); |
947 | INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); | 947 | INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); |
948 | } | 948 | } |
949 | 949 | ||
950 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) | 950 | struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index d282d65e3ee0..b04b72ca32ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
@@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status, | |||
399 | mcast->backoff = 1; | 399 | mcast->backoff = 1; |
400 | mutex_lock(&mcast_mutex); | 400 | mutex_lock(&mcast_mutex); |
401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 401 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
402 | queue_work(ipoib_workqueue, &priv->mcast_task); | 402 | queue_delayed_work(ipoib_workqueue, |
403 | &priv->mcast_task, 0); | ||
403 | mutex_unlock(&mcast_mutex); | 404 | mutex_unlock(&mcast_mutex); |
404 | complete(&mcast->done); | 405 | complete(&mcast->done); |
405 | return; | 406 | return; |
@@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status, | |||
435 | 436 | ||
436 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { | 437 | if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { |
437 | if (status == -ETIMEDOUT) | 438 | if (status == -ETIMEDOUT) |
438 | queue_work(ipoib_workqueue, &priv->mcast_task); | 439 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
440 | 0); | ||
439 | else | 441 | else |
440 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, | 442 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, |
441 | mcast->backoff * HZ); | 443 | mcast->backoff * HZ); |
@@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, | |||
517 | mcast->query_id = ret; | 519 | mcast->query_id = ret; |
518 | } | 520 | } |
519 | 521 | ||
520 | void ipoib_mcast_join_task(void *dev_ptr) | 522 | void ipoib_mcast_join_task(struct work_struct *work) |
521 | { | 523 | { |
522 | struct net_device *dev = dev_ptr; | 524 | struct ipoib_dev_priv *priv = |
523 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 525 | container_of(work, struct ipoib_dev_priv, mcast_task.work); |
526 | struct net_device *dev = priv->dev; | ||
524 | 527 | ||
525 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) | 528 | if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) |
526 | return; | 529 | return; |
@@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) | |||
610 | 613 | ||
611 | mutex_lock(&mcast_mutex); | 614 | mutex_lock(&mcast_mutex); |
612 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) | 615 | if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) |
613 | queue_work(ipoib_workqueue, &priv->mcast_task); | 616 | queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); |
614 | mutex_unlock(&mcast_mutex); | 617 | mutex_unlock(&mcast_mutex); |
615 | 618 | ||
616 | spin_lock_irq(&priv->lock); | 619 | spin_lock_irq(&priv->lock); |
@@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) | |||
818 | } | 821 | } |
819 | } | 822 | } |
820 | 823 | ||
821 | void ipoib_mcast_restart_task(void *dev_ptr) | 824 | void ipoib_mcast_restart_task(struct work_struct *work) |
822 | { | 825 | { |
823 | struct net_device *dev = dev_ptr; | 826 | struct ipoib_dev_priv *priv = |
824 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 827 | container_of(work, struct ipoib_dev_priv, restart_task); |
828 | struct net_device *dev = priv->dev; | ||
825 | struct dev_mc_list *mclist; | 829 | struct dev_mc_list *mclist; |
826 | struct ipoib_mcast *mcast, *tmcast; | 830 | struct ipoib_mcast *mcast, *tmcast; |
827 | LIST_HEAD(remove_list); | 831 | LIST_HEAD(remove_list); |
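The IPoIB multicast changes show the queueing side of the delayed_work switch: once mcast_task is a struct delayed_work it can no longer be handed to queue_work(), so immediate submissions become queue_delayed_work(..., 0), which queues the item for execution right away. A compact sketch under stated assumptions (my_wq and my_task are illustrative, not from the patch):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_wq;
    static struct delayed_work my_task;

    static void my_task_fn(struct work_struct *work)
    {
            /* work body */
    }

    static void kick_task(int immediate)
    {
            /* A delayed_work can only be queued with queue_delayed_work();
             * a delay of 0 runs it as soon as possible, which is why the
             * old queue_work(wq, &task) calls above become
             * queue_delayed_work(wq, &task, 0). */
            queue_delayed_work(my_wq, &my_task, immediate ? 0 : 5 * HZ);
    }

    static int my_setup(void)
    {
            my_wq = create_singlethread_workqueue("my_wq");
            if (!my_wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&my_task, my_task_fn);
            kick_task(1);
            return 0;
    }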
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 18a000034996..693b77002897 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | static void iser_cq_tasklet_fn(unsigned long data); | 49 | static void iser_cq_tasklet_fn(unsigned long data); |
50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); | 50 | static void iser_cq_callback(struct ib_cq *cq, void *cq_context); |
51 | static void iser_comp_error_worker(void *data); | 51 | static void iser_comp_error_worker(struct work_struct *work); |
52 | 52 | ||
53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) | 53 | static void iser_cq_event_callback(struct ib_event *cause, void *context) |
54 | { | 54 | { |
@@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn) | |||
480 | init_waitqueue_head(&ib_conn->wait); | 480 | init_waitqueue_head(&ib_conn->wait); |
481 | atomic_set(&ib_conn->post_recv_buf_count, 0); | 481 | atomic_set(&ib_conn->post_recv_buf_count, 0); |
482 | atomic_set(&ib_conn->post_send_buf_count, 0); | 482 | atomic_set(&ib_conn->post_send_buf_count, 0); |
483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, | 483 | INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); |
484 | ib_conn); | ||
485 | INIT_LIST_HEAD(&ib_conn->conn_list); | 484 | INIT_LIST_HEAD(&ib_conn->conn_list); |
486 | spin_lock_init(&ib_conn->lock); | 485 | spin_lock_init(&ib_conn->lock); |
487 | 486 | ||
@@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc) | |||
754 | return ret_val; | 753 | return ret_val; |
755 | } | 754 | } |
756 | 755 | ||
757 | static void iser_comp_error_worker(void *data) | 756 | static void iser_comp_error_worker(struct work_struct *work) |
758 | { | 757 | { |
759 | struct iser_conn *ib_conn = data; | 758 | struct iser_conn *ib_conn = |
759 | container_of(work, struct iser_conn, comperror_work); | ||
760 | 760 | ||
761 | /* getting here when the state is UP means that the conn is being * | 761 | /* getting here when the state is UP means that the conn is being * |
762 | * terminated asynchronously from the iSCSI layer's perspective. */ | 762 | * terminated asynchronously from the iSCSI layer's perspective. */ |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 64ab5fc7cca3..a6289595557b 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -390,9 +390,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
390 | wait_for_completion(&target->done); | 390 | wait_for_completion(&target->done); |
391 | } | 391 | } |
392 | 392 | ||
393 | static void srp_remove_work(void *target_ptr) | 393 | static void srp_remove_work(struct work_struct *work) |
394 | { | 394 | { |
395 | struct srp_target_port *target = target_ptr; | 395 | struct srp_target_port *target = |
396 | container_of(work, struct srp_target_port, work); | ||
396 | 397 | ||
397 | spin_lock_irq(target->scsi_host->host_lock); | 398 | spin_lock_irq(target->scsi_host->host_lock); |
398 | if (target->state != SRP_TARGET_DEAD) { | 399 | if (target->state != SRP_TARGET_DEAD) { |
@@ -575,7 +576,7 @@ err: | |||
575 | spin_lock_irq(target->scsi_host->host_lock); | 576 | spin_lock_irq(target->scsi_host->host_lock); |
576 | if (target->state == SRP_TARGET_CONNECTING) { | 577 | if (target->state == SRP_TARGET_CONNECTING) { |
577 | target->state = SRP_TARGET_DEAD; | 578 | target->state = SRP_TARGET_DEAD; |
578 | INIT_WORK(&target->work, srp_remove_work, target); | 579 | INIT_WORK(&target->work, srp_remove_work); |
579 | schedule_work(&target->work); | 580 | schedule_work(&target->work); |
580 | } | 581 | } |
581 | spin_unlock_irq(target->scsi_host->host_lock); | 582 | spin_unlock_irq(target->scsi_host->host_lock); |
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index cbb93669d1ce..8451b29a3db5 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c | |||
@@ -567,9 +567,9 @@ static int atkbd_set_leds(struct atkbd *atkbd) | |||
567 | * interrupt context. | 567 | * interrupt context. |
568 | */ | 568 | */ |
569 | 569 | ||
570 | static void atkbd_event_work(void *data) | 570 | static void atkbd_event_work(struct work_struct *work) |
571 | { | 571 | { |
572 | struct atkbd *atkbd = data; | 572 | struct atkbd *atkbd = container_of(work, struct atkbd, event_work); |
573 | 573 | ||
574 | mutex_lock(&atkbd->event_mutex); | 574 | mutex_lock(&atkbd->event_mutex); |
575 | 575 | ||
@@ -943,7 +943,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) | |||
943 | 943 | ||
944 | atkbd->dev = dev; | 944 | atkbd->dev = dev; |
945 | ps2_init(&atkbd->ps2dev, serio); | 945 | ps2_init(&atkbd->ps2dev, serio); |
946 | INIT_WORK(&atkbd->event_work, atkbd_event_work, atkbd); | 946 | INIT_WORK(&atkbd->event_work, atkbd_event_work); |
947 | mutex_init(&atkbd->event_mutex); | 947 | mutex_init(&atkbd->event_mutex); |
948 | 948 | ||
949 | switch (serio->id.type) { | 949 | switch (serio->id.type) { |
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c index 979b93e33da7..b7f049b45b6b 100644 --- a/drivers/input/keyboard/lkkbd.c +++ b/drivers/input/keyboard/lkkbd.c | |||
@@ -572,9 +572,9 @@ lkkbd_event (struct input_dev *dev, unsigned int type, unsigned int code, | |||
572 | * were in. | 572 | * were in. |
573 | */ | 573 | */ |
574 | static void | 574 | static void |
575 | lkkbd_reinit (void *data) | 575 | lkkbd_reinit (struct work_struct *work) |
576 | { | 576 | { |
577 | struct lkkbd *lk = data; | 577 | struct lkkbd *lk = container_of(work, struct lkkbd, tq); |
578 | int division; | 578 | int division; |
579 | unsigned char leds_on = 0; | 579 | unsigned char leds_on = 0; |
580 | unsigned char leds_off = 0; | 580 | unsigned char leds_off = 0; |
@@ -651,7 +651,7 @@ lkkbd_connect (struct serio *serio, struct serio_driver *drv) | |||
651 | 651 | ||
652 | lk->serio = serio; | 652 | lk->serio = serio; |
653 | lk->dev = input_dev; | 653 | lk->dev = input_dev; |
654 | INIT_WORK (&lk->tq, lkkbd_reinit, lk); | 654 | INIT_WORK (&lk->tq, lkkbd_reinit); |
655 | lk->bell_volume = bell_volume; | 655 | lk->bell_volume = bell_volume; |
656 | lk->keyclick_volume = keyclick_volume; | 656 | lk->keyclick_volume = keyclick_volume; |
657 | lk->ctrlclick_volume = ctrlclick_volume; | 657 | lk->ctrlclick_volume = ctrlclick_volume; |
diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c index cac4781103c3..6cd887c5eb0a 100644 --- a/drivers/input/keyboard/sunkbd.c +++ b/drivers/input/keyboard/sunkbd.c | |||
@@ -208,9 +208,9 @@ static int sunkbd_initialize(struct sunkbd *sunkbd) | |||
208 | * were in. | 208 | * were in. |
209 | */ | 209 | */ |
210 | 210 | ||
211 | static void sunkbd_reinit(void *data) | 211 | static void sunkbd_reinit(struct work_struct *work) |
212 | { | 212 | { |
213 | struct sunkbd *sunkbd = data; | 213 | struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); |
214 | 214 | ||
215 | wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); | 215 | wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ); |
216 | 216 | ||
@@ -248,7 +248,7 @@ static int sunkbd_connect(struct serio *serio, struct serio_driver *drv) | |||
248 | sunkbd->serio = serio; | 248 | sunkbd->serio = serio; |
249 | sunkbd->dev = input_dev; | 249 | sunkbd->dev = input_dev; |
250 | init_waitqueue_head(&sunkbd->wait); | 250 | init_waitqueue_head(&sunkbd->wait); |
251 | INIT_WORK(&sunkbd->tq, sunkbd_reinit, sunkbd); | 251 | INIT_WORK(&sunkbd->tq, sunkbd_reinit); |
252 | snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); | 252 | snprintf(sunkbd->phys, sizeof(sunkbd->phys), "%s/input0", serio->phys); |
253 | 253 | ||
254 | serio_set_drvdata(serio, sunkbd); | 254 | serio_set_drvdata(serio, sunkbd); |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 6f9b2c7cc9c2..52bb2226ce2f 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -888,9 +888,10 @@ static int psmouse_poll(struct psmouse *psmouse) | |||
888 | * psmouse_resync() attempts to re-validate current protocol. | 888 | * psmouse_resync() attempts to re-validate current protocol. |
889 | */ | 889 | */ |
890 | 890 | ||
891 | static void psmouse_resync(void *p) | 891 | static void psmouse_resync(struct work_struct *work) |
892 | { | 892 | { |
893 | struct psmouse *psmouse = p, *parent = NULL; | 893 | struct psmouse *parent = NULL, *psmouse = |
894 | container_of(work, struct psmouse, resync_work); | ||
894 | struct serio *serio = psmouse->ps2dev.serio; | 895 | struct serio *serio = psmouse->ps2dev.serio; |
895 | psmouse_ret_t rc = PSMOUSE_GOOD_DATA; | 896 | psmouse_ret_t rc = PSMOUSE_GOOD_DATA; |
896 | int failed = 0, enabled = 0; | 897 | int failed = 0, enabled = 0; |
@@ -1121,7 +1122,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv) | |||
1121 | goto out; | 1122 | goto out; |
1122 | 1123 | ||
1123 | ps2_init(&psmouse->ps2dev, serio); | 1124 | ps2_init(&psmouse->ps2dev, serio); |
1124 | INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse); | 1125 | INIT_WORK(&psmouse->resync_work, psmouse_resync); |
1125 | psmouse->dev = input_dev; | 1126 | psmouse->dev = input_dev; |
1126 | snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); | 1127 | snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys); |
1127 | 1128 | ||
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index e5b1b60757bb..b3e84d3bb7f7 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c | |||
@@ -251,9 +251,9 @@ EXPORT_SYMBOL(ps2_command); | |||
251 | * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) | 251 | * ps2_schedule_command(), to a PS/2 device (keyboard, mouse, etc.) |
252 | */ | 252 | */ |
253 | 253 | ||
254 | static void ps2_execute_scheduled_command(void *data) | 254 | static void ps2_execute_scheduled_command(struct work_struct *work) |
255 | { | 255 | { |
256 | struct ps2work *ps2work = data; | 256 | struct ps2work *ps2work = container_of(work, struct ps2work, work); |
257 | 257 | ||
258 | ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); | 258 | ps2_command(ps2work->ps2dev, ps2work->param, ps2work->command); |
259 | kfree(ps2work); | 259 | kfree(ps2work); |
@@ -278,7 +278,7 @@ int ps2_schedule_command(struct ps2dev *ps2dev, unsigned char *param, int comman | |||
278 | ps2work->ps2dev = ps2dev; | 278 | ps2work->ps2dev = ps2dev; |
279 | ps2work->command = command; | 279 | ps2work->command = command; |
280 | memcpy(ps2work->param, param, send); | 280 | memcpy(ps2work->param, param, send); |
281 | INIT_WORK(&ps2work->work, ps2_execute_scheduled_command, ps2work); | 281 | INIT_WORK(&ps2work->work, ps2_execute_scheduled_command); |
282 | 282 | ||
283 | if (!schedule_work(&ps2work->work)) { | 283 | if (!schedule_work(&ps2work->work)) { |
284 | kfree(ps2work); | 284 | kfree(ps2work); |
diff --git a/drivers/isdn/act2000/capi.c b/drivers/isdn/act2000/capi.c index 6ae6eb322111..946c38cf6f8a 100644 --- a/drivers/isdn/act2000/capi.c +++ b/drivers/isdn/act2000/capi.c | |||
@@ -627,8 +627,10 @@ handle_ack(act2000_card *card, act2000_chan *chan, __u8 blocknr) { | |||
627 | } | 627 | } |
628 | 628 | ||
629 | void | 629 | void |
630 | actcapi_dispatch(act2000_card *card) | 630 | actcapi_dispatch(struct work_struct *work) |
631 | { | 631 | { |
632 | struct act2000_card *card = | ||
633 | container_of(work, struct act2000_card, rcv_tq); | ||
632 | struct sk_buff *skb; | 634 | struct sk_buff *skb; |
633 | actcapi_msg *msg; | 635 | actcapi_msg *msg; |
634 | __u16 ccmd; | 636 | __u16 ccmd; |
diff --git a/drivers/isdn/act2000/capi.h b/drivers/isdn/act2000/capi.h index 49f453c53c64..e55f6a931f66 100644 --- a/drivers/isdn/act2000/capi.h +++ b/drivers/isdn/act2000/capi.h | |||
@@ -356,7 +356,7 @@ extern int actcapi_connect_req(act2000_card *, act2000_chan *, char *, char, int | |||
356 | extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); | 356 | extern void actcapi_select_b2_protocol_req(act2000_card *, act2000_chan *); |
357 | extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); | 357 | extern void actcapi_disconnect_b3_req(act2000_card *, act2000_chan *); |
358 | extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); | 358 | extern void actcapi_connect_resp(act2000_card *, act2000_chan *, __u8); |
359 | extern void actcapi_dispatch(act2000_card *); | 359 | extern void actcapi_dispatch(struct work_struct *); |
360 | #ifdef DEBUG_MSG | 360 | #ifdef DEBUG_MSG |
361 | extern void actcapi_debug_msg(struct sk_buff *skb, int); | 361 | extern void actcapi_debug_msg(struct sk_buff *skb, int); |
362 | #else | 362 | #else |
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c index d89dcde4eade..90593e2ef872 100644 --- a/drivers/isdn/act2000/module.c +++ b/drivers/isdn/act2000/module.c | |||
@@ -192,8 +192,11 @@ act2000_set_msn(act2000_card *card, char *eazmsn) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | static void | 194 | static void |
195 | act2000_transmit(struct act2000_card *card) | 195 | act2000_transmit(struct work_struct *work) |
196 | { | 196 | { |
197 | struct act2000_card *card = | ||
198 | container_of(work, struct act2000_card, snd_tq); | ||
199 | |||
197 | switch (card->bus) { | 200 | switch (card->bus) { |
198 | case ACT2000_BUS_ISA: | 201 | case ACT2000_BUS_ISA: |
199 | act2000_isa_send(card); | 202 | act2000_isa_send(card); |
@@ -207,8 +210,11 @@ act2000_transmit(struct act2000_card *card) | |||
207 | } | 210 | } |
208 | 211 | ||
209 | static void | 212 | static void |
210 | act2000_receive(struct act2000_card *card) | 213 | act2000_receive(struct work_struct *work) |
211 | { | 214 | { |
215 | struct act2000_card *card = | ||
216 | container_of(work, struct act2000_card, poll_tq); | ||
217 | |||
212 | switch (card->bus) { | 218 | switch (card->bus) { |
213 | case ACT2000_BUS_ISA: | 219 | case ACT2000_BUS_ISA: |
214 | act2000_isa_receive(card); | 220 | act2000_isa_receive(card); |
@@ -227,7 +233,7 @@ act2000_poll(unsigned long data) | |||
227 | act2000_card * card = (act2000_card *)data; | 233 | act2000_card * card = (act2000_card *)data; |
228 | unsigned long flags; | 234 | unsigned long flags; |
229 | 235 | ||
230 | act2000_receive(card); | 236 | act2000_receive(&card->poll_tq); |
231 | spin_lock_irqsave(&card->lock, flags); | 237 | spin_lock_irqsave(&card->lock, flags); |
232 | mod_timer(&card->ptimer, jiffies+3); | 238 | mod_timer(&card->ptimer, jiffies+3); |
233 | spin_unlock_irqrestore(&card->lock, flags); | 239 | spin_unlock_irqrestore(&card->lock, flags); |
@@ -578,9 +584,9 @@ act2000_alloccard(int bus, int port, int irq, char *id) | |||
578 | skb_queue_head_init(&card->sndq); | 584 | skb_queue_head_init(&card->sndq); |
579 | skb_queue_head_init(&card->rcvq); | 585 | skb_queue_head_init(&card->rcvq); |
580 | skb_queue_head_init(&card->ackq); | 586 | skb_queue_head_init(&card->ackq); |
581 | INIT_WORK(&card->snd_tq, (void *) (void *) act2000_transmit, card); | 587 | INIT_WORK(&card->snd_tq, act2000_transmit); |
582 | INIT_WORK(&card->rcv_tq, (void *) (void *) actcapi_dispatch, card); | 588 | INIT_WORK(&card->rcv_tq, actcapi_dispatch); |
583 | INIT_WORK(&card->poll_tq, (void *) (void *) act2000_receive, card); | 589 | INIT_WORK(&card->poll_tq, act2000_receive); |
584 | init_timer(&card->ptimer); | 590 | init_timer(&card->ptimer); |
585 | card->interface.owner = THIS_MODULE; | 591 | card->interface.owner = THIS_MODULE; |
586 | card->interface.channels = ACT2000_BCH; | 592 | card->interface.channels = ACT2000_BCH; |
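The act2000 hunk highlights a side benefit of the new API: the old code had to cast its handlers through (void *)(void *) to squeeze them into the three-argument INIT_WORK(), which defeated type checking entirely. With the two-argument form the handler type is part of the macro's contract, so a mismatched prototype now fails to compile instead of misinterpreting its argument at runtime. An illustrative comparison with a hypothetical handler:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_card {
            struct work_struct snd_tq;
    };

    static void my_transmit(struct work_struct *work)
    {
            struct my_card *card = container_of(work, struct my_card, snd_tq);

            /* ... drain the card's send queue ... */
            (void)card;
    }

    static void my_card_init(struct my_card *card)
    {
            /* Old API needed (void *)(void *) casts to get a
             * "void fn(struct my_card *)" past INIT_WORK(); the new
             * two-argument form type-checks the handler instead. */
            INIT_WORK(&card->snd_tq, my_transmit);
    }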
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 8c4fcb9027b3..783a25526315 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c | |||
@@ -208,9 +208,10 @@ static void notify_down(u32 contr) | |||
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | static void notify_handler(void *data) | 211 | static void notify_handler(struct work_struct *work) |
212 | { | 212 | { |
213 | struct capi_notifier *np = data; | 213 | struct capi_notifier *np = |
214 | container_of(work, struct capi_notifier, work); | ||
214 | 215 | ||
215 | switch (np->cmd) { | 216 | switch (np->cmd) { |
216 | case KCI_CONTRUP: | 217 | case KCI_CONTRUP: |
@@ -235,7 +236,7 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) | |||
235 | if (!np) | 236 | if (!np) |
236 | return -ENOMEM; | 237 | return -ENOMEM; |
237 | 238 | ||
238 | INIT_WORK(&np->work, notify_handler, np); | 239 | INIT_WORK(&np->work, notify_handler); |
239 | np->cmd = cmd; | 240 | np->cmd = cmd; |
240 | np->controller = controller; | 241 | np->controller = controller; |
241 | np->applid = applid; | 242 | np->applid = applid; |
@@ -248,10 +249,11 @@ static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci) | |||
248 | 249 | ||
249 | /* -------- Receiver ------------------------------------------ */ | 250 | /* -------- Receiver ------------------------------------------ */ |
250 | 251 | ||
251 | static void recv_handler(void *_ap) | 252 | static void recv_handler(struct work_struct *work) |
252 | { | 253 | { |
253 | struct sk_buff *skb; | 254 | struct sk_buff *skb; |
254 | struct capi20_appl *ap = (struct capi20_appl *) _ap; | 255 | struct capi20_appl *ap = |
256 | container_of(work, struct capi20_appl, recv_work); | ||
255 | 257 | ||
256 | if ((!ap) || (ap->release_in_progress)) | 258 | if ((!ap) || (ap->release_in_progress)) |
257 | return; | 259 | return; |
@@ -527,7 +529,7 @@ u16 capi20_register(struct capi20_appl *ap) | |||
527 | ap->callback = NULL; | 529 | ap->callback = NULL; |
528 | init_MUTEX(&ap->recv_sem); | 530 | init_MUTEX(&ap->recv_sem); |
529 | skb_queue_head_init(&ap->recv_queue); | 531 | skb_queue_head_init(&ap->recv_queue); |
530 | INIT_WORK(&ap->recv_work, recv_handler, (void *)ap); | 532 | INIT_WORK(&ap->recv_work, recv_handler); |
531 | ap->release_in_progress = 0; | 533 | ap->release_in_progress = 0; |
532 | 534 | ||
533 | write_unlock_irqrestore(&application_lock, flags); | 535 | write_unlock_irqrestore(&application_lock, flags); |
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index bec59010bc66..3b19caeba258 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c | |||
@@ -232,9 +232,10 @@ Amd7930_new_ph(struct IsdnCardState *cs) | |||
232 | 232 | ||
233 | 233 | ||
234 | static void | 234 | static void |
235 | Amd7930_bh(struct IsdnCardState *cs) | 235 | Amd7930_bh(struct work_struct *work) |
236 | { | 236 | { |
237 | 237 | struct IsdnCardState *cs = | |
238 | container_of(work, struct IsdnCardState, tqueue); | ||
238 | struct PStack *stptr; | 239 | struct PStack *stptr; |
239 | 240 | ||
240 | if (!cs) | 241 | if (!cs) |
@@ -789,7 +790,7 @@ Amd7930_init(struct IsdnCardState *cs) | |||
789 | void __devinit | 790 | void __devinit |
790 | setup_Amd7930(struct IsdnCardState *cs) | 791 | setup_Amd7930(struct IsdnCardState *cs) |
791 | { | 792 | { |
792 | INIT_WORK(&cs->tqueue, (void *)(void *) Amd7930_bh, cs); | 793 | INIT_WORK(&cs->tqueue, Amd7930_bh); |
793 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 794 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
794 | cs->dbusytimer.data = (long) cs; | 795 | cs->dbusytimer.data = (long) cs; |
795 | init_timer(&cs->dbusytimer); | 796 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 785b08554fca..cede72cdbb31 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c | |||
@@ -1137,7 +1137,6 @@ static int checkcard(int cardnr, char *id, int *busy_flag, struct module *lockow | |||
1137 | cs->tx_skb = NULL; | 1137 | cs->tx_skb = NULL; |
1138 | cs->tx_cnt = 0; | 1138 | cs->tx_cnt = 0; |
1139 | cs->event = 0; | 1139 | cs->event = 0; |
1140 | cs->tqueue.data = cs; | ||
1141 | 1140 | ||
1142 | skb_queue_head_init(&cs->rq); | 1141 | skb_queue_head_init(&cs->rq); |
1143 | skb_queue_head_init(&cs->sq); | 1142 | skb_queue_head_init(&cs->sq); |
@@ -1554,7 +1553,7 @@ static void hisax_b_l2l1(struct PStack *st, int pr, void *arg); | |||
1554 | static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); | 1553 | static int hisax_cardmsg(struct IsdnCardState *cs, int mt, void *arg); |
1555 | static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); | 1554 | static int hisax_bc_setstack(struct PStack *st, struct BCState *bcs); |
1556 | static void hisax_bc_close(struct BCState *bcs); | 1555 | static void hisax_bc_close(struct BCState *bcs); |
1557 | static void hisax_bh(struct IsdnCardState *cs); | 1556 | static void hisax_bh(struct work_struct *work); |
1558 | static void EChannel_proc_rcv(struct hisax_d_if *d_if); | 1557 | static void EChannel_proc_rcv(struct hisax_d_if *d_if); |
1559 | 1558 | ||
1560 | int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], | 1559 | int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], |
@@ -1586,7 +1585,7 @@ int hisax_register(struct hisax_d_if *hisax_d_if, struct hisax_b_if *b_if[], | |||
1586 | hisax_d_if->cs = cs; | 1585 | hisax_d_if->cs = cs; |
1587 | cs->hw.hisax_d_if = hisax_d_if; | 1586 | cs->hw.hisax_d_if = hisax_d_if; |
1588 | cs->cardmsg = hisax_cardmsg; | 1587 | cs->cardmsg = hisax_cardmsg; |
1589 | INIT_WORK(&cs->tqueue, (void *)(void *)hisax_bh, cs); | 1588 | INIT_WORK(&cs->tqueue, hisax_bh); |
1590 | cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; | 1589 | cs->channel[0].d_st->l2.l2l1 = hisax_d_l2l1; |
1591 | for (i = 0; i < 2; i++) { | 1590 | for (i = 0; i < 2; i++) { |
1592 | cs->bcs[i].BC_SetStack = hisax_bc_setstack; | 1591 | cs->bcs[i].BC_SetStack = hisax_bc_setstack; |
@@ -1618,8 +1617,10 @@ static void hisax_sched_event(struct IsdnCardState *cs, int event) | |||
1618 | schedule_work(&cs->tqueue); | 1617 | schedule_work(&cs->tqueue); |
1619 | } | 1618 | } |
1620 | 1619 | ||
1621 | static void hisax_bh(struct IsdnCardState *cs) | 1620 | static void hisax_bh(struct work_struct *work) |
1622 | { | 1621 | { |
1622 | struct IsdnCardState *cs = | ||
1623 | container_of(work, struct IsdnCardState, tqueue); | ||
1623 | struct PStack *st; | 1624 | struct PStack *st; |
1624 | int pr; | 1625 | int pr; |
1625 | 1626 | ||
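The dropped "cs->tqueue.data = cs" line in config.c follows from the same change: the work_struct no longer exposes a data pointer for drivers to stash a context in, so there is nothing left to assign and the handlers recover cs through container_of() instead. Conceptually, container_of() is just pointer arithmetic over offsetof(); the sketch below is a simplified stand-in for the real kernel macro, which additionally type-checks the member pointer:

    #include <linux/stddef.h>   /* offsetof */

    /* Roughly what container_of(work, struct IsdnCardState, tqueue)
     * expands to: step back from the member to the enclosing object. */
    #define container_of_sketch(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))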
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c index d852c9d998b2..de9b1a4d6bac 100644 --- a/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/drivers/isdn/hisax/hfc4s8s_l1.c | |||
@@ -1083,8 +1083,9 @@ tx_b_frame(struct hfc4s8s_btype *bch) | |||
1083 | /* bottom half handler for interrupt */ | 1083 | /* bottom half handler for interrupt */ |
1084 | /*************************************/ | 1084 | /*************************************/ |
1085 | static void | 1085 | static void |
1086 | hfc4s8s_bh(hfc4s8s_hw * hw) | 1086 | hfc4s8s_bh(struct work_struct *work) |
1087 | { | 1087 | { |
1088 | hfc4s8s_hw *hw = container_of(work, hfc4s8s_hw, tqueue); | ||
1088 | u_char b; | 1089 | u_char b; |
1089 | struct hfc4s8s_l1 *l1p; | 1090 | struct hfc4s8s_l1 *l1p; |
1090 | volatile u_char *fifo_stat; | 1091 | volatile u_char *fifo_stat; |
@@ -1550,7 +1551,7 @@ setup_instance(hfc4s8s_hw * hw) | |||
1550 | goto out; | 1551 | goto out; |
1551 | } | 1552 | } |
1552 | 1553 | ||
1553 | INIT_WORK(&hw->tqueue, (void *) (void *) hfc4s8s_bh, hw); | 1554 | INIT_WORK(&hw->tqueue, hfc4s8s_bh); |
1554 | 1555 | ||
1555 | if (request_irq | 1556 | if (request_irq |
1556 | (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { | 1557 | (hw->irq, hfc4s8s_interrupt, IRQF_SHARED, hw->card_name, hw)) { |
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c index 6360e8214720..8d9864453a23 100644 --- a/drivers/isdn/hisax/hfc_2bds0.c +++ b/drivers/isdn/hisax/hfc_2bds0.c | |||
@@ -549,10 +549,11 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
549 | } | 549 | } |
550 | 550 | ||
551 | static void | 551 | static void |
552 | hfcd_bh(struct IsdnCardState *cs) | 552 | hfcd_bh(struct work_struct *work) |
553 | { | 553 | { |
554 | if (!cs) | 554 | struct IsdnCardState *cs = |
555 | return; | 555 | container_of(work, struct IsdnCardState, tqueue); |
556 | |||
556 | if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { | 557 | if (test_and_clear_bit(D_L1STATECHANGE, &cs->event)) { |
557 | switch (cs->dc.hfcd.ph_state) { | 558 | switch (cs->dc.hfcd.ph_state) { |
558 | case (0): | 559 | case (0): |
@@ -1072,5 +1073,5 @@ set_cs_func(struct IsdnCardState *cs) | |||
1072 | cs->dbusytimer.function = (void *) hfc_dbusy_timer; | 1073 | cs->dbusytimer.function = (void *) hfc_dbusy_timer; |
1073 | cs->dbusytimer.data = (long) cs; | 1074 | cs->dbusytimer.data = (long) cs; |
1074 | init_timer(&cs->dbusytimer); | 1075 | init_timer(&cs->dbusytimer); |
1075 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcd_bh, cs); | 1076 | INIT_WORK(&cs->tqueue, hfcd_bh); |
1076 | } | 1077 | } |
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 93f60b563515..5db0a85b827f 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -1506,8 +1506,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
1506 | /* handle L1 state changes */ | 1506 | /* handle L1 state changes */ |
1507 | /***************************/ | 1507 | /***************************/ |
1508 | static void | 1508 | static void |
1509 | hfcpci_bh(struct IsdnCardState *cs) | 1509 | hfcpci_bh(struct work_struct *work) |
1510 | { | 1510 | { |
1511 | struct IsdnCardState *cs = | ||
1512 | container_of(work, struct IsdnCardState, tqueue); | ||
1511 | u_long flags; | 1513 | u_long flags; |
1512 | // struct PStack *stptr; | 1514 | // struct PStack *stptr; |
1513 | 1515 | ||
@@ -1722,7 +1724,7 @@ setup_hfcpci(struct IsdnCard *card) | |||
1722 | Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); | 1724 | Write_hfc(cs, HFCPCI_INT_M2, cs->hw.hfcpci.int_m2); |
1723 | /* At this point the needed PCI config is done */ | 1725 | /* At this point the needed PCI config is done */ |
1724 | /* fifos are still not enabled */ | 1726 | /* fifos are still not enabled */ |
1725 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcpci_bh, cs); | 1727 | INIT_WORK(&cs->tqueue, hfcpci_bh); |
1726 | cs->setstack_d = setstack_hfcpci; | 1728 | cs->setstack_d = setstack_hfcpci; |
1727 | cs->BC_Send_Data = &hfcpci_send_data; | 1729 | cs->BC_Send_Data = &hfcpci_send_data; |
1728 | cs->readisac = NULL; | 1730 | cs->readisac = NULL; |
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index 954d1536db1f..4fd09d21a27f 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c | |||
@@ -1251,8 +1251,10 @@ setstack_2b(struct PStack *st, struct BCState *bcs) | |||
1251 | /* handle L1 state changes */ | 1251 | /* handle L1 state changes */ |
1252 | /***************************/ | 1252 | /***************************/ |
1253 | static void | 1253 | static void |
1254 | hfcsx_bh(struct IsdnCardState *cs) | 1254 | hfcsx_bh(struct work_struct *work) |
1255 | { | 1255 | { |
1256 | struct IsdnCardState *cs = | ||
1257 | container_of(work, struct IsdnCardState, tqueue); | ||
1256 | u_long flags; | 1258 | u_long flags; |
1257 | 1259 | ||
1258 | if (!cs) | 1260 | if (!cs) |
@@ -1499,7 +1501,7 @@ setup_hfcsx(struct IsdnCard *card) | |||
1499 | cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; | 1501 | cs->dbusytimer.function = (void *) hfcsx_dbusy_timer; |
1500 | cs->dbusytimer.data = (long) cs; | 1502 | cs->dbusytimer.data = (long) cs; |
1501 | init_timer(&cs->dbusytimer); | 1503 | init_timer(&cs->dbusytimer); |
1502 | INIT_WORK(&cs->tqueue, (void *)(void *) hfcsx_bh, cs); | 1504 | INIT_WORK(&cs->tqueue, hfcsx_bh); |
1503 | cs->readisac = NULL; | 1505 | cs->readisac = NULL; |
1504 | cs->writeisac = NULL; | 1506 | cs->writeisac = NULL; |
1505 | cs->readisacfifo = NULL; | 1507 | cs->readisacfifo = NULL; |
diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c index da706925d54d..682cac32f259 100644 --- a/drivers/isdn/hisax/icc.c +++ b/drivers/isdn/hisax/icc.c | |||
@@ -77,8 +77,10 @@ icc_new_ph(struct IsdnCardState *cs) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | static void | 79 | static void |
80 | icc_bh(struct IsdnCardState *cs) | 80 | icc_bh(struct work_struct *work) |
81 | { | 81 | { |
82 | struct IsdnCardState *cs = | ||
83 | container_of(work, struct IsdnCardState, tqueue); | ||
82 | struct PStack *stptr; | 84 | struct PStack *stptr; |
83 | 85 | ||
84 | if (!cs) | 86 | if (!cs) |
@@ -674,7 +676,7 @@ clear_pending_icc_ints(struct IsdnCardState *cs) | |||
674 | void __devinit | 676 | void __devinit |
675 | setup_icc(struct IsdnCardState *cs) | 677 | setup_icc(struct IsdnCardState *cs) |
676 | { | 678 | { |
677 | INIT_WORK(&cs->tqueue, (void *)(void *) icc_bh, cs); | 679 | INIT_WORK(&cs->tqueue, icc_bh); |
678 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 680 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
679 | cs->dbusytimer.data = (long) cs; | 681 | cs->dbusytimer.data = (long) cs; |
680 | init_timer(&cs->dbusytimer); | 682 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/isac.c b/drivers/isdn/hisax/isac.c index 282f349408bc..4e9f23803dae 100644 --- a/drivers/isdn/hisax/isac.c +++ b/drivers/isdn/hisax/isac.c | |||
@@ -81,8 +81,10 @@ isac_new_ph(struct IsdnCardState *cs) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | static void | 83 | static void |
84 | isac_bh(struct IsdnCardState *cs) | 84 | isac_bh(struct work_struct *work) |
85 | { | 85 | { |
86 | struct IsdnCardState *cs = | ||
87 | container_of(work, struct IsdnCardState, tqueue); | ||
86 | struct PStack *stptr; | 88 | struct PStack *stptr; |
87 | 89 | ||
88 | if (!cs) | 90 | if (!cs) |
@@ -674,7 +676,7 @@ clear_pending_isac_ints(struct IsdnCardState *cs) | |||
674 | void __devinit | 676 | void __devinit |
675 | setup_isac(struct IsdnCardState *cs) | 677 | setup_isac(struct IsdnCardState *cs) |
676 | { | 678 | { |
677 | INIT_WORK(&cs->tqueue, (void *)(void *) isac_bh, cs); | 679 | INIT_WORK(&cs->tqueue, isac_bh); |
678 | cs->dbusytimer.function = (void *) dbusy_timer_handler; | 680 | cs->dbusytimer.function = (void *) dbusy_timer_handler; |
679 | cs->dbusytimer.data = (long) cs; | 681 | cs->dbusytimer.data = (long) cs; |
680 | init_timer(&cs->dbusytimer); | 682 | init_timer(&cs->dbusytimer); |
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index 674af673ff96..6f1a6583b17d 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c | |||
@@ -437,8 +437,10 @@ extern void BChannel_bh(struct BCState *); | |||
437 | #define B_LL_OK 10 | 437 | #define B_LL_OK 10 |
438 | 438 | ||
439 | static void | 439 | static void |
440 | isar_bh(struct BCState *bcs) | 440 | isar_bh(struct work_struct *work) |
441 | { | 441 | { |
442 | struct BCState *bcs = container_of(work, struct BCState, tqueue); | ||
443 | |||
442 | BChannel_bh(bcs); | 444 | BChannel_bh(bcs); |
443 | if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) | 445 | if (test_and_clear_bit(B_LL_NOCARRIER, &bcs->event)) |
444 | ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); | 446 | ll_deliver_faxstat(bcs, ISDN_FAX_CLASS1_NOCARR); |
@@ -1580,7 +1582,7 @@ isar_setup(struct IsdnCardState *cs) | |||
1580 | cs->bcs[i].mode = 0; | 1582 | cs->bcs[i].mode = 0; |
1581 | cs->bcs[i].hw.isar.dpath = i + 1; | 1583 | cs->bcs[i].hw.isar.dpath = i + 1; |
1582 | modeisar(&cs->bcs[i], 0, 0); | 1584 | modeisar(&cs->bcs[i], 0, 0); |
1583 | INIT_WORK(&cs->bcs[i].tqueue, (void *)(void *) isar_bh, &cs->bcs[i]); | 1585 | INIT_WORK(&cs->bcs[i].tqueue, isar_bh); |
1584 | } | 1586 | } |
1585 | } | 1587 | } |
1586 | 1588 | ||
diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c index bab356886483..a14204ec88ee 100644 --- a/drivers/isdn/hisax/isdnl1.c +++ b/drivers/isdn/hisax/isdnl1.c | |||
@@ -315,8 +315,10 @@ BChannel_proc_ack(struct BCState *bcs) | |||
315 | } | 315 | } |
316 | 316 | ||
317 | void | 317 | void |
318 | BChannel_bh(struct BCState *bcs) | 318 | BChannel_bh(struct work_struct *work) |
319 | { | 319 | { |
320 | struct BCState *bcs = container_of(work, struct BCState, tqueue); | ||
321 | |||
320 | if (!bcs) | 322 | if (!bcs) |
321 | return; | 323 | return; |
322 | if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) | 324 | if (test_and_clear_bit(B_RCVBUFREADY, &bcs->event)) |
@@ -362,7 +364,7 @@ init_bcstate(struct IsdnCardState *cs, int bc) | |||
362 | 364 | ||
363 | bcs->cs = cs; | 365 | bcs->cs = cs; |
364 | bcs->channel = bc; | 366 | bcs->channel = bc; |
365 | INIT_WORK(&bcs->tqueue, (void *)(void *) BChannel_bh, bcs); | 367 | INIT_WORK(&bcs->tqueue, BChannel_bh); |
366 | spin_lock_init(&bcs->aclock); | 368 | spin_lock_init(&bcs->aclock); |
367 | bcs->BC_SetStack = NULL; | 369 | bcs->BC_SetStack = NULL; |
368 | bcs->BC_Close = NULL; | 370 | bcs->BC_Close = NULL; |
diff --git a/drivers/isdn/hisax/w6692.c b/drivers/isdn/hisax/w6692.c index 1655341797a9..3aeceaf9769e 100644 --- a/drivers/isdn/hisax/w6692.c +++ b/drivers/isdn/hisax/w6692.c | |||
@@ -101,8 +101,10 @@ W6692_new_ph(struct IsdnCardState *cs) | |||
101 | } | 101 | } |
102 | 102 | ||
103 | static void | 103 | static void |
104 | W6692_bh(struct IsdnCardState *cs) | 104 | W6692_bh(struct work_struct *work) |
105 | { | 105 | { |
106 | struct IsdnCardState *cs = | ||
107 | container_of(work, struct IsdnCardState, tqueue); | ||
106 | struct PStack *stptr; | 108 | struct PStack *stptr; |
107 | 109 | ||
108 | if (!cs) | 110 | if (!cs) |
@@ -1070,7 +1072,7 @@ setup_w6692(struct IsdnCard *card) | |||
1070 | id_list[cs->subtyp].card_name, cs->irq, | 1072 | id_list[cs->subtyp].card_name, cs->irq, |
1071 | cs->hw.w6692.iobase); | 1073 | cs->hw.w6692.iobase); |
1072 | 1074 | ||
1073 | INIT_WORK(&cs->tqueue, (void *)(void *) W6692_bh, cs); | 1075 | INIT_WORK(&cs->tqueue, W6692_bh); |
1074 | cs->readW6692 = &ReadW6692; | 1076 | cs->readW6692 = &ReadW6692; |
1075 | cs->writeW6692 = &WriteW6692; | 1077 | cs->writeW6692 = &WriteW6692; |
1076 | cs->readisacfifo = &ReadISACfifo; | 1078 | cs->readisacfifo = &ReadISACfifo; |
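Aside: the hisax conversions above all share one shape. The bottom-half handler that used to receive its private pointer directly now receives the work_struct and recovers the owning structure with container_of(), while INIT_WORK() loses its data argument. A minimal sketch of that shape against the 2.6.20-era workqueue API; the names (my_card, my_bh, my_card_setup) are hypothetical and not part of the patch:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_card {
	struct work_struct tqueue;	/* work item embedded in the driver state */
	int event;
};

/* was: static void my_bh(struct my_card *cs), with cs passed via INIT_WORK() */
static void my_bh(struct work_struct *work)
{
	/* recover the owning structure from the embedded work item */
	struct my_card *cs = container_of(work, struct my_card, tqueue);

	if (cs->event)
		printk(KERN_DEBUG "my_card: event %d\n", cs->event);
}

static void my_card_setup(struct my_card *cs)
{
	INIT_WORK(&cs->tqueue, my_bh);	/* no data argument any more */
	schedule_work(&cs->tqueue);
}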
diff --git a/drivers/isdn/hysdn/boardergo.c b/drivers/isdn/hysdn/boardergo.c index 82e42a80dc4b..a1206498a1cf 100644 --- a/drivers/isdn/hysdn/boardergo.c +++ b/drivers/isdn/hysdn/boardergo.c | |||
@@ -71,8 +71,9 @@ ergo_interrupt(int intno, void *dev_id) | |||
71 | /* may be queued from everywhere (interrupts included). */ | 71 | /* may be queued from everywhere (interrupts included). */ |
72 | /******************************************************************************/ | 72 | /******************************************************************************/ |
73 | static void | 73 | static void |
74 | ergo_irq_bh(hysdn_card * card) | 74 | ergo_irq_bh(struct work_struct *ugli_api) |
75 | { | 75 | { |
76 | hysdn_card * card = container_of(ugli_api, hysdn_card, irq_queue); | ||
76 | tErgDpram *dpr; | 77 | tErgDpram *dpr; |
77 | int again; | 78 | int again; |
78 | unsigned long flags; | 79 | unsigned long flags; |
@@ -442,7 +443,7 @@ ergo_inithardware(hysdn_card * card) | |||
442 | card->writebootseq = ergo_writebootseq; | 443 | card->writebootseq = ergo_writebootseq; |
443 | card->waitpofready = ergo_waitpofready; | 444 | card->waitpofready = ergo_waitpofready; |
444 | card->set_errlog_state = ergo_set_errlog_state; | 445 | card->set_errlog_state = ergo_set_errlog_state; |
445 | INIT_WORK(&card->irq_queue, (void *) (void *) ergo_irq_bh, card); | 446 | INIT_WORK(&card->irq_queue, ergo_irq_bh); |
446 | card->hysdn_lock = SPIN_LOCK_UNLOCKED; | 447 | card->hysdn_lock = SPIN_LOCK_UNLOCKED; |
447 | 448 | ||
448 | return (0); | 449 | return (0); |
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c index 1f8d6ae66b41..2e4daebfb7e0 100644 --- a/drivers/isdn/i4l/isdn_net.c +++ b/drivers/isdn/i4l/isdn_net.c | |||
@@ -984,9 +984,9 @@ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb) | |||
984 | /* | 984 | /* |
985 | * called from tq_immediate | 985 | * called from tq_immediate |
986 | */ | 986 | */ |
987 | static void isdn_net_softint(void *private) | 987 | static void isdn_net_softint(struct work_struct *work) |
988 | { | 988 | { |
989 | isdn_net_local *lp = private; | 989 | isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); |
990 | struct sk_buff *skb; | 990 | struct sk_buff *skb; |
991 | 991 | ||
992 | spin_lock_bh(&lp->xmit_lock); | 992 | spin_lock_bh(&lp->xmit_lock); |
@@ -2596,7 +2596,7 @@ isdn_net_new(char *name, struct net_device *master) | |||
2596 | netdev->local->netdev = netdev; | 2596 | netdev->local->netdev = netdev; |
2597 | netdev->local->next = netdev->local; | 2597 | netdev->local->next = netdev->local; |
2598 | 2598 | ||
2599 | INIT_WORK(&netdev->local->tqueue, (void *)(void *) isdn_net_softint, netdev->local); | 2599 | INIT_WORK(&netdev->local->tqueue, isdn_net_softint); |
2600 | spin_lock_init(&netdev->local->xmit_lock); | 2600 | spin_lock_init(&netdev->local->xmit_lock); |
2601 | 2601 | ||
2602 | netdev->local->isdn_device = -1; | 2602 | netdev->local->isdn_device = -1; |
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c index 6ead5e1508b7..1966f3410a13 100644 --- a/drivers/isdn/pcbit/drv.c +++ b/drivers/isdn/pcbit/drv.c | |||
@@ -68,8 +68,6 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list); | |||
68 | static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); | 68 | static int pcbit_check_msn(struct pcbit_dev *dev, char *msn); |
69 | 69 | ||
70 | 70 | ||
71 | extern void pcbit_deliver(void * data); | ||
72 | |||
73 | int pcbit_init_dev(int board, int mem_base, int irq) | 71 | int pcbit_init_dev(int board, int mem_base, int irq) |
74 | { | 72 | { |
75 | struct pcbit_dev *dev; | 73 | struct pcbit_dev *dev; |
@@ -129,7 +127,7 @@ int pcbit_init_dev(int board, int mem_base, int irq) | |||
129 | memset(dev->b2, 0, sizeof(struct pcbit_chan)); | 127 | memset(dev->b2, 0, sizeof(struct pcbit_chan)); |
130 | dev->b2->id = 1; | 128 | dev->b2->id = 1; |
131 | 129 | ||
132 | INIT_WORK(&dev->qdelivery, pcbit_deliver, dev); | 130 | INIT_WORK(&dev->qdelivery, pcbit_deliver); |
133 | 131 | ||
134 | /* | 132 | /* |
135 | * interrupts | 133 | * interrupts |
diff --git a/drivers/isdn/pcbit/layer2.c b/drivers/isdn/pcbit/layer2.c index 937fd2120381..0c9f6df873fc 100644 --- a/drivers/isdn/pcbit/layer2.c +++ b/drivers/isdn/pcbit/layer2.c | |||
@@ -67,7 +67,6 @@ extern void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg, | |||
67 | * Prototypes | 67 | * Prototypes |
68 | */ | 68 | */ |
69 | 69 | ||
70 | void pcbit_deliver(void *data); | ||
71 | static void pcbit_transmit(struct pcbit_dev *dev); | 70 | static void pcbit_transmit(struct pcbit_dev *dev); |
72 | 71 | ||
73 | static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); | 72 | static void pcbit_recv_ack(struct pcbit_dev *dev, unsigned char ack); |
@@ -299,11 +298,12 @@ pcbit_transmit(struct pcbit_dev *dev) | |||
299 | */ | 298 | */ |
300 | 299 | ||
301 | void | 300 | void |
302 | pcbit_deliver(void *data) | 301 | pcbit_deliver(struct work_struct *work) |
303 | { | 302 | { |
304 | struct frame_buf *frame; | 303 | struct frame_buf *frame; |
305 | unsigned long flags, msg; | 304 | unsigned long flags, msg; |
306 | struct pcbit_dev *dev = (struct pcbit_dev *) data; | 305 | struct pcbit_dev *dev = |
306 | container_of(work, struct pcbit_dev, qdelivery); | ||
307 | 307 | ||
308 | spin_lock_irqsave(&dev->lock, flags); | 308 | spin_lock_irqsave(&dev->lock, flags); |
309 | 309 | ||
diff --git a/drivers/isdn/pcbit/pcbit.h b/drivers/isdn/pcbit/pcbit.h index 388bacefd23a..19c18e88ff16 100644 --- a/drivers/isdn/pcbit/pcbit.h +++ b/drivers/isdn/pcbit/pcbit.h | |||
@@ -166,4 +166,6 @@ struct pcbit_ioctl { | |||
166 | #define L2_RUNNING 5 | 166 | #define L2_RUNNING 5 |
167 | #define L2_ERROR 6 | 167 | #define L2_ERROR 6 |
168 | 168 | ||
169 | extern void pcbit_deliver(struct work_struct *work); | ||
170 | |||
169 | #endif | 171 | #endif |
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index be0bd34ff6f9..d43ea81d6df9 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c | |||
@@ -267,12 +267,12 @@ adb_probe_task(void *x) | |||
267 | } | 267 | } |
268 | 268 | ||
269 | static void | 269 | static void |
270 | __adb_probe_task(void *data) | 270 | __adb_probe_task(struct work_struct *bullshit) |
271 | { | 271 | { |
272 | adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL); | 272 | adb_probe_task_pid = kernel_thread(adb_probe_task, NULL, SIGCHLD | CLONE_KERNEL); |
273 | } | 273 | } |
274 | 274 | ||
275 | static DECLARE_WORK(adb_reset_work, __adb_probe_task, NULL); | 275 | static DECLARE_WORK(adb_reset_work, __adb_probe_task); |
276 | 276 | ||
277 | int | 277 | int |
278 | adb_reset_bus(void) | 278 | adb_reset_bus(void) |
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index f1b6f563673a..5ed41fe84e57 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c | |||
@@ -48,7 +48,8 @@ struct rackmeter_dma { | |||
48 | } ____cacheline_aligned; | 48 | } ____cacheline_aligned; |
49 | 49 | ||
50 | struct rackmeter_cpu { | 50 | struct rackmeter_cpu { |
51 | struct work_struct sniffer; | 51 | struct delayed_work sniffer; |
52 | struct rackmeter *rm; | ||
52 | cputime64_t prev_wall; | 53 | cputime64_t prev_wall; |
53 | cputime64_t prev_idle; | 54 | cputime64_t prev_idle; |
54 | int zero; | 55 | int zero; |
@@ -208,11 +209,12 @@ static void rackmeter_setup_dbdma(struct rackmeter *rm) | |||
208 | rackmeter_do_pause(rm, 0); | 209 | rackmeter_do_pause(rm, 0); |
209 | } | 210 | } |
210 | 211 | ||
211 | static void rackmeter_do_timer(void *data) | 212 | static void rackmeter_do_timer(struct work_struct *work) |
212 | { | 213 | { |
213 | struct rackmeter *rm = data; | 214 | struct rackmeter_cpu *rcpu = |
215 | container_of(work, struct rackmeter_cpu, sniffer.work); | ||
216 | struct rackmeter *rm = rcpu->rm; | ||
214 | unsigned int cpu = smp_processor_id(); | 217 | unsigned int cpu = smp_processor_id(); |
215 | struct rackmeter_cpu *rcpu = &rm->cpu[cpu]; | ||
216 | cputime64_t cur_jiffies, total_idle_ticks; | 218 | cputime64_t cur_jiffies, total_idle_ticks; |
217 | unsigned int total_ticks, idle_ticks; | 219 | unsigned int total_ticks, idle_ticks; |
218 | int i, offset, load, cumm, pause; | 220 | int i, offset, load, cumm, pause; |
@@ -263,8 +265,10 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) | |||
263 | * on those machines yet | 265 | * on those machines yet |
264 | */ | 266 | */ |
265 | 267 | ||
266 | INIT_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer, rm); | 268 | rm->cpu[0].rm = rm; |
267 | INIT_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer, rm); | 269 | INIT_DELAYED_WORK(&rm->cpu[0].sniffer, rackmeter_do_timer); |
270 | rm->cpu[1].rm = rm; | ||
271 | INIT_DELAYED_WORK(&rm->cpu[1].sniffer, rackmeter_do_timer); | ||
268 | 272 | ||
269 | for_each_online_cpu(cpu) { | 273 | for_each_online_cpu(cpu) { |
270 | struct rackmeter_cpu *rcpu; | 274 | struct rackmeter_cpu *rcpu; |
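rack-meter shows the two extra steps needed when the work used to carry an arbitrary data pointer: the item becomes a struct delayed_work, so container_of() must name the .work member inside it, and state that used to arrive through the data argument needs an explicit back-pointer in the containing structure. A hedged sketch with hypothetical names (my_meter, my_meter_cpu):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_meter;			/* opaque shared state, hypothetical */

struct my_meter_cpu {
	struct delayed_work sniffer;	/* was a plain struct work_struct */
	struct my_meter *rm;		/* back-pointer, since no data arg exists */
};

static void my_meter_do_timer(struct work_struct *work)
{
	/* the work_struct sits inside the delayed_work: note ".work" */
	struct my_meter_cpu *rcpu =
		container_of(work, struct my_meter_cpu, sniffer.work);
	struct my_meter *rm = rcpu->rm;

	(void)rm;			/* sample load, update the meter, ... */
	schedule_delayed_work(&rcpu->sniffer, HZ);	/* re-arm */
}

static void my_meter_init_cpu(struct my_meter_cpu *rcpu, struct my_meter *rm)
{
	rcpu->rm = rm;
	INIT_DELAYED_WORK(&rcpu->sniffer, my_meter_do_timer);
	schedule_delayed_work(&rcpu->sniffer, HZ);
}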
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 4f724cdd2efa..6dde27ab79a8 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -601,7 +601,7 @@ core_initcall(smu_late_init); | |||
601 | * sysfs visibility | 601 | * sysfs visibility |
602 | */ | 602 | */ |
603 | 603 | ||
604 | static void smu_expose_childs(void *unused) | 604 | static void smu_expose_childs(struct work_struct *unused) |
605 | { | 605 | { |
606 | struct device_node *np; | 606 | struct device_node *np; |
607 | 607 | ||
@@ -611,7 +611,7 @@ static void smu_expose_childs(void *unused) | |||
611 | &smu->of_dev->dev); | 611 | &smu->of_dev->dev); |
612 | } | 612 | } |
613 | 613 | ||
614 | static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs, NULL); | 614 | static DECLARE_WORK(smu_expose_childs_work, smu_expose_childs); |
615 | 615 | ||
616 | static int smu_platform_probe(struct of_device* dev, | 616 | static int smu_platform_probe(struct of_device* dev, |
617 | const struct of_device_id *match) | 617 | const struct of_device_id *match) |
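adb and smu show the statically-declared case: DECLARE_WORK() likewise drops its data argument, and handlers that never used the pointer simply take, and ignore, the work_struct. Sketch with hypothetical names:

#include <linux/workqueue.h>

/* handler for a static work item: nothing to recover, argument unused */
static void my_expose_children(struct work_struct *unused)
{
	/* ... walk global state, register child devices, ... */
}

/* was DECLARE_WORK(name, func, data); the data slot is gone */
static DECLARE_WORK(my_expose_work, my_expose_children);

static void my_probe_done(void)
{
	schedule_work(&my_expose_work);
}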
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 08a40f4e4f60..ed2d4ef27fd8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -458,11 +458,11 @@ static void dec_pending(struct crypt_io *io, int error) | |||
458 | * interrupt context. | 458 | * interrupt context. |
459 | */ | 459 | */ |
460 | static struct workqueue_struct *_kcryptd_workqueue; | 460 | static struct workqueue_struct *_kcryptd_workqueue; |
461 | static void kcryptd_do_work(void *data); | 461 | static void kcryptd_do_work(struct work_struct *work); |
462 | 462 | ||
463 | static void kcryptd_queue_io(struct crypt_io *io) | 463 | static void kcryptd_queue_io(struct crypt_io *io) |
464 | { | 464 | { |
465 | INIT_WORK(&io->work, kcryptd_do_work, io); | 465 | INIT_WORK(&io->work, kcryptd_do_work); |
466 | queue_work(_kcryptd_workqueue, &io->work); | 466 | queue_work(_kcryptd_workqueue, &io->work); |
467 | } | 467 | } |
468 | 468 | ||
@@ -618,9 +618,9 @@ static void process_read_endio(struct crypt_io *io) | |||
618 | dec_pending(io, crypt_convert(cc, &ctx)); | 618 | dec_pending(io, crypt_convert(cc, &ctx)); |
619 | } | 619 | } |
620 | 620 | ||
621 | static void kcryptd_do_work(void *data) | 621 | static void kcryptd_do_work(struct work_struct *work) |
622 | { | 622 | { |
623 | struct crypt_io *io = data; | 623 | struct crypt_io *io = container_of(work, struct crypt_io, work); |
624 | 624 | ||
625 | if (io->post_process) | 625 | if (io->post_process) |
626 | process_read_endio(io); | 626 | process_read_endio(io); |
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index d754e0bc6e90..e77ee6fd1044 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -104,8 +104,8 @@ typedef int (*action_fn) (struct pgpath *pgpath); | |||
104 | static kmem_cache_t *_mpio_cache; | 104 | static kmem_cache_t *_mpio_cache; |
105 | 105 | ||
106 | struct workqueue_struct *kmultipathd; | 106 | struct workqueue_struct *kmultipathd; |
107 | static void process_queued_ios(void *data); | 107 | static void process_queued_ios(struct work_struct *work); |
108 | static void trigger_event(void *data); | 108 | static void trigger_event(struct work_struct *work); |
109 | 109 | ||
110 | 110 | ||
111 | /*----------------------------------------------- | 111 | /*----------------------------------------------- |
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti) | |||
173 | INIT_LIST_HEAD(&m->priority_groups); | 173 | INIT_LIST_HEAD(&m->priority_groups); |
174 | spin_lock_init(&m->lock); | 174 | spin_lock_init(&m->lock); |
175 | m->queue_io = 1; | 175 | m->queue_io = 1; |
176 | INIT_WORK(&m->process_queued_ios, process_queued_ios, m); | 176 | INIT_WORK(&m->process_queued_ios, process_queued_ios); |
177 | INIT_WORK(&m->trigger_event, trigger_event, m); | 177 | INIT_WORK(&m->trigger_event, trigger_event); |
178 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); | 178 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
179 | if (!m->mpio_pool) { | 179 | if (!m->mpio_pool) { |
180 | kfree(m); | 180 | kfree(m); |
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m) | |||
379 | } | 379 | } |
380 | } | 380 | } |
381 | 381 | ||
382 | static void process_queued_ios(void *data) | 382 | static void process_queued_ios(struct work_struct *work) |
383 | { | 383 | { |
384 | struct multipath *m = (struct multipath *) data; | 384 | struct multipath *m = |
385 | container_of(work, struct multipath, process_queued_ios); | ||
385 | struct hw_handler *hwh = &m->hw_handler; | 386 | struct hw_handler *hwh = &m->hw_handler; |
386 | struct pgpath *pgpath = NULL; | 387 | struct pgpath *pgpath = NULL; |
387 | unsigned init_required = 0, must_queue = 1; | 388 | unsigned init_required = 0, must_queue = 1; |
@@ -421,9 +422,10 @@ out: | |||
421 | * An event is triggered whenever a path is taken out of use. | 422 | * An event is triggered whenever a path is taken out of use. |
422 | * Includes path failure and PG bypass. | 423 | * Includes path failure and PG bypass. |
423 | */ | 424 | */ |
424 | static void trigger_event(void *data) | 425 | static void trigger_event(struct work_struct *work) |
425 | { | 426 | { |
426 | struct multipath *m = (struct multipath *) data; | 427 | struct multipath *m = |
428 | container_of(work, struct multipath, trigger_event); | ||
427 | 429 | ||
428 | dm_table_event(m->ti->table); | 430 | dm_table_event(m->ti->table); |
429 | } | 431 | } |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 48a653b3f518..fc8cbb168e3e 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -883,7 +883,7 @@ static void do_mirror(struct mirror_set *ms) | |||
883 | do_writes(ms, &writes); | 883 | do_writes(ms, &writes); |
884 | } | 884 | } |
885 | 885 | ||
886 | static void do_work(void *ignored) | 886 | static void do_work(struct work_struct *ignored) |
887 | { | 887 | { |
888 | struct mirror_set *ms; | 888 | struct mirror_set *ms; |
889 | 889 | ||
@@ -1269,7 +1269,7 @@ static int __init dm_mirror_init(void) | |||
1269 | dm_dirty_log_exit(); | 1269 | dm_dirty_log_exit(); |
1270 | return r; | 1270 | return r; |
1271 | } | 1271 | } |
1272 | INIT_WORK(&_kmirrord_work, do_work, NULL); | 1272 | INIT_WORK(&_kmirrord_work, do_work); |
1273 | 1273 | ||
1274 | r = dm_register_target(&mirror_target); | 1274 | r = dm_register_target(&mirror_target); |
1275 | if (r < 0) { | 1275 | if (r < 0) { |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5281e0094072..91c7aa1fed0e 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #define SNAPSHOT_PAGES 256 | 40 | #define SNAPSHOT_PAGES 256 |
41 | 41 | ||
42 | struct workqueue_struct *ksnapd; | 42 | struct workqueue_struct *ksnapd; |
43 | static void flush_queued_bios(void *data); | 43 | static void flush_queued_bios(struct work_struct *work); |
44 | 44 | ||
45 | struct pending_exception { | 45 | struct pending_exception { |
46 | struct exception e; | 46 | struct exception e; |
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
528 | } | 528 | } |
529 | 529 | ||
530 | bio_list_init(&s->queued_bios); | 530 | bio_list_init(&s->queued_bios); |
531 | INIT_WORK(&s->queued_bios_work, flush_queued_bios, s); | 531 | INIT_WORK(&s->queued_bios_work, flush_queued_bios); |
532 | 532 | ||
533 | /* Add snapshot to the list of snapshots for this origin */ | 533 | /* Add snapshot to the list of snapshots for this origin */ |
534 | /* Exceptions aren't triggered till snapshot_resume() is called */ | 534 | /* Exceptions aren't triggered till snapshot_resume() is called */ |
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio) | |||
603 | } | 603 | } |
604 | } | 604 | } |
605 | 605 | ||
606 | static void flush_queued_bios(void *data) | 606 | static void flush_queued_bios(struct work_struct *work) |
607 | { | 607 | { |
608 | struct dm_snapshot *s = (struct dm_snapshot *) data; | 608 | struct dm_snapshot *s = |
609 | container_of(work, struct dm_snapshot, queued_bios_work); | ||
609 | struct bio *queued_bios; | 610 | struct bio *queued_bios; |
610 | unsigned long flags; | 611 | unsigned long flags; |
611 | 612 | ||
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index f1db6eff4857..b3c01496c737 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c | |||
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) | |||
417 | /* | 417 | /* |
418 | * kcopyd does this every time it's woken up. | 418 | * kcopyd does this every time it's woken up. |
419 | */ | 419 | */ |
420 | static void do_work(void *ignored) | 420 | static void do_work(struct work_struct *ignored) |
421 | { | 421 | { |
422 | /* | 422 | /* |
423 | * The order that these are called is *very* important. | 423 | * The order that these are called is *very* important. |
@@ -628,7 +628,7 @@ static int kcopyd_init(void) | |||
628 | } | 628 | } |
629 | 629 | ||
630 | kcopyd_clients++; | 630 | kcopyd_clients++; |
631 | INIT_WORK(&_kcopyd_work, do_work, NULL); | 631 | INIT_WORK(&_kcopyd_work, do_work); |
632 | mutex_unlock(&kcopyd_init_lock); | 632 | mutex_unlock(&kcopyd_init_lock); |
633 | return 0; | 633 | return 0; |
634 | } | 634 | } |
diff --git a/drivers/media/dvb/b2c2/flexcop-pci.c b/drivers/media/dvb/b2c2/flexcop-pci.c index 06893243f3d4..6e166801505d 100644 --- a/drivers/media/dvb/b2c2/flexcop-pci.c +++ b/drivers/media/dvb/b2c2/flexcop-pci.c | |||
@@ -63,7 +63,7 @@ struct flexcop_pci { | |||
63 | 63 | ||
64 | unsigned long last_irq; | 64 | unsigned long last_irq; |
65 | 65 | ||
66 | struct work_struct irq_check_work; | 66 | struct delayed_work irq_check_work; |
67 | 67 | ||
68 | struct flexcop_device *fc_dev; | 68 | struct flexcop_device *fc_dev; |
69 | }; | 69 | }; |
@@ -97,9 +97,10 @@ static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc, flexcop_ibi_regi | |||
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static void flexcop_pci_irq_check_work(void *data) | 100 | static void flexcop_pci_irq_check_work(struct work_struct *work) |
101 | { | 101 | { |
102 | struct flexcop_pci *fc_pci = data; | 102 | struct flexcop_pci *fc_pci = |
103 | container_of(work, struct flexcop_pci, irq_check_work.work); | ||
103 | struct flexcop_device *fc = fc_pci->fc_dev; | 104 | struct flexcop_device *fc = fc_pci->fc_dev; |
104 | 105 | ||
105 | flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); | 106 | flexcop_ibi_value v = fc->read_ibi_reg(fc,sram_dest_reg_714); |
@@ -371,7 +372,7 @@ static int flexcop_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e | |||
371 | if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) | 372 | if ((ret = flexcop_pci_dma_init(fc_pci)) != 0) |
372 | goto err_fc_exit; | 373 | goto err_fc_exit; |
373 | 374 | ||
374 | INIT_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work, fc_pci); | 375 | INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work); |
375 | 376 | ||
376 | return ret; | 377 | return ret; |
377 | 378 | ||
diff --git a/drivers/media/dvb/cinergyT2/cinergyT2.c b/drivers/media/dvb/cinergyT2/cinergyT2.c index 8a7dd507cf6e..206c13e47a06 100644 --- a/drivers/media/dvb/cinergyT2/cinergyT2.c +++ b/drivers/media/dvb/cinergyT2/cinergyT2.c | |||
@@ -128,7 +128,7 @@ struct cinergyt2 { | |||
128 | 128 | ||
129 | struct dvbt_set_parameters_msg param; | 129 | struct dvbt_set_parameters_msg param; |
130 | struct dvbt_get_status_msg status; | 130 | struct dvbt_get_status_msg status; |
131 | struct work_struct query_work; | 131 | struct delayed_work query_work; |
132 | 132 | ||
133 | wait_queue_head_t poll_wq; | 133 | wait_queue_head_t poll_wq; |
134 | int pending_fe_events; | 134 | int pending_fe_events; |
@@ -142,7 +142,7 @@ struct cinergyt2 { | |||
142 | #ifdef ENABLE_RC | 142 | #ifdef ENABLE_RC |
143 | struct input_dev *rc_input_dev; | 143 | struct input_dev *rc_input_dev; |
144 | char phys[64]; | 144 | char phys[64]; |
145 | struct work_struct rc_query_work; | 145 | struct delayed_work rc_query_work; |
146 | int rc_input_event; | 146 | int rc_input_event; |
147 | u32 rc_last_code; | 147 | u32 rc_last_code; |
148 | unsigned long last_event_jiffies; | 148 | unsigned long last_event_jiffies; |
@@ -723,9 +723,10 @@ static struct dvb_device cinergyt2_fe_template = { | |||
723 | 723 | ||
724 | #ifdef ENABLE_RC | 724 | #ifdef ENABLE_RC |
725 | 725 | ||
726 | static void cinergyt2_query_rc (void *data) | 726 | static void cinergyt2_query_rc (struct work_struct *work) |
727 | { | 727 | { |
728 | struct cinergyt2 *cinergyt2 = data; | 728 | struct cinergyt2 *cinergyt2 = |
729 | container_of(work, struct cinergyt2, rc_query_work.work); | ||
729 | char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; | 730 | char buf[1] = { CINERGYT2_EP1_GET_RC_EVENTS }; |
730 | struct cinergyt2_rc_event rc_events[12]; | 731 | struct cinergyt2_rc_event rc_events[12]; |
731 | int n, len, i; | 732 | int n, len, i; |
@@ -806,7 +807,7 @@ static int cinergyt2_register_rc(struct cinergyt2 *cinergyt2) | |||
806 | strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); | 807 | strlcat(cinergyt2->phys, "/input0", sizeof(cinergyt2->phys)); |
807 | cinergyt2->rc_input_event = KEY_MAX; | 808 | cinergyt2->rc_input_event = KEY_MAX; |
808 | cinergyt2->rc_last_code = ~0; | 809 | cinergyt2->rc_last_code = ~0; |
809 | INIT_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc, cinergyt2); | 810 | INIT_DELAYED_WORK(&cinergyt2->rc_query_work, cinergyt2_query_rc); |
810 | 811 | ||
811 | input_dev->name = DRIVER_NAME " remote control"; | 812 | input_dev->name = DRIVER_NAME " remote control"; |
812 | input_dev->phys = cinergyt2->phys; | 813 | input_dev->phys = cinergyt2->phys; |
@@ -847,9 +848,10 @@ static inline void cinergyt2_resume_rc(struct cinergyt2 *cinergyt2) { } | |||
847 | 848 | ||
848 | #endif /* ENABLE_RC */ | 849 | #endif /* ENABLE_RC */ |
849 | 850 | ||
850 | static void cinergyt2_query (void *data) | 851 | static void cinergyt2_query (struct work_struct *work) |
851 | { | 852 | { |
852 | struct cinergyt2 *cinergyt2 = (struct cinergyt2 *) data; | 853 | struct cinergyt2 *cinergyt2 = |
854 | container_of(work, struct cinergyt2, query_work.work); | ||
853 | char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; | 855 | char cmd [] = { CINERGYT2_EP1_GET_TUNER_STATUS }; |
854 | struct dvbt_get_status_msg *s = &cinergyt2->status; | 856 | struct dvbt_get_status_msg *s = &cinergyt2->status; |
855 | uint8_t lock_bits; | 857 | uint8_t lock_bits; |
@@ -893,7 +895,7 @@ static int cinergyt2_probe (struct usb_interface *intf, | |||
893 | 895 | ||
894 | mutex_init(&cinergyt2->sem); | 896 | mutex_init(&cinergyt2->sem); |
895 | init_waitqueue_head (&cinergyt2->poll_wq); | 897 | init_waitqueue_head (&cinergyt2->poll_wq); |
896 | INIT_WORK(&cinergyt2->query_work, cinergyt2_query, cinergyt2); | 898 | INIT_DELAYED_WORK(&cinergyt2->query_work, cinergyt2_query); |
897 | 899 | ||
898 | cinergyt2->udev = interface_to_usbdev(intf); | 900 | cinergyt2->udev = interface_to_usbdev(intf); |
899 | cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; | 901 | cinergyt2->param.cmd = CINERGYT2_EP1_SET_TUNER_PARAMETERS; |
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 8859ab74f0fe..ebf4dc5190f6 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c | |||
@@ -127,6 +127,7 @@ struct dvb_net_priv { | |||
127 | int in_use; | 127 | int in_use; |
128 | struct net_device_stats stats; | 128 | struct net_device_stats stats; |
129 | u16 pid; | 129 | u16 pid; |
130 | struct net_device *net; | ||
130 | struct dvb_net *host; | 131 | struct dvb_net *host; |
131 | struct dmx_demux *demux; | 132 | struct dmx_demux *demux; |
132 | struct dmx_section_feed *secfeed; | 133 | struct dmx_section_feed *secfeed; |
@@ -1123,10 +1124,11 @@ static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc) | |||
1123 | } | 1124 | } |
1124 | 1125 | ||
1125 | 1126 | ||
1126 | static void wq_set_multicast_list (void *data) | 1127 | static void wq_set_multicast_list (struct work_struct *work) |
1127 | { | 1128 | { |
1128 | struct net_device *dev = data; | 1129 | struct dvb_net_priv *priv = |
1129 | struct dvb_net_priv *priv = dev->priv; | 1130 | container_of(work, struct dvb_net_priv, set_multicast_list_wq); |
1131 | struct net_device *dev = priv->net; | ||
1130 | 1132 | ||
1131 | dvb_net_feed_stop(dev); | 1133 | dvb_net_feed_stop(dev); |
1132 | priv->rx_mode = RX_MODE_UNI; | 1134 | priv->rx_mode = RX_MODE_UNI; |
@@ -1167,9 +1169,11 @@ static void dvb_net_set_multicast_list (struct net_device *dev) | |||
1167 | } | 1169 | } |
1168 | 1170 | ||
1169 | 1171 | ||
1170 | static void wq_restart_net_feed (void *data) | 1172 | static void wq_restart_net_feed (struct work_struct *work) |
1171 | { | 1173 | { |
1172 | struct net_device *dev = data; | 1174 | struct dvb_net_priv *priv = |
1175 | container_of(work, struct dvb_net_priv, restart_net_feed_wq); | ||
1176 | struct net_device *dev = priv->net; | ||
1173 | 1177 | ||
1174 | if (netif_running(dev)) { | 1178 | if (netif_running(dev)) { |
1175 | dvb_net_feed_stop(dev); | 1179 | dvb_net_feed_stop(dev); |
@@ -1276,6 +1280,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) | |||
1276 | dvbnet->device[if_num] = net; | 1280 | dvbnet->device[if_num] = net; |
1277 | 1281 | ||
1278 | priv = net->priv; | 1282 | priv = net->priv; |
1283 | priv->net = net; | ||
1279 | priv->demux = dvbnet->demux; | 1284 | priv->demux = dvbnet->demux; |
1280 | priv->pid = pid; | 1285 | priv->pid = pid; |
1281 | priv->rx_mode = RX_MODE_UNI; | 1286 | priv->rx_mode = RX_MODE_UNI; |
@@ -1284,8 +1289,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype) | |||
1284 | priv->feedtype = feedtype; | 1289 | priv->feedtype = feedtype; |
1285 | reset_ule(priv); | 1290 | reset_ule(priv); |
1286 | 1291 | ||
1287 | INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list, net); | 1292 | INIT_WORK(&priv->set_multicast_list_wq, wq_set_multicast_list); |
1288 | INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed, net); | 1293 | INIT_WORK(&priv->restart_net_feed_wq, wq_restart_net_feed); |
1289 | mutex_init(&priv->mutex); | 1294 | mutex_init(&priv->mutex); |
1290 | 1295 | ||
1291 | net->base_addr = pid; | 1296 | net->base_addr = pid; |
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c index 0a3a0b6c2350..794e4471561c 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb-remote.c +++ b/drivers/media/dvb/dvb-usb/dvb-usb-remote.c | |||
@@ -13,9 +13,10 @@ | |||
13 | * | 13 | * |
14 | * TODO: Fix the repeat rate of the input device. | 14 | * TODO: Fix the repeat rate of the input device. |
15 | */ | 15 | */ |
16 | static void dvb_usb_read_remote_control(void *data) | 16 | static void dvb_usb_read_remote_control(struct work_struct *work) |
17 | { | 17 | { |
18 | struct dvb_usb_device *d = data; | 18 | struct dvb_usb_device *d = |
19 | container_of(work, struct dvb_usb_device, rc_query_work.work); | ||
19 | u32 event; | 20 | u32 event; |
20 | int state; | 21 | int state; |
21 | 22 | ||
@@ -128,7 +129,7 @@ int dvb_usb_remote_init(struct dvb_usb_device *d) | |||
128 | 129 | ||
129 | input_register_device(d->rc_input_dev); | 130 | input_register_device(d->rc_input_dev); |
130 | 131 | ||
131 | INIT_WORK(&d->rc_query_work, dvb_usb_read_remote_control, d); | 132 | INIT_DELAYED_WORK(&d->rc_query_work, dvb_usb_read_remote_control); |
132 | 133 | ||
133 | info("schedule remote query interval to %d msecs.", d->props.rc_interval); | 134 | info("schedule remote query interval to %d msecs.", d->props.rc_interval); |
134 | schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); | 135 | schedule_delayed_work(&d->rc_query_work,msecs_to_jiffies(d->props.rc_interval)); |
diff --git a/drivers/media/dvb/dvb-usb/dvb-usb.h b/drivers/media/dvb/dvb-usb/dvb-usb.h index 376c45a8e779..0d721731a524 100644 --- a/drivers/media/dvb/dvb-usb/dvb-usb.h +++ b/drivers/media/dvb/dvb-usb/dvb-usb.h | |||
@@ -369,7 +369,7 @@ struct dvb_usb_device { | |||
369 | /* remote control */ | 369 | /* remote control */ |
370 | struct input_dev *rc_input_dev; | 370 | struct input_dev *rc_input_dev; |
371 | char rc_phys[64]; | 371 | char rc_phys[64]; |
372 | struct work_struct rc_query_work; | 372 | struct delayed_work rc_query_work; |
373 | u32 last_event; | 373 | u32 last_event; |
374 | int last_state; | 374 | int last_state; |
375 | 375 | ||
diff --git a/drivers/media/video/cpia_pp.c b/drivers/media/video/cpia_pp.c index 41f4b8d17559..b12cec94f4cc 100644 --- a/drivers/media/video/cpia_pp.c +++ b/drivers/media/video/cpia_pp.c | |||
@@ -82,6 +82,8 @@ struct pp_cam_entry { | |||
82 | struct pardevice *pdev; | 82 | struct pardevice *pdev; |
83 | struct parport *port; | 83 | struct parport *port; |
84 | struct work_struct cb_task; | 84 | struct work_struct cb_task; |
85 | void (*cb_func)(void *cbdata); | ||
86 | void *cb_data; | ||
85 | int open_count; | 87 | int open_count; |
86 | wait_queue_head_t wq_stream; | 88 | wait_queue_head_t wq_stream; |
87 | /* image state flags */ | 89 | /* image state flags */ |
@@ -130,6 +132,20 @@ static void cpia_parport_disable_irq( struct parport *port ) { | |||
130 | #define PARPORT_CHUNK_SIZE PAGE_SIZE | 132 | #define PARPORT_CHUNK_SIZE PAGE_SIZE |
131 | 133 | ||
132 | 134 | ||
135 | static void cpia_pp_run_callback(struct work_struct *work) | ||
136 | { | ||
137 | void (*cb_func)(void *cbdata); | ||
138 | void *cb_data; | ||
139 | struct pp_cam_entry *cam; | ||
140 | |||
141 | cam = container_of(work, struct pp_cam_entry, cb_task); | ||
142 | cb_func = cam->cb_func; | ||
143 | cb_data = cam->cb_data; | ||
144 | work_release(work); | ||
145 | |||
146 | cb_func(cb_data); | ||
147 | } | ||
148 | |||
133 | /**************************************************************************** | 149 | /**************************************************************************** |
134 | * | 150 | * |
135 | * CPiA-specific low-level parport functions for nibble uploads | 151 | * CPiA-specific low-level parport functions for nibble uploads |
@@ -664,7 +680,9 @@ static int cpia_pp_registerCallback(void *privdata, void (*cb)(void *cbdata), vo | |||
664 | int retval = 0; | 680 | int retval = 0; |
665 | 681 | ||
666 | if(cam->port->irq != PARPORT_IRQ_NONE) { | 682 | if(cam->port->irq != PARPORT_IRQ_NONE) { |
667 | INIT_WORK(&cam->cb_task, cb, cbdata); | 683 | cam->cb_func = cb; |
684 | cam->cb_data = cbdata; | ||
685 | INIT_WORK_NAR(&cam->cb_task, cpia_pp_run_callback); | ||
668 | } else { | 686 | } else { |
669 | retval = -1; | 687 | retval = -1; |
670 | } | 688 | } |
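cpia_pp is the awkward case: the old code queued an arbitrary callback with an arbitrary argument, which the new API cannot express directly. The fix is to stash the callback and its argument in the owning structure and queue a fixed dispatch function. (INIT_WORK_NAR() and work_release() belong to the transitional non-auto-release flavour of the 2.6.20 API; plain INIT_WORK() expresses the same pattern.) A sketch with hypothetical names:

#include <linux/workqueue.h>

struct my_cam {
	struct work_struct cb_task;
	void (*cb_func)(void *cbdata);	/* stashed callback */
	void *cb_data;			/* stashed callback argument */
};

static void my_run_callback(struct work_struct *work)
{
	struct my_cam *cam = container_of(work, struct my_cam, cb_task);

	/* dispatch the stored callback with its stored argument */
	cam->cb_func(cam->cb_data);
}

static int my_register_callback(struct my_cam *cam,
				void (*cb)(void *cbdata), void *cbdata)
{
	cam->cb_func = cb;
	cam->cb_data = cbdata;
	INIT_WORK(&cam->cb_task, my_run_callback);
	return 0;
}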
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index 57e1c024a547..e60a0a52e4b2 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c | |||
@@ -145,9 +145,9 @@ static void ir_timer(unsigned long data) | |||
145 | schedule_work(&ir->work); | 145 | schedule_work(&ir->work); |
146 | } | 146 | } |
147 | 147 | ||
148 | static void cx88_ir_work(void *data) | 148 | static void cx88_ir_work(struct work_struct *work) |
149 | { | 149 | { |
150 | struct cx88_IR *ir = data; | 150 | struct cx88_IR *ir = container_of(work, struct cx88_IR, work); |
151 | unsigned long timeout; | 151 | unsigned long timeout; |
152 | 152 | ||
153 | cx88_ir_handle_key(ir); | 153 | cx88_ir_handle_key(ir); |
@@ -308,7 +308,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci) | |||
308 | core->ir = ir; | 308 | core->ir = ir; |
309 | 309 | ||
310 | if (ir->polling) { | 310 | if (ir->polling) { |
311 | INIT_WORK(&ir->work, cx88_ir_work, ir); | 311 | INIT_WORK(&ir->work, cx88_ir_work); |
312 | init_timer(&ir->timer); | 312 | init_timer(&ir->timer); |
313 | ir->timer.function = ir_timer; | 313 | ir->timer.function = ir_timer; |
314 | ir->timer.data = (unsigned long)ir; | 314 | ir->timer.data = (unsigned long)ir; |
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 1457b1602221..ab87e7bfe84f 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c | |||
@@ -268,9 +268,9 @@ static void ir_timer(unsigned long data) | |||
268 | schedule_work(&ir->work); | 268 | schedule_work(&ir->work); |
269 | } | 269 | } |
270 | 270 | ||
271 | static void ir_work(void *data) | 271 | static void ir_work(struct work_struct *work) |
272 | { | 272 | { |
273 | struct IR_i2c *ir = data; | 273 | struct IR_i2c *ir = container_of(work, struct IR_i2c, work); |
274 | ir_key_poll(ir); | 274 | ir_key_poll(ir); |
275 | mod_timer(&ir->timer, jiffies+HZ/10); | 275 | mod_timer(&ir->timer, jiffies+HZ/10); |
276 | } | 276 | } |
@@ -400,7 +400,7 @@ static int ir_attach(struct i2c_adapter *adap, int addr, | |||
400 | ir->input->name,ir->input->phys,adap->name); | 400 | ir->input->name,ir->input->phys,adap->name); |
401 | 401 | ||
402 | /* start polling via eventd */ | 402 | /* start polling via eventd */ |
403 | INIT_WORK(&ir->work, ir_work, ir); | 403 | INIT_WORK(&ir->work, ir_work); |
404 | init_timer(&ir->timer); | 404 | init_timer(&ir->timer); |
405 | ir->timer.function = ir_timer; | 405 | ir->timer.function = ir_timer; |
406 | ir->timer.data = (unsigned long)ir; | 406 | ir->timer.data = (unsigned long)ir; |
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c index f129f316d20e..cf129746205d 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-context.c +++ b/drivers/media/video/pvrusb2/pvrusb2-context.c | |||
@@ -45,16 +45,21 @@ static void pvr2_context_trigger_poll(struct pvr2_context *mp) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | 47 | ||
48 | static void pvr2_context_poll(struct pvr2_context *mp) | 48 | static void pvr2_context_poll(struct work_struct *work) |
49 | { | 49 | { |
50 | struct pvr2_context *mp = | ||
51 | container_of(work, struct pvr2_context, workpoll); | ||
50 | pvr2_context_enter(mp); do { | 52 | pvr2_context_enter(mp); do { |
51 | pvr2_hdw_poll(mp->hdw); | 53 | pvr2_hdw_poll(mp->hdw); |
52 | } while (0); pvr2_context_exit(mp); | 54 | } while (0); pvr2_context_exit(mp); |
53 | } | 55 | } |
54 | 56 | ||
55 | 57 | ||
56 | static void pvr2_context_setup(struct pvr2_context *mp) | 58 | static void pvr2_context_setup(struct work_struct *work) |
57 | { | 59 | { |
60 | struct pvr2_context *mp = | ||
61 | container_of(work, struct pvr2_context, workinit); | ||
62 | |||
58 | pvr2_context_enter(mp); do { | 63 | pvr2_context_enter(mp); do { |
59 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; | 64 | if (!pvr2_hdw_dev_ok(mp->hdw)) break; |
60 | pvr2_hdw_setup(mp->hdw); | 65 | pvr2_hdw_setup(mp->hdw); |
@@ -92,8 +97,8 @@ struct pvr2_context *pvr2_context_create( | |||
92 | } | 97 | } |
93 | 98 | ||
94 | mp->workqueue = create_singlethread_workqueue("pvrusb2"); | 99 | mp->workqueue = create_singlethread_workqueue("pvrusb2"); |
95 | INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp); | 100 | INIT_WORK(&mp->workinit, pvr2_context_setup); |
96 | INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp); | 101 | INIT_WORK(&mp->workpoll, pvr2_context_poll); |
97 | queue_work(mp->workqueue,&mp->workinit); | 102 | queue_work(mp->workqueue,&mp->workinit); |
98 | done: | 103 | done: |
99 | return mp; | 104 | return mp; |
diff --git a/drivers/media/video/saa6588.c b/drivers/media/video/saa6588.c index 7b9859c33018..92eabf88a09b 100644 --- a/drivers/media/video/saa6588.c +++ b/drivers/media/video/saa6588.c | |||
@@ -324,9 +324,9 @@ static void saa6588_timer(unsigned long data) | |||
324 | schedule_work(&s->work); | 324 | schedule_work(&s->work); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void saa6588_work(void *data) | 327 | static void saa6588_work(struct work_struct *work) |
328 | { | 328 | { |
329 | struct saa6588 *s = (struct saa6588 *)data; | 329 | struct saa6588 *s = container_of(work, struct saa6588, work); |
330 | 330 | ||
331 | saa6588_i2c_poll(s); | 331 | saa6588_i2c_poll(s); |
332 | mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); | 332 | mod_timer(&s->timer, jiffies + msecs_to_jiffies(20)); |
@@ -419,7 +419,7 @@ static int saa6588_attach(struct i2c_adapter *adap, int addr, int kind) | |||
419 | saa6588_configure(s); | 419 | saa6588_configure(s); |
420 | 420 | ||
421 | /* start polling via eventd */ | 421 | /* start polling via eventd */ |
422 | INIT_WORK(&s->work, saa6588_work, s); | 422 | INIT_WORK(&s->work, saa6588_work); |
423 | init_timer(&s->timer); | 423 | init_timer(&s->timer); |
424 | s->timer.function = saa6588_timer; | 424 | s->timer.function = saa6588_timer; |
425 | s->timer.data = (unsigned long)s; | 425 | s->timer.data = (unsigned long)s; |
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 65d044086ce9..daaae870a2c4 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c | |||
@@ -343,9 +343,10 @@ static struct video_device saa7134_empress_template = | |||
343 | .minor = -1, | 343 | .minor = -1, |
344 | }; | 344 | }; |
345 | 345 | ||
346 | static void empress_signal_update(void* data) | 346 | static void empress_signal_update(struct work_struct *work) |
347 | { | 347 | { |
348 | struct saa7134_dev* dev = (struct saa7134_dev*) data; | 348 | struct saa7134_dev* dev = |
349 | container_of(work, struct saa7134_dev, empress_workqueue); | ||
349 | 350 | ||
350 | if (dev->nosignal) { | 351 | if (dev->nosignal) { |
351 | dprintk("no video signal\n"); | 352 | dprintk("no video signal\n"); |
@@ -378,7 +379,7 @@ static int empress_init(struct saa7134_dev *dev) | |||
378 | "%s empress (%s)", dev->name, | 379 | "%s empress (%s)", dev->name, |
379 | saa7134_boards[dev->board].name); | 380 | saa7134_boards[dev->board].name); |
380 | 381 | ||
381 | INIT_WORK(&dev->empress_workqueue, empress_signal_update, (void*) dev); | 382 | INIT_WORK(&dev->empress_workqueue, empress_signal_update); |
382 | 383 | ||
383 | err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, | 384 | err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER, |
384 | empress_nr[dev->nr]); | 385 | empress_nr[dev->nr]); |
@@ -399,7 +400,7 @@ static int empress_init(struct saa7134_dev *dev) | |||
399 | sizeof(struct saa7134_buf), | 400 | sizeof(struct saa7134_buf), |
400 | dev); | 401 | dev); |
401 | 402 | ||
402 | empress_signal_update(dev); | 403 | empress_signal_update(&dev->empress_workqueue); |
403 | return 0; | 404 | return 0; |
404 | } | 405 | } |
405 | 406 | ||
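saa7134-empress also shows what happens to direct calls: code that used to invoke the handler with the private pointer (empress_signal_update(dev)) now hands it the embedded work item instead, so the handler's container_of() still lands on the right structure. A short sketch, hypothetical names:

#include <linux/workqueue.h>

struct my_dev {
	struct work_struct signal_work;
	int nosignal;
};

static void my_signal_update(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, signal_work);

	if (dev->nosignal)
		return;
	/* ... update TS muxer settings ... */
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->signal_work, my_signal_update);
	/* synchronous call: pass the work member, not the device */
	my_signal_update(&dev->signal_work);
}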
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 1dd491773150..ef2b55e19910 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -1018,9 +1018,10 @@ mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum) | |||
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | static void | 1020 | static void |
1021 | mptfc_setup_reset(void *arg) | 1021 | mptfc_setup_reset(struct work_struct *work) |
1022 | { | 1022 | { |
1023 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 1023 | MPT_ADAPTER *ioc = |
1024 | container_of(work, MPT_ADAPTER, fc_setup_reset_work); | ||
1024 | u64 pn; | 1025 | u64 pn; |
1025 | struct mptfc_rport_info *ri; | 1026 | struct mptfc_rport_info *ri; |
1026 | 1027 | ||
@@ -1043,9 +1044,10 @@ mptfc_setup_reset(void *arg) | |||
1043 | } | 1044 | } |
1044 | 1045 | ||
1045 | static void | 1046 | static void |
1046 | mptfc_rescan_devices(void *arg) | 1047 | mptfc_rescan_devices(struct work_struct *work) |
1047 | { | 1048 | { |
1048 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 1049 | MPT_ADAPTER *ioc = |
1050 | container_of(work, MPT_ADAPTER, fc_rescan_work); | ||
1049 | int ii; | 1051 | int ii; |
1050 | u64 pn; | 1052 | u64 pn; |
1051 | struct mptfc_rport_info *ri; | 1053 | struct mptfc_rport_info *ri; |
@@ -1154,8 +1156,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1154 | } | 1156 | } |
1155 | 1157 | ||
1156 | spin_lock_init(&ioc->fc_rescan_work_lock); | 1158 | spin_lock_init(&ioc->fc_rescan_work_lock); |
1157 | INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices,(void *)ioc); | 1159 | INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices); |
1158 | INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset, (void *)ioc); | 1160 | INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset); |
1159 | 1161 | ||
1160 | spin_lock_irqsave(&ioc->FreeQlock, flags); | 1162 | spin_lock_irqsave(&ioc->FreeQlock, flags); |
1161 | 1163 | ||
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c index 314c3a27585d..b7c4407c5e3f 100644 --- a/drivers/message/fusion/mptlan.c +++ b/drivers/message/fusion/mptlan.c | |||
@@ -111,7 +111,8 @@ struct mpt_lan_priv { | |||
111 | u32 total_received; | 111 | u32 total_received; |
112 | struct net_device_stats stats; /* Per device statistics */ | 112 | struct net_device_stats stats; /* Per device statistics */ |
113 | 113 | ||
114 | struct work_struct post_buckets_task; | 114 | struct delayed_work post_buckets_task; |
115 | struct net_device *dev; | ||
115 | unsigned long post_buckets_active; | 116 | unsigned long post_buckets_active; |
116 | }; | 117 | }; |
117 | 118 | ||
@@ -132,7 +133,7 @@ static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, | |||
132 | static int mpt_lan_open(struct net_device *dev); | 133 | static int mpt_lan_open(struct net_device *dev); |
133 | static int mpt_lan_reset(struct net_device *dev); | 134 | static int mpt_lan_reset(struct net_device *dev); |
134 | static int mpt_lan_close(struct net_device *dev); | 135 | static int mpt_lan_close(struct net_device *dev); |
135 | static void mpt_lan_post_receive_buckets(void *dev_id); | 136 | static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv); |
136 | static void mpt_lan_wake_post_buckets_task(struct net_device *dev, | 137 | static void mpt_lan_wake_post_buckets_task(struct net_device *dev, |
137 | int priority); | 138 | int priority); |
138 | static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); | 139 | static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg); |
@@ -345,7 +346,7 @@ mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) | |||
345 | priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; | 346 | priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i; |
346 | spin_unlock_irqrestore(&priv->rxfidx_lock, flags); | 347 | spin_unlock_irqrestore(&priv->rxfidx_lock, flags); |
347 | } else { | 348 | } else { |
348 | mpt_lan_post_receive_buckets(dev); | 349 | mpt_lan_post_receive_buckets(priv); |
349 | netif_wake_queue(dev); | 350 | netif_wake_queue(dev); |
350 | } | 351 | } |
351 | 352 | ||
@@ -441,7 +442,7 @@ mpt_lan_open(struct net_device *dev) | |||
441 | 442 | ||
442 | dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); | 443 | dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n")); |
443 | 444 | ||
444 | mpt_lan_post_receive_buckets(dev); | 445 | mpt_lan_post_receive_buckets(priv); |
445 | printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", | 446 | printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n", |
446 | IOC_AND_NETDEV_NAMES_s_s(dev)); | 447 | IOC_AND_NETDEV_NAMES_s_s(dev)); |
447 | 448 | ||
@@ -854,7 +855,7 @@ mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority) | |||
854 | 855 | ||
855 | if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { | 856 | if (test_and_set_bit(0, &priv->post_buckets_active) == 0) { |
856 | if (priority) { | 857 | if (priority) { |
857 | schedule_work(&priv->post_buckets_task); | 858 | schedule_delayed_work(&priv->post_buckets_task, 0); |
858 | } else { | 859 | } else { |
859 | schedule_delayed_work(&priv->post_buckets_task, 1); | 860 | schedule_delayed_work(&priv->post_buckets_task, 1); |
860 | dioprintk((KERN_INFO MYNAM ": post_buckets queued on " | 861 | dioprintk((KERN_INFO MYNAM ": post_buckets queued on " |
@@ -1188,10 +1189,9 @@ mpt_lan_receive_post_reply(struct net_device *dev, | |||
1188 | /* Simple SGE's only at the moment */ | 1189 | /* Simple SGE's only at the moment */ |
1189 | 1190 | ||
1190 | static void | 1191 | static void |
1191 | mpt_lan_post_receive_buckets(void *dev_id) | 1192 | mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv) |
1192 | { | 1193 | { |
1193 | struct net_device *dev = dev_id; | 1194 | struct net_device *dev = priv->dev; |
1194 | struct mpt_lan_priv *priv = dev->priv; | ||
1195 | MPT_ADAPTER *mpt_dev = priv->mpt_dev; | 1195 | MPT_ADAPTER *mpt_dev = priv->mpt_dev; |
1196 | MPT_FRAME_HDR *mf; | 1196 | MPT_FRAME_HDR *mf; |
1197 | LANReceivePostRequest_t *pRecvReq; | 1197 | LANReceivePostRequest_t *pRecvReq; |
@@ -1335,6 +1335,13 @@ out: | |||
1335 | clear_bit(0, &priv->post_buckets_active); | 1335 | clear_bit(0, &priv->post_buckets_active); |
1336 | } | 1336 | } |
1337 | 1337 | ||
1338 | static void | ||
1339 | mpt_lan_post_receive_buckets_work(struct work_struct *work) | ||
1340 | { | ||
1341 | mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv, | ||
1342 | post_buckets_task.work)); | ||
1343 | } | ||
1344 | |||
1338 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ | 1345 | /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ |
1339 | static struct net_device * | 1346 | static struct net_device * |
1340 | mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) | 1347 | mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) |
@@ -1350,11 +1357,13 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum) | |||
1350 | 1357 | ||
1351 | priv = netdev_priv(dev); | 1358 | priv = netdev_priv(dev); |
1352 | 1359 | ||
1360 | priv->dev = dev; | ||
1353 | priv->mpt_dev = mpt_dev; | 1361 | priv->mpt_dev = mpt_dev; |
1354 | priv->pnum = pnum; | 1362 | priv->pnum = pnum; |
1355 | 1363 | ||
1356 | memset(&priv->post_buckets_task, 0, sizeof(struct work_struct)); | 1364 | memset(&priv->post_buckets_task, 0, sizeof(priv->post_buckets_task)); |
1357 | INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev); | 1365 | INIT_DELAYED_WORK(&priv->post_buckets_task, |
1366 | mpt_lan_post_receive_buckets_work); | ||
1358 | priv->post_buckets_active = 0; | 1367 | priv->post_buckets_active = 0; |
1359 | 1368 | ||
1360 | dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", | 1369 | dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n", |
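mptlan takes the other route for code that runs both from the workqueue and from direct callers: the core routine keeps taking the private structure, and a thin wrapper does the container_of() before calling it. Sketch, hypothetical names:

#include <linux/workqueue.h>

struct my_lan_priv {
	struct delayed_work post_buckets_task;
	int buckets_out;
};

/* core routine, still callable directly with the private structure */
static void my_post_buckets(struct my_lan_priv *priv)
{
	priv->buckets_out++;
	/* ... post receive buffers to the adapter ... */
}

/* thin wrapper used only as the work function */
static void my_post_buckets_work(struct work_struct *work)
{
	my_post_buckets(container_of(work, struct my_lan_priv,
				     post_buckets_task.work));
}

static void my_lan_setup(struct my_lan_priv *priv)
{
	INIT_DELAYED_WORK(&priv->post_buckets_task, my_post_buckets_work);
	schedule_delayed_work(&priv->post_buckets_task, 1);
}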
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index b752a479f6db..4f0c530e47b0 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -2006,9 +2006,10 @@ __mptsas_discovery_work(MPT_ADAPTER *ioc) | |||
2006 | *(Mutex LOCKED) | 2006 | *(Mutex LOCKED) |
2007 | */ | 2007 | */ |
2008 | static void | 2008 | static void |
2009 | mptsas_discovery_work(void * arg) | 2009 | mptsas_discovery_work(struct work_struct *work) |
2010 | { | 2010 | { |
2011 | struct mptsas_discovery_event *ev = arg; | 2011 | struct mptsas_discovery_event *ev = |
2012 | container_of(work, struct mptsas_discovery_event, work); | ||
2012 | MPT_ADAPTER *ioc = ev->ioc; | 2013 | MPT_ADAPTER *ioc = ev->ioc; |
2013 | 2014 | ||
2014 | mutex_lock(&ioc->sas_discovery_mutex); | 2015 | mutex_lock(&ioc->sas_discovery_mutex); |
@@ -2068,9 +2069,9 @@ mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u32 id) | |||
2068 | * Work queue thread to clear the persitency table | 2069 | * Work queue thread to clear the persitency table |
2069 | */ | 2070 | */ |
2070 | static void | 2071 | static void |
2071 | mptsas_persist_clear_table(void * arg) | 2072 | mptsas_persist_clear_table(struct work_struct *work) |
2072 | { | 2073 | { |
2073 | MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; | 2074 | MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task); |
2074 | 2075 | ||
2075 | mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); | 2076 | mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT); |
2076 | } | 2077 | } |
@@ -2093,9 +2094,10 @@ mptsas_reprobe_target(struct scsi_target *starget, int uld_attach) | |||
2093 | * Work queue thread to handle SAS hotplug events | 2094 | * Work queue thread to handle SAS hotplug events |
2094 | */ | 2095 | */ |
2095 | static void | 2096 | static void |
2096 | mptsas_hotplug_work(void *arg) | 2097 | mptsas_hotplug_work(struct work_struct *work) |
2097 | { | 2098 | { |
2098 | struct mptsas_hotplug_event *ev = arg; | 2099 | struct mptsas_hotplug_event *ev = |
2100 | container_of(work, struct mptsas_hotplug_event, work); | ||
2099 | MPT_ADAPTER *ioc = ev->ioc; | 2101 | MPT_ADAPTER *ioc = ev->ioc; |
2100 | struct mptsas_phyinfo *phy_info; | 2102 | struct mptsas_phyinfo *phy_info; |
2101 | struct sas_rphy *rphy; | 2103 | struct sas_rphy *rphy; |
@@ -2341,7 +2343,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, | |||
2341 | break; | 2343 | break; |
2342 | } | 2344 | } |
2343 | 2345 | ||
2344 | INIT_WORK(&ev->work, mptsas_hotplug_work, ev); | 2346 | INIT_WORK(&ev->work, mptsas_hotplug_work); |
2345 | ev->ioc = ioc; | 2347 | ev->ioc = ioc; |
2346 | ev->handle = le16_to_cpu(sas_event_data->DevHandle); | 2348 | ev->handle = le16_to_cpu(sas_event_data->DevHandle); |
2347 | ev->parent_handle = | 2349 | ev->parent_handle = |
@@ -2366,7 +2368,7 @@ mptsas_send_sas_event(MPT_ADAPTER *ioc, | |||
2366 | * Persistent table is full. | 2368 | * Persistent table is full. |
2367 | */ | 2369 | */ |
2368 | INIT_WORK(&ioc->sas_persist_task, | 2370 | INIT_WORK(&ioc->sas_persist_task, |
2369 | mptsas_persist_clear_table, (void *)ioc); | 2371 | mptsas_persist_clear_table); |
2370 | schedule_work(&ioc->sas_persist_task); | 2372 | schedule_work(&ioc->sas_persist_task); |
2371 | break; | 2373 | break; |
2372 | case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: | 2374 | case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: |
@@ -2395,7 +2397,7 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc, | |||
2395 | return; | 2397 | return; |
2396 | } | 2398 | } |
2397 | 2399 | ||
2398 | INIT_WORK(&ev->work, mptsas_hotplug_work, ev); | 2400 | INIT_WORK(&ev->work, mptsas_hotplug_work); |
2399 | ev->ioc = ioc; | 2401 | ev->ioc = ioc; |
2400 | ev->id = raid_event_data->VolumeID; | 2402 | ev->id = raid_event_data->VolumeID; |
2401 | ev->event_type = MPTSAS_IGNORE_EVENT; | 2403 | ev->event_type = MPTSAS_IGNORE_EVENT; |
@@ -2474,7 +2476,7 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc, | |||
2474 | ev = kzalloc(sizeof(*ev), GFP_ATOMIC); | 2476 | ev = kzalloc(sizeof(*ev), GFP_ATOMIC); |
2475 | if (!ev) | 2477 | if (!ev) |
2476 | return; | 2478 | return; |
2477 | INIT_WORK(&ev->work, mptsas_discovery_work, ev); | 2479 | INIT_WORK(&ev->work, mptsas_discovery_work); |
2478 | ev->ioc = ioc; | 2480 | ev->ioc = ioc; |
2479 | schedule_work(&ev->work); | 2481 | schedule_work(&ev->work); |
2480 | }; | 2482 | }; |
@@ -2511,8 +2513,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) | |||
2511 | break; | 2513 | break; |
2512 | case MPI_EVENT_PERSISTENT_TABLE_FULL: | 2514 | case MPI_EVENT_PERSISTENT_TABLE_FULL: |
2513 | INIT_WORK(&ioc->sas_persist_task, | 2515 | INIT_WORK(&ioc->sas_persist_task, |
2514 | mptsas_persist_clear_table, | 2516 | mptsas_persist_clear_table); |
2515 | (void *)ioc); | ||
2516 | schedule_work(&ioc->sas_persist_task); | 2517 | schedule_work(&ioc->sas_persist_task); |
2517 | break; | 2518 | break; |
2518 | case MPI_EVENT_SAS_DISCOVERY: | 2519 | case MPI_EVENT_SAS_DISCOVERY: |
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index e4cc3dd5fc9f..f422c0d0621c 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c | |||
@@ -646,9 +646,10 @@ struct work_queue_wrapper { | |||
646 | int disk; | 646 | int disk; |
647 | }; | 647 | }; |
648 | 648 | ||
649 | static void mpt_work_wrapper(void *data) | 649 | static void mpt_work_wrapper(struct work_struct *work) |
650 | { | 650 | { |
651 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 651 | struct work_queue_wrapper *wqw = |
652 | container_of(work, struct work_queue_wrapper, work); | ||
652 | struct _MPT_SCSI_HOST *hd = wqw->hd; | 653 | struct _MPT_SCSI_HOST *hd = wqw->hd; |
653 | struct Scsi_Host *shost = hd->ioc->sh; | 654 | struct Scsi_Host *shost = hd->ioc->sh; |
654 | struct scsi_device *sdev; | 655 | struct scsi_device *sdev; |
@@ -695,7 +696,7 @@ static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) | |||
695 | disk); | 696 | disk); |
696 | return; | 697 | return; |
697 | } | 698 | } |
698 | INIT_WORK(&wqw->work, mpt_work_wrapper, wqw); | 699 | INIT_WORK(&wqw->work, mpt_work_wrapper); |
699 | wqw->hd = hd; | 700 | wqw->hd = hd; |
700 | wqw->disk = disk; | 701 | wqw->disk = disk; |
701 | 702 | ||
@@ -784,9 +785,10 @@ MODULE_DEVICE_TABLE(pci, mptspi_pci_table); | |||
784 | * renegotiate for a given target | 785 | * renegotiate for a given target |
785 | */ | 786 | */ |
786 | static void | 787 | static void |
787 | mptspi_dv_renegotiate_work(void *data) | 788 | mptspi_dv_renegotiate_work(struct work_struct *work) |
788 | { | 789 | { |
789 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 790 | struct work_queue_wrapper *wqw = |
791 | container_of(work, struct work_queue_wrapper, work); | ||
790 | struct _MPT_SCSI_HOST *hd = wqw->hd; | 792 | struct _MPT_SCSI_HOST *hd = wqw->hd; |
791 | struct scsi_device *sdev; | 793 | struct scsi_device *sdev; |
792 | 794 | ||
@@ -804,7 +806,7 @@ mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) | |||
804 | if (!wqw) | 806 | if (!wqw) |
805 | return; | 807 | return; |
806 | 808 | ||
807 | INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); | 809 | INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work); |
808 | wqw->hd = hd; | 810 | wqw->hd = hd; |
809 | 811 | ||
810 | schedule_work(&wqw->work); | 812 | schedule_work(&wqw->work); |
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c index 64130227574f..7fc7399bd2ec 100644 --- a/drivers/message/i2o/driver.c +++ b/drivers/message/i2o/driver.c | |||
@@ -232,7 +232,7 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m) | |||
232 | break; | 232 | break; |
233 | } | 233 | } |
234 | 234 | ||
235 | INIT_WORK(&evt->work, (void (*)(void *))drv->event, evt); | 235 | INIT_WORK(&evt->work, drv->event); |
236 | queue_work(drv->event_queue, &evt->work); | 236 | queue_work(drv->event_queue, &evt->work); |
237 | return 1; | 237 | return 1; |
238 | } | 238 | } |
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c index a2350640384b..9e529d8dd5cb 100644 --- a/drivers/message/i2o/exec-osm.c +++ b/drivers/message/i2o/exec-osm.c | |||
@@ -371,8 +371,10 @@ static int i2o_exec_remove(struct device *dev) | |||
371 | * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY | 371 | * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY |
372 | * again, otherwise send LCT NOTIFY to get informed on next LCT change. | 372 | * again, otherwise send LCT NOTIFY to get informed on next LCT change. |
373 | */ | 373 | */ |
374 | static void i2o_exec_lct_modified(struct i2o_exec_lct_notify_work *work) | 374 | static void i2o_exec_lct_modified(struct work_struct *_work) |
375 | { | 375 | { |
376 | struct i2o_exec_lct_notify_work *work = | ||
377 | container_of(_work, struct i2o_exec_lct_notify_work, work); | ||
376 | u32 change_ind = 0; | 378 | u32 change_ind = 0; |
377 | struct i2o_controller *c = work->c; | 379 | struct i2o_controller *c = work->c; |
378 | 380 | ||
@@ -439,8 +441,7 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
439 | 441 | ||
440 | work->c = c; | 442 | work->c = c; |
441 | 443 | ||
442 | INIT_WORK(&work->work, (void (*)(void *))i2o_exec_lct_modified, | 444 | INIT_WORK(&work->work, i2o_exec_lct_modified); |
443 | work); | ||
444 | queue_work(i2o_exec_driver.event_queue, &work->work); | 445 | queue_work(i2o_exec_driver.event_queue, &work->work); |
445 | return 1; | 446 | return 1; |
446 | } | 447 | } |
@@ -460,13 +461,15 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m, | |||
460 | 461 | ||
461 | /** | 462 | /** |
462 | * i2o_exec_event - Event handling function | 463 | * i2o_exec_event - Event handling function |
463 | * @evt: Event which occurs | 464 | * @work: Work item in occurring event |
464 | * | 465 | * |
465 | * Handles events send by the Executive device. At the moment does not do | 466 | * Handles events send by the Executive device. At the moment does not do |
466 | * anything useful. | 467 | * anything useful. |
467 | */ | 468 | */ |
468 | static void i2o_exec_event(struct i2o_event *evt) | 469 | static void i2o_exec_event(struct work_struct *work) |
469 | { | 470 | { |
471 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
472 | |||
470 | if (likely(evt->i2o_dev)) | 473 | if (likely(evt->i2o_dev)) |
471 | osm_debug("Event received from device: %d\n", | 474 | osm_debug("Event received from device: %d\n", |
472 | evt->i2o_dev->lct_data.tid); | 475 | evt->i2o_dev->lct_data.tid); |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index eaba81bf2eca..70ae00253321 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -419,16 +419,18 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
419 | 419 | ||
420 | /** | 420 | /** |
421 | * i2o_block_delayed_request_fn - delayed request queue function | 421 | * i2o_block_delayed_request_fn - delayed request queue function |
422 | * delayed_request: the delayed request with the queue to start | 422 | * @work: the delayed request with the queue to start |
423 | * | 423 | * |
424 | * If the request queue is stopped for a disk, and there is no open | 424 | * If the request queue is stopped for a disk, and there is no open |
425 | * request, a new event is created, which calls this function to start | 425 | * request, a new event is created, which calls this function to start |
426 | * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never | 426 | * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never |
427 | * be started again. | 427 | * be started again. |
428 | */ | 428 | */ |
429 | static void i2o_block_delayed_request_fn(void *delayed_request) | 429 | static void i2o_block_delayed_request_fn(struct work_struct *work) |
430 | { | 430 | { |
431 | struct i2o_block_delayed_request *dreq = delayed_request; | 431 | struct i2o_block_delayed_request *dreq = |
432 | container_of(work, struct i2o_block_delayed_request, | ||
433 | work.work); | ||
432 | struct request_queue *q = dreq->queue; | 434 | struct request_queue *q = dreq->queue; |
433 | unsigned long flags; | 435 | unsigned long flags; |
434 | 436 | ||
@@ -538,8 +540,9 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m, | |||
538 | return 1; | 540 | return 1; |
539 | }; | 541 | }; |
540 | 542 | ||
541 | static void i2o_block_event(struct i2o_event *evt) | 543 | static void i2o_block_event(struct work_struct *work) |
542 | { | 544 | { |
545 | struct i2o_event *evt = container_of(work, struct i2o_event, work); | ||
543 | osm_debug("event received\n"); | 546 | osm_debug("event received\n"); |
544 | kfree(evt); | 547 | kfree(evt); |
545 | }; | 548 | }; |
@@ -938,8 +941,8 @@ static void i2o_block_request_fn(struct request_queue *q) | |||
938 | continue; | 941 | continue; |
939 | 942 | ||
940 | dreq->queue = q; | 943 | dreq->queue = q; |
941 | INIT_WORK(&dreq->work, i2o_block_delayed_request_fn, | 944 | INIT_DELAYED_WORK(&dreq->work, |
942 | dreq); | 945 | i2o_block_delayed_request_fn); |
943 | 946 | ||
944 | if (!queue_delayed_work(i2o_block_driver.event_queue, | 947 | if (!queue_delayed_work(i2o_block_driver.event_queue, |
945 | &dreq->work, | 948 | &dreq->work, |
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h index 4fdaa5bda412..d9fdc95b440d 100644 --- a/drivers/message/i2o/i2o_block.h +++ b/drivers/message/i2o/i2o_block.h | |||
@@ -96,7 +96,7 @@ struct i2o_block_request { | |||
96 | 96 | ||
97 | /* I2O Block device delayed request */ | 97 | /* I2O Block device delayed request */ |
98 | struct i2o_block_delayed_request { | 98 | struct i2o_block_delayed_request { |
99 | struct work_struct work; | 99 | struct delayed_work work; |
100 | struct request_queue *queue; | 100 | struct request_queue *queue; |
101 | }; | 101 | }; |
102 | 102 | ||
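Work items that are ever queued with a delay, like i2o_block_delayed_request above, change type from work_struct to delayed_work, and the handler then has to name the embedded work member in container_of(). A rough sketch under that assumption (foo_req and its fields are invented for illustration):

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        /* Hypothetical delayed request, mirroring i2o_block_delayed_request above. */
        struct foo_req {
                struct delayed_work dwork;      /* was: struct work_struct */
                int tag;
        };

        static void foo_delayed_fn(struct work_struct *work)
        {
                /* delayed_work embeds a work_struct named "work", hence dwork.work */
                struct foo_req *req = container_of(work, struct foo_req, dwork.work);

                printk(KERN_DEBUG "request %d restarted\n", req->tag);
        }

        static int foo_queue(struct workqueue_struct *wq, struct foo_req *req,
                             unsigned long delay)
        {
                INIT_DELAYED_WORK(&req->dwork, foo_delayed_fn);
                return queue_delayed_work(wq, &req->dwork, delay);
        }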
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c index 1ba8754e9383..2ab7add78f94 100644 --- a/drivers/misc/tifm_7xx1.c +++ b/drivers/misc/tifm_7xx1.c | |||
@@ -33,9 +33,10 @@ static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock) | |||
33 | spin_unlock_irqrestore(&fm->lock, flags); | 33 | spin_unlock_irqrestore(&fm->lock, flags); |
34 | } | 34 | } |
35 | 35 | ||
36 | static void tifm_7xx1_remove_media(void *adapter) | 36 | static void tifm_7xx1_remove_media(struct work_struct *work) |
37 | { | 37 | { |
38 | struct tifm_adapter *fm = adapter; | 38 | struct tifm_adapter *fm = |
39 | container_of(work, struct tifm_adapter, media_remover); | ||
39 | unsigned long flags; | 40 | unsigned long flags; |
40 | int cnt; | 41 | int cnt; |
41 | struct tifm_dev *sock; | 42 | struct tifm_dev *sock; |
@@ -169,9 +170,10 @@ tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num) | |||
169 | return base_addr + ((sock_num + 1) << 10); | 170 | return base_addr + ((sock_num + 1) << 10); |
170 | } | 171 | } |
171 | 172 | ||
172 | static void tifm_7xx1_insert_media(void *adapter) | 173 | static void tifm_7xx1_insert_media(struct work_struct *work) |
173 | { | 174 | { |
174 | struct tifm_adapter *fm = adapter; | 175 | struct tifm_adapter *fm = |
176 | container_of(work, struct tifm_adapter, media_inserter); | ||
175 | unsigned long flags; | 177 | unsigned long flags; |
176 | tifm_media_id media_id; | 178 | tifm_media_id media_id; |
177 | char *card_name = "xx"; | 179 | char *card_name = "xx"; |
@@ -261,7 +263,7 @@ static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state) | |||
261 | spin_unlock_irqrestore(&fm->lock, flags); | 263 | spin_unlock_irqrestore(&fm->lock, flags); |
262 | flush_workqueue(fm->wq); | 264 | flush_workqueue(fm->wq); |
263 | 265 | ||
264 | tifm_7xx1_remove_media(fm); | 266 | tifm_7xx1_remove_media(&fm->media_remover); |
265 | 267 | ||
266 | pci_set_power_state(dev, PCI_D3hot); | 268 | pci_set_power_state(dev, PCI_D3hot); |
267 | pci_disable_device(dev); | 269 | pci_disable_device(dev); |
@@ -328,8 +330,8 @@ static int tifm_7xx1_probe(struct pci_dev *dev, | |||
328 | if (!fm->sockets) | 330 | if (!fm->sockets) |
329 | goto err_out_free; | 331 | goto err_out_free; |
330 | 332 | ||
331 | INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media, fm); | 333 | INIT_WORK(&fm->media_inserter, tifm_7xx1_insert_media); |
332 | INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media, fm); | 334 | INIT_WORK(&fm->media_remover, tifm_7xx1_remove_media); |
333 | fm->eject = tifm_7xx1_eject; | 335 | fm->eject = tifm_7xx1_eject; |
334 | pci_set_drvdata(dev, fm); | 336 | pci_set_drvdata(dev, fm); |
335 | 337 | ||
@@ -384,7 +386,7 @@ static void tifm_7xx1_remove(struct pci_dev *dev) | |||
384 | 386 | ||
385 | flush_workqueue(fm->wq); | 387 | flush_workqueue(fm->wq); |
386 | 388 | ||
387 | tifm_7xx1_remove_media(fm); | 389 | tifm_7xx1_remove_media(&fm->media_remover); |
388 | 390 | ||
389 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); | 391 | writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE); |
390 | free_irq(dev->irq, fm); | 392 | free_irq(dev->irq, fm); |
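A converted handler can still be invoked synchronously, as the tifm suspend and remove paths above do, but the argument is now the address of the embedded work item rather than the device pointer. Roughly, with a made-up adapter type:

        #include <linux/workqueue.h>

        struct media_adapter {                  /* hypothetical adapter */
                struct work_struct media_remover;
                /* ... sockets, locks ... */
        };

        static void media_remove(struct work_struct *work)
        {
                struct media_adapter *fm =
                        container_of(work, struct media_adapter, media_remover);
                /* tear down fm's sockets here */
        }

        static void media_suspend(struct media_adapter *fm)
        {
                /* synchronous call: pass &fm->media_remover, not fm itself */
                media_remove(&fm->media_remover);
        }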
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 9d190022a490..6f2a282e2b97 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -1419,18 +1419,16 @@ static void mmc_setup(struct mmc_host *host) | |||
1419 | */ | 1419 | */ |
1420 | void mmc_detect_change(struct mmc_host *host, unsigned long delay) | 1420 | void mmc_detect_change(struct mmc_host *host, unsigned long delay) |
1421 | { | 1421 | { |
1422 | if (delay) | 1422 | mmc_schedule_delayed_work(&host->detect, delay); |
1423 | mmc_schedule_delayed_work(&host->detect, delay); | ||
1424 | else | ||
1425 | mmc_schedule_work(&host->detect); | ||
1426 | } | 1423 | } |
1427 | 1424 | ||
1428 | EXPORT_SYMBOL(mmc_detect_change); | 1425 | EXPORT_SYMBOL(mmc_detect_change); |
1429 | 1426 | ||
1430 | 1427 | ||
1431 | static void mmc_rescan(void *data) | 1428 | static void mmc_rescan(struct work_struct *work) |
1432 | { | 1429 | { |
1433 | struct mmc_host *host = data; | 1430 | struct mmc_host *host = |
1431 | container_of(work, struct mmc_host, detect.work); | ||
1434 | struct list_head *l, *n; | 1432 | struct list_head *l, *n; |
1435 | unsigned char power_mode; | 1433 | unsigned char power_mode; |
1436 | 1434 | ||
@@ -1513,7 +1511,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) | |||
1513 | spin_lock_init(&host->lock); | 1511 | spin_lock_init(&host->lock); |
1514 | init_waitqueue_head(&host->wq); | 1512 | init_waitqueue_head(&host->wq); |
1515 | INIT_LIST_HEAD(&host->cards); | 1513 | INIT_LIST_HEAD(&host->cards); |
1516 | INIT_WORK(&host->detect, mmc_rescan, host); | 1514 | INIT_DELAYED_WORK(&host->detect, mmc_rescan); |
1517 | 1515 | ||
1518 | /* | 1516 | /* |
1519 | * By default, hosts do not support SGIO or large requests. | 1517 | * By default, hosts do not support SGIO or large requests. |
@@ -1611,7 +1609,7 @@ EXPORT_SYMBOL(mmc_suspend_host); | |||
1611 | */ | 1609 | */ |
1612 | int mmc_resume_host(struct mmc_host *host) | 1610 | int mmc_resume_host(struct mmc_host *host) |
1613 | { | 1611 | { |
1614 | mmc_rescan(host); | 1612 | mmc_rescan(&host->detect.work); |
1615 | 1613 | ||
1616 | return 0; | 1614 | return 0; |
1617 | } | 1615 | } |
diff --git a/drivers/mmc/mmc.h b/drivers/mmc/mmc.h index cd5e0ab3d84b..149affe0b686 100644 --- a/drivers/mmc/mmc.h +++ b/drivers/mmc/mmc.h | |||
@@ -20,6 +20,6 @@ void mmc_remove_host_sysfs(struct mmc_host *host); | |||
20 | void mmc_free_host_sysfs(struct mmc_host *host); | 20 | void mmc_free_host_sysfs(struct mmc_host *host); |
21 | 21 | ||
22 | int mmc_schedule_work(struct work_struct *work); | 22 | int mmc_schedule_work(struct work_struct *work); |
23 | int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay); | 23 | int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay); |
24 | void mmc_flush_scheduled_work(void); | 24 | void mmc_flush_scheduled_work(void); |
25 | #endif | 25 | #endif |
diff --git a/drivers/mmc/mmc_sysfs.c b/drivers/mmc/mmc_sysfs.c index ac5329636045..e334acd045bc 100644 --- a/drivers/mmc/mmc_sysfs.c +++ b/drivers/mmc/mmc_sysfs.c | |||
@@ -321,17 +321,9 @@ void mmc_free_host_sysfs(struct mmc_host *host) | |||
321 | static struct workqueue_struct *workqueue; | 321 | static struct workqueue_struct *workqueue; |
322 | 322 | ||
323 | /* | 323 | /* |
324 | * Internal function. Schedule work in the MMC work queue. | ||
325 | */ | ||
326 | int mmc_schedule_work(struct work_struct *work) | ||
327 | { | ||
328 | return queue_work(workqueue, work); | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Internal function. Schedule delayed work in the MMC work queue. | 324 | * Internal function. Schedule delayed work in the MMC work queue. |
333 | */ | 325 | */ |
334 | int mmc_schedule_delayed_work(struct work_struct *work, unsigned long delay) | 326 | int mmc_schedule_delayed_work(struct delayed_work *work, unsigned long delay) |
335 | { | 327 | { |
336 | return queue_delayed_work(workqueue, work, delay); | 328 | return queue_delayed_work(workqueue, work, delay); |
337 | } | 329 | } |
diff --git a/drivers/mmc/tifm_sd.c b/drivers/mmc/tifm_sd.c index 0fdc55b08a6d..e846499a004c 100644 --- a/drivers/mmc/tifm_sd.c +++ b/drivers/mmc/tifm_sd.c | |||
@@ -99,7 +99,7 @@ struct tifm_sd { | |||
99 | 99 | ||
100 | struct mmc_request *req; | 100 | struct mmc_request *req; |
101 | struct work_struct cmd_handler; | 101 | struct work_struct cmd_handler; |
102 | struct work_struct abort_handler; | 102 | struct delayed_work abort_handler; |
103 | wait_queue_head_t can_eject; | 103 | wait_queue_head_t can_eject; |
104 | 104 | ||
105 | size_t written_blocks; | 105 | size_t written_blocks; |
@@ -496,9 +496,9 @@ err_out: | |||
496 | mmc_request_done(mmc, mrq); | 496 | mmc_request_done(mmc, mrq); |
497 | } | 497 | } |
498 | 498 | ||
499 | static void tifm_sd_end_cmd(void *data) | 499 | static void tifm_sd_end_cmd(struct work_struct *work) |
500 | { | 500 | { |
501 | struct tifm_sd *host = data; | 501 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
502 | struct tifm_dev *sock = host->dev; | 502 | struct tifm_dev *sock = host->dev; |
503 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 503 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
504 | struct mmc_request *mrq; | 504 | struct mmc_request *mrq; |
@@ -608,9 +608,9 @@ err_out: | |||
608 | mmc_request_done(mmc, mrq); | 608 | mmc_request_done(mmc, mrq); |
609 | } | 609 | } |
610 | 610 | ||
611 | static void tifm_sd_end_cmd_nodma(void *data) | 611 | static void tifm_sd_end_cmd_nodma(struct work_struct *work) |
612 | { | 612 | { |
613 | struct tifm_sd *host = (struct tifm_sd*)data; | 613 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
614 | struct tifm_dev *sock = host->dev; | 614 | struct tifm_dev *sock = host->dev; |
615 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 615 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
616 | struct mmc_request *mrq; | 616 | struct mmc_request *mrq; |
@@ -661,11 +661,14 @@ static void tifm_sd_end_cmd_nodma(void *data) | |||
661 | mmc_request_done(mmc, mrq); | 661 | mmc_request_done(mmc, mrq); |
662 | } | 662 | } |
663 | 663 | ||
664 | static void tifm_sd_abort(void *data) | 664 | static void tifm_sd_abort(struct work_struct *work) |
665 | { | 665 | { |
666 | struct tifm_sd *host = | ||
667 | container_of(work, struct tifm_sd, abort_handler.work); | ||
668 | |||
666 | printk(KERN_ERR DRIVER_NAME | 669 | printk(KERN_ERR DRIVER_NAME |
667 | ": card failed to respond for a long period of time"); | 670 | ": card failed to respond for a long period of time"); |
668 | tifm_eject(((struct tifm_sd*)data)->dev); | 671 | tifm_eject(host->dev); |
669 | } | 672 | } |
670 | 673 | ||
671 | static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 674 | static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
@@ -762,9 +765,9 @@ static struct mmc_host_ops tifm_sd_ops = { | |||
762 | .get_ro = tifm_sd_ro | 765 | .get_ro = tifm_sd_ro |
763 | }; | 766 | }; |
764 | 767 | ||
765 | static void tifm_sd_register_host(void *data) | 768 | static void tifm_sd_register_host(struct work_struct *work) |
766 | { | 769 | { |
767 | struct tifm_sd *host = (struct tifm_sd*)data; | 770 | struct tifm_sd *host = container_of(work, struct tifm_sd, cmd_handler); |
768 | struct tifm_dev *sock = host->dev; | 771 | struct tifm_dev *sock = host->dev; |
769 | struct mmc_host *mmc = tifm_get_drvdata(sock); | 772 | struct mmc_host *mmc = tifm_get_drvdata(sock); |
770 | unsigned long flags; | 773 | unsigned long flags; |
@@ -772,8 +775,7 @@ static void tifm_sd_register_host(void *data) | |||
772 | spin_lock_irqsave(&sock->lock, flags); | 775 | spin_lock_irqsave(&sock->lock, flags); |
773 | host->flags |= HOST_REG; | 776 | host->flags |= HOST_REG; |
774 | PREPARE_WORK(&host->cmd_handler, | 777 | PREPARE_WORK(&host->cmd_handler, |
775 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd, | 778 | no_dma ? tifm_sd_end_cmd_nodma : tifm_sd_end_cmd); |
776 | data); | ||
777 | spin_unlock_irqrestore(&sock->lock, flags); | 779 | spin_unlock_irqrestore(&sock->lock, flags); |
778 | dev_dbg(&sock->dev, "adding host\n"); | 780 | dev_dbg(&sock->dev, "adding host\n"); |
779 | mmc_add_host(mmc); | 781 | mmc_add_host(mmc); |
@@ -799,8 +801,8 @@ static int tifm_sd_probe(struct tifm_dev *sock) | |||
799 | host->dev = sock; | 801 | host->dev = sock; |
800 | host->clk_div = 61; | 802 | host->clk_div = 61; |
801 | init_waitqueue_head(&host->can_eject); | 803 | init_waitqueue_head(&host->can_eject); |
802 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host, host); | 804 | INIT_WORK(&host->cmd_handler, tifm_sd_register_host); |
803 | INIT_WORK(&host->abort_handler, tifm_sd_abort, host); | 805 | INIT_DELAYED_WORK(&host->abort_handler, tifm_sd_abort); |
804 | 806 | ||
805 | tifm_set_drvdata(sock, mmc); | 807 | tifm_set_drvdata(sock, mmc); |
806 | sock->signal_irq = tifm_sd_signal_irq; | 808 | sock->signal_irq = tifm_sd_signal_irq; |
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index d02ed51abfcc..931028f672de 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -594,7 +594,7 @@ struct rtl8139_private { | |||
594 | u32 rx_config; | 594 | u32 rx_config; |
595 | struct rtl_extra_stats xstats; | 595 | struct rtl_extra_stats xstats; |
596 | 596 | ||
597 | struct work_struct thread; | 597 | struct delayed_work thread; |
598 | 598 | ||
599 | struct mii_if_info mii; | 599 | struct mii_if_info mii; |
600 | unsigned int regs_len; | 600 | unsigned int regs_len; |
@@ -636,8 +636,8 @@ static struct net_device_stats *rtl8139_get_stats (struct net_device *dev); | |||
636 | static void rtl8139_set_rx_mode (struct net_device *dev); | 636 | static void rtl8139_set_rx_mode (struct net_device *dev); |
637 | static void __set_rx_mode (struct net_device *dev); | 637 | static void __set_rx_mode (struct net_device *dev); |
638 | static void rtl8139_hw_start (struct net_device *dev); | 638 | static void rtl8139_hw_start (struct net_device *dev); |
639 | static void rtl8139_thread (void *_data); | 639 | static void rtl8139_thread (struct work_struct *work); |
640 | static void rtl8139_tx_timeout_task(void *_data); | 640 | static void rtl8139_tx_timeout_task(struct work_struct *work); |
641 | static const struct ethtool_ops rtl8139_ethtool_ops; | 641 | static const struct ethtool_ops rtl8139_ethtool_ops; |
642 | 642 | ||
643 | /* write MMIO register, with flush */ | 643 | /* write MMIO register, with flush */ |
@@ -1010,7 +1010,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev, | |||
1010 | (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); | 1010 | (debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1)); |
1011 | spin_lock_init (&tp->lock); | 1011 | spin_lock_init (&tp->lock); |
1012 | spin_lock_init (&tp->rx_lock); | 1012 | spin_lock_init (&tp->rx_lock); |
1013 | INIT_WORK(&tp->thread, rtl8139_thread, dev); | 1013 | INIT_DELAYED_WORK(&tp->thread, rtl8139_thread); |
1014 | tp->mii.dev = dev; | 1014 | tp->mii.dev = dev; |
1015 | tp->mii.mdio_read = mdio_read; | 1015 | tp->mii.mdio_read = mdio_read; |
1016 | tp->mii.mdio_write = mdio_write; | 1016 | tp->mii.mdio_write = mdio_write; |
@@ -1596,15 +1596,16 @@ static inline void rtl8139_thread_iter (struct net_device *dev, | |||
1596 | RTL_R8 (Config1)); | 1596 | RTL_R8 (Config1)); |
1597 | } | 1597 | } |
1598 | 1598 | ||
1599 | static void rtl8139_thread (void *_data) | 1599 | static void rtl8139_thread (struct work_struct *work) |
1600 | { | 1600 | { |
1601 | struct net_device *dev = _data; | 1601 | struct rtl8139_private *tp = |
1602 | struct rtl8139_private *tp = netdev_priv(dev); | 1602 | container_of(work, struct rtl8139_private, thread.work); |
1603 | struct net_device *dev = tp->mii.dev; | ||
1603 | unsigned long thr_delay = next_tick; | 1604 | unsigned long thr_delay = next_tick; |
1604 | 1605 | ||
1605 | if (tp->watchdog_fired) { | 1606 | if (tp->watchdog_fired) { |
1606 | tp->watchdog_fired = 0; | 1607 | tp->watchdog_fired = 0; |
1607 | rtl8139_tx_timeout_task(_data); | 1608 | rtl8139_tx_timeout_task(work); |
1608 | } else if (rtnl_trylock()) { | 1609 | } else if (rtnl_trylock()) { |
1609 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); | 1610 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); |
1610 | rtnl_unlock (); | 1611 | rtnl_unlock (); |
@@ -1646,10 +1647,11 @@ static inline void rtl8139_tx_clear (struct rtl8139_private *tp) | |||
1646 | /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ | 1647 | /* XXX account for unsent Tx packets in tp->stats.tx_dropped */ |
1647 | } | 1648 | } |
1648 | 1649 | ||
1649 | static void rtl8139_tx_timeout_task (void *_data) | 1650 | static void rtl8139_tx_timeout_task (struct work_struct *work) |
1650 | { | 1651 | { |
1651 | struct net_device *dev = _data; | 1652 | struct rtl8139_private *tp = |
1652 | struct rtl8139_private *tp = netdev_priv(dev); | 1653 | container_of(work, struct rtl8139_private, thread.work); |
1654 | struct net_device *dev = tp->mii.dev; | ||
1653 | void __iomem *ioaddr = tp->mmio_addr; | 1655 | void __iomem *ioaddr = tp->mmio_addr; |
1654 | int i; | 1656 | int i; |
1655 | u8 tmp8; | 1657 | u8 tmp8; |
@@ -1695,7 +1697,7 @@ static void rtl8139_tx_timeout (struct net_device *dev) | |||
1695 | struct rtl8139_private *tp = netdev_priv(dev); | 1697 | struct rtl8139_private *tp = netdev_priv(dev); |
1696 | 1698 | ||
1697 | if (!tp->have_thread) { | 1699 | if (!tp->have_thread) { |
1698 | INIT_WORK(&tp->thread, rtl8139_tx_timeout_task, dev); | 1700 | INIT_DELAYED_WORK(&tp->thread, rtl8139_tx_timeout_task); |
1699 | schedule_delayed_work(&tp->thread, next_tick); | 1701 | schedule_delayed_work(&tp->thread, next_tick); |
1700 | } else | 1702 | } else |
1701 | tp->watchdog_fired = 1; | 1703 | tp->watchdog_fired = 1; |
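Handlers that used to be handed the net_device directly, such as rtl8139_thread and rtl8139_tx_timeout_task above, now start from the private structure and have to find the netdev again; here the existing tp->mii.dev pointer (set in the probe hunk above) already supplies it, so no new field is needed. Approximately, with placeholder names:

        #include <linux/kernel.h>
        #include <linux/mii.h>
        #include <linux/netdevice.h>
        #include <linux/workqueue.h>

        /* Placeholder private struct; mii.dev already points back at the netdev. */
        struct foo_priv {
                struct mii_if_info mii;
                struct delayed_work thread;
        };

        static void foo_thread(struct work_struct *work)
        {
                struct foo_priv *tp = container_of(work, struct foo_priv, thread.work);
                struct net_device *dev = tp->mii.dev;   /* recover the net_device */

                printk(KERN_DEBUG "%s: periodic thread ran\n", dev->name);
        }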
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index fc2f1d1c7ead..5bacb7587df4 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -4411,9 +4411,9 @@ bnx2_open(struct net_device *dev) | |||
4411 | } | 4411 | } |
4412 | 4412 | ||
4413 | static void | 4413 | static void |
4414 | bnx2_reset_task(void *data) | 4414 | bnx2_reset_task(struct work_struct *work) |
4415 | { | 4415 | { |
4416 | struct bnx2 *bp = data; | 4416 | struct bnx2 *bp = container_of(work, struct bnx2, reset_task); |
4417 | 4417 | ||
4418 | if (!netif_running(bp->dev)) | 4418 | if (!netif_running(bp->dev)) |
4419 | return; | 4419 | return; |
@@ -5702,7 +5702,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5702 | bp->pdev = pdev; | 5702 | bp->pdev = pdev; |
5703 | 5703 | ||
5704 | spin_lock_init(&bp->phy_lock); | 5704 | spin_lock_init(&bp->phy_lock); |
5705 | INIT_WORK(&bp->reset_task, bnx2_reset_task, bp); | 5705 | INIT_WORK(&bp->reset_task, bnx2_reset_task); |
5706 | 5706 | ||
5707 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); | 5707 | dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); |
5708 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); | 5708 | mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1); |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index fd2cc13f7d97..c8126484c2be 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -4066,9 +4066,9 @@ static int cas_alloc_rxds(struct cas *cp) | |||
4066 | return 0; | 4066 | return 0; |
4067 | } | 4067 | } |
4068 | 4068 | ||
4069 | static void cas_reset_task(void *data) | 4069 | static void cas_reset_task(struct work_struct *work) |
4070 | { | 4070 | { |
4071 | struct cas *cp = (struct cas *) data; | 4071 | struct cas *cp = container_of(work, struct cas, reset_task); |
4072 | #if 0 | 4072 | #if 0 |
4073 | int pending = atomic_read(&cp->reset_task_pending); | 4073 | int pending = atomic_read(&cp->reset_task_pending); |
4074 | #else | 4074 | #else |
@@ -5006,7 +5006,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
5006 | atomic_set(&cp->reset_task_pending_spare, 0); | 5006 | atomic_set(&cp->reset_task_pending_spare, 0); |
5007 | atomic_set(&cp->reset_task_pending_mtu, 0); | 5007 | atomic_set(&cp->reset_task_pending_mtu, 0); |
5008 | #endif | 5008 | #endif |
5009 | INIT_WORK(&cp->reset_task, cas_reset_task, cp); | 5009 | INIT_WORK(&cp->reset_task, cas_reset_task); |
5010 | 5010 | ||
5011 | /* Default link parameters */ | 5011 | /* Default link parameters */ |
5012 | if (link_mode >= 0 && link_mode <= 6) | 5012 | if (link_mode >= 0 && link_mode <= 6) |
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h index b265941e1372..74758d2c7af8 100644 --- a/drivers/net/chelsio/common.h +++ b/drivers/net/chelsio/common.h | |||
@@ -279,7 +279,7 @@ struct adapter { | |||
279 | struct petp *tp; | 279 | struct petp *tp; |
280 | 280 | ||
281 | struct port_info port[MAX_NPORTS]; | 281 | struct port_info port[MAX_NPORTS]; |
282 | struct work_struct stats_update_task; | 282 | struct delayed_work stats_update_task; |
283 | struct timer_list stats_update_timer; | 283 | struct timer_list stats_update_timer; |
284 | 284 | ||
285 | spinlock_t tpi_lock; | 285 | spinlock_t tpi_lock; |
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h index 60901f25014e..cf9143499882 100644 --- a/drivers/net/chelsio/cphy.h +++ b/drivers/net/chelsio/cphy.h | |||
@@ -91,7 +91,7 @@ struct cphy { | |||
91 | int state; /* Link status state machine */ | 91 | int state; /* Link status state machine */ |
92 | adapter_t *adapter; /* associated adapter */ | 92 | adapter_t *adapter; /* associated adapter */ |
93 | 93 | ||
94 | struct work_struct phy_update; | 94 | struct delayed_work phy_update; |
95 | 95 | ||
96 | u16 bmsr; | 96 | u16 bmsr; |
97 | int count; | 97 | int count; |
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c index 53bec6739812..de48eadddbc4 100644 --- a/drivers/net/chelsio/cxgb2.c +++ b/drivers/net/chelsio/cxgb2.c | |||
@@ -953,10 +953,11 @@ static void t1_netpoll(struct net_device *dev) | |||
953 | * Periodic accumulation of MAC statistics. This is used only if the MAC | 953 | * Periodic accumulation of MAC statistics. This is used only if the MAC |
954 | * does not have any other way to prevent stats counter overflow. | 954 | * does not have any other way to prevent stats counter overflow. |
955 | */ | 955 | */ |
956 | static void mac_stats_task(void *data) | 956 | static void mac_stats_task(struct work_struct *work) |
957 | { | 957 | { |
958 | int i; | 958 | int i; |
959 | struct adapter *adapter = data; | 959 | struct adapter *adapter = |
960 | container_of(work, struct adapter, stats_update_task.work); | ||
960 | 961 | ||
961 | for_each_port(adapter, i) { | 962 | for_each_port(adapter, i) { |
962 | struct port_info *p = &adapter->port[i]; | 963 | struct port_info *p = &adapter->port[i]; |
@@ -977,9 +978,10 @@ static void mac_stats_task(void *data) | |||
977 | /* | 978 | /* |
978 | * Processes elmer0 external interrupts in process context. | 979 | * Processes elmer0 external interrupts in process context. |
979 | */ | 980 | */ |
980 | static void ext_intr_task(void *data) | 981 | static void ext_intr_task(struct work_struct *work) |
981 | { | 982 | { |
982 | struct adapter *adapter = data; | 983 | struct adapter *adapter = |
984 | container_of(work, struct adapter, ext_intr_handler_task); | ||
983 | 985 | ||
984 | t1_elmer0_ext_intr_handler(adapter); | 986 | t1_elmer0_ext_intr_handler(adapter); |
985 | 987 | ||
@@ -1113,9 +1115,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
1113 | spin_lock_init(&adapter->mac_lock); | 1115 | spin_lock_init(&adapter->mac_lock); |
1114 | 1116 | ||
1115 | INIT_WORK(&adapter->ext_intr_handler_task, | 1117 | INIT_WORK(&adapter->ext_intr_handler_task, |
1116 | ext_intr_task, adapter); | 1118 | ext_intr_task); |
1117 | INIT_WORK(&adapter->stats_update_task, mac_stats_task, | 1119 | INIT_DELAYED_WORK(&adapter->stats_update_task, |
1118 | adapter); | 1120 | mac_stats_task); |
1119 | 1121 | ||
1120 | pci_set_drvdata(pdev, netdev); | 1122 | pci_set_drvdata(pdev, netdev); |
1121 | } | 1123 | } |
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c index 0b90014d5b3e..c7731b6f9de3 100644 --- a/drivers/net/chelsio/my3126.c +++ b/drivers/net/chelsio/my3126.c | |||
@@ -93,9 +93,11 @@ static int my3126_interrupt_handler(struct cphy *cphy) | |||
93 | return cphy_cause_link_change; | 93 | return cphy_cause_link_change; |
94 | } | 94 | } |
95 | 95 | ||
96 | static void my3216_poll(void *arg) | 96 | static void my3216_poll(struct work_struct *work) |
97 | { | 97 | { |
98 | my3126_interrupt_handler(arg); | 98 | struct cphy *cphy = container_of(work, struct cphy, phy_update.work); |
99 | |||
100 | my3126_interrupt_handler(cphy); | ||
99 | } | 101 | } |
100 | 102 | ||
101 | static int my3126_set_loopback(struct cphy *cphy, int on) | 103 | static int my3126_set_loopback(struct cphy *cphy, int on) |
@@ -171,7 +173,7 @@ static struct cphy *my3126_phy_create(adapter_t *adapter, | |||
171 | if (cphy) | 173 | if (cphy) |
172 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); | 174 | cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops); |
173 | 175 | ||
174 | INIT_WORK(&cphy->phy_update, my3216_poll, cphy); | 176 | INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll); |
175 | cphy->bmsr = 0; | 177 | cphy->bmsr = 0; |
176 | 178 | ||
177 | return (cphy); | 179 | return (cphy); |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 3a8df479cbda..03bf164f9e8d 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -2102,9 +2102,10 @@ static void e100_tx_timeout(struct net_device *netdev) | |||
2102 | schedule_work(&nic->tx_timeout_task); | 2102 | schedule_work(&nic->tx_timeout_task); |
2103 | } | 2103 | } |
2104 | 2104 | ||
2105 | static void e100_tx_timeout_task(struct net_device *netdev) | 2105 | static void e100_tx_timeout_task(struct work_struct *work) |
2106 | { | 2106 | { |
2107 | struct nic *nic = netdev_priv(netdev); | 2107 | struct nic *nic = container_of(work, struct nic, tx_timeout_task); |
2108 | struct net_device *netdev = nic->netdev; | ||
2108 | 2109 | ||
2109 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", | 2110 | DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", |
2110 | readb(&nic->csr->scb.status)); | 2111 | readb(&nic->csr->scb.status)); |
@@ -2637,8 +2638,7 @@ static int __devinit e100_probe(struct pci_dev *pdev, | |||
2637 | nic->blink_timer.function = e100_blink_led; | 2638 | nic->blink_timer.function = e100_blink_led; |
2638 | nic->blink_timer.data = (unsigned long)nic; | 2639 | nic->blink_timer.data = (unsigned long)nic; |
2639 | 2640 | ||
2640 | INIT_WORK(&nic->tx_timeout_task, | 2641 | INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); |
2641 | (void (*)(void *))e100_tx_timeout_task, netdev); | ||
2642 | 2642 | ||
2643 | if((err = e100_alloc(nic))) { | 2643 | if((err = e100_alloc(nic))) { |
2644 | DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); | 2644 | DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 32dde0adb683..73f3a85fd238 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -190,7 +190,7 @@ void e1000_set_ethtool_ops(struct net_device *netdev); | |||
190 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); | 190 | static void e1000_enter_82542_rst(struct e1000_adapter *adapter); |
191 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); | 191 | static void e1000_leave_82542_rst(struct e1000_adapter *adapter); |
192 | static void e1000_tx_timeout(struct net_device *dev); | 192 | static void e1000_tx_timeout(struct net_device *dev); |
193 | static void e1000_reset_task(struct net_device *dev); | 193 | static void e1000_reset_task(struct work_struct *work); |
194 | static void e1000_smartspeed(struct e1000_adapter *adapter); | 194 | static void e1000_smartspeed(struct e1000_adapter *adapter); |
195 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, | 195 | static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, |
196 | struct sk_buff *skb); | 196 | struct sk_buff *skb); |
@@ -914,8 +914,7 @@ e1000_probe(struct pci_dev *pdev, | |||
914 | adapter->phy_info_timer.function = &e1000_update_phy_info; | 914 | adapter->phy_info_timer.function = &e1000_update_phy_info; |
915 | adapter->phy_info_timer.data = (unsigned long) adapter; | 915 | adapter->phy_info_timer.data = (unsigned long) adapter; |
916 | 916 | ||
917 | INIT_WORK(&adapter->reset_task, | 917 | INIT_WORK(&adapter->reset_task, e1000_reset_task); |
918 | (void (*)(void *))e1000_reset_task, netdev); | ||
919 | 918 | ||
920 | e1000_check_options(adapter); | 919 | e1000_check_options(adapter); |
921 | 920 | ||
@@ -3306,9 +3305,10 @@ e1000_tx_timeout(struct net_device *netdev) | |||
3306 | } | 3305 | } |
3307 | 3306 | ||
3308 | static void | 3307 | static void |
3309 | e1000_reset_task(struct net_device *netdev) | 3308 | e1000_reset_task(struct work_struct *work) |
3310 | { | 3309 | { |
3311 | struct e1000_adapter *adapter = netdev_priv(netdev); | 3310 | struct e1000_adapter *adapter = |
3311 | container_of(work, struct e1000_adapter, reset_task); | ||
3312 | 3312 | ||
3313 | e1000_reinit_locked(adapter); | 3313 | e1000_reinit_locked(adapter); |
3314 | } | 3314 | } |
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 6ad696101418..83fa32f72398 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c | |||
@@ -2224,11 +2224,12 @@ static int ehea_stop(struct net_device *dev) | |||
2224 | return ret; | 2224 | return ret; |
2225 | } | 2225 | } |
2226 | 2226 | ||
2227 | static void ehea_reset_port(void *data) | 2227 | static void ehea_reset_port(struct work_struct *work) |
2228 | { | 2228 | { |
2229 | int ret; | 2229 | int ret; |
2230 | struct net_device *dev = data; | 2230 | struct ehea_port *port = |
2231 | struct ehea_port *port = netdev_priv(dev); | 2231 | container_of(work, struct ehea_port, reset_task); |
2232 | struct net_device *dev = port->netdev; | ||
2232 | 2233 | ||
2233 | port->resets++; | 2234 | port->resets++; |
2234 | down(&port->port_lock); | 2235 | down(&port->port_lock); |
@@ -2379,7 +2380,7 @@ static int ehea_setup_single_port(struct ehea_port *port, | |||
2379 | dev->tx_timeout = &ehea_tx_watchdog; | 2380 | dev->tx_timeout = &ehea_tx_watchdog; |
2380 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; | 2381 | dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; |
2381 | 2382 | ||
2382 | INIT_WORK(&port->reset_task, ehea_reset_port, dev); | 2383 | INIT_WORK(&port->reset_task, ehea_reset_port); |
2383 | 2384 | ||
2384 | ehea_set_ethtool_ops(dev); | 2385 | ehea_set_ethtool_ops(dev); |
2385 | 2386 | ||
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 1ed9cccd3c11..3c33d6f6a6a6 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -168,8 +168,9 @@ struct baycom_state { | |||
168 | int magic; | 168 | int magic; |
169 | 169 | ||
170 | struct pardevice *pdev; | 170 | struct pardevice *pdev; |
171 | struct net_device *dev; | ||
171 | unsigned int work_running; | 172 | unsigned int work_running; |
172 | struct work_struct run_work; | 173 | struct delayed_work run_work; |
173 | unsigned int modem; | 174 | unsigned int modem; |
174 | unsigned int bitrate; | 175 | unsigned int bitrate; |
175 | unsigned char stat; | 176 | unsigned char stat; |
@@ -659,16 +660,18 @@ static int receive(struct net_device *dev, int cnt) | |||
659 | #define GETTICK(x) | 660 | #define GETTICK(x) |
660 | #endif /* __i386__ */ | 661 | #endif /* __i386__ */ |
661 | 662 | ||
662 | static void epp_bh(struct net_device *dev) | 663 | static void epp_bh(struct work_struct *work) |
663 | { | 664 | { |
665 | struct net_device *dev; | ||
664 | struct baycom_state *bc; | 666 | struct baycom_state *bc; |
665 | struct parport *pp; | 667 | struct parport *pp; |
666 | unsigned char stat; | 668 | unsigned char stat; |
667 | unsigned char tmp[2]; | 669 | unsigned char tmp[2]; |
668 | unsigned int time1 = 0, time2 = 0, time3 = 0; | 670 | unsigned int time1 = 0, time2 = 0, time3 = 0; |
669 | int cnt, cnt2; | 671 | int cnt, cnt2; |
670 | 672 | ||
671 | bc = netdev_priv(dev); | 673 | bc = container_of(work, struct baycom_state, run_work.work); |
674 | dev = bc->dev; | ||
672 | if (!bc->work_running) | 675 | if (!bc->work_running) |
673 | return; | 676 | return; |
674 | baycom_int_freq(bc); | 677 | baycom_int_freq(bc); |
@@ -889,7 +892,7 @@ static int epp_open(struct net_device *dev) | |||
889 | return -EBUSY; | 892 | return -EBUSY; |
890 | } | 893 | } |
891 | dev->irq = /*pp->irq*/ 0; | 894 | dev->irq = /*pp->irq*/ 0; |
892 | INIT_WORK(&bc->run_work, (void *)(void *)epp_bh, dev); | 895 | INIT_DELAYED_WORK(&bc->run_work, epp_bh); |
893 | bc->work_running = 1; | 896 | bc->work_running = 1; |
894 | bc->modem = EPP_CONVENTIONAL; | 897 | bc->modem = EPP_CONVENTIONAL; |
895 | if (eppconfig(bc)) | 898 | if (eppconfig(bc)) |
@@ -1213,6 +1216,7 @@ static void __init baycom_epp_dev_setup(struct net_device *dev) | |||
1213 | /* | 1216 | /* |
1214 | * initialize part of the baycom_state struct | 1217 | * initialize part of the baycom_state struct |
1215 | */ | 1218 | */ |
1219 | bc->dev = dev; | ||
1216 | bc->magic = BAYCOM_MAGIC; | 1220 | bc->magic = BAYCOM_MAGIC; |
1217 | bc->cfg.fclk = 19666600; | 1221 | bc->cfg.fclk = 19666600; |
1218 | bc->cfg.bps = 9600; | 1222 | bc->cfg.bps = 9600; |
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index 0f8b9afd55b4..e6e721aff6f6 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c | |||
@@ -252,7 +252,7 @@ static inline void z8530_isr(struct scc_info *info); | |||
252 | static irqreturn_t scc_isr(int irq, void *dev_id); | 252 | static irqreturn_t scc_isr(int irq, void *dev_id); |
253 | static void rx_isr(struct scc_priv *priv); | 253 | static void rx_isr(struct scc_priv *priv); |
254 | static void special_condition(struct scc_priv *priv, int rc); | 254 | static void special_condition(struct scc_priv *priv, int rc); |
255 | static void rx_bh(void *arg); | 255 | static void rx_bh(struct work_struct *); |
256 | static void tx_isr(struct scc_priv *priv); | 256 | static void tx_isr(struct scc_priv *priv); |
257 | static void es_isr(struct scc_priv *priv); | 257 | static void es_isr(struct scc_priv *priv); |
258 | static void tm_isr(struct scc_priv *priv); | 258 | static void tm_isr(struct scc_priv *priv); |
@@ -579,7 +579,7 @@ static int __init setup_adapter(int card_base, int type, int n) | |||
579 | priv->param.clocks = TCTRxCP | RCRTxCP; | 579 | priv->param.clocks = TCTRxCP | RCRTxCP; |
580 | priv->param.persist = 256; | 580 | priv->param.persist = 256; |
581 | priv->param.dma = -1; | 581 | priv->param.dma = -1; |
582 | INIT_WORK(&priv->rx_work, rx_bh, priv); | 582 | INIT_WORK(&priv->rx_work, rx_bh); |
583 | dev->priv = priv; | 583 | dev->priv = priv; |
584 | sprintf(dev->name, "dmascc%i", 2 * n + i); | 584 | sprintf(dev->name, "dmascc%i", 2 * n + i); |
585 | dev->base_addr = card_base; | 585 | dev->base_addr = card_base; |
@@ -1272,9 +1272,9 @@ static void special_condition(struct scc_priv *priv, int rc) | |||
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | 1274 | ||
1275 | static void rx_bh(void *arg) | 1275 | static void rx_bh(struct work_struct *ugli_api) |
1276 | { | 1276 | { |
1277 | struct scc_priv *priv = arg; | 1277 | struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work); |
1278 | int i = priv->rx_tail; | 1278 | int i = priv->rx_tail; |
1279 | int cb; | 1279 | int cb; |
1280 | unsigned long flags; | 1280 | unsigned long flags; |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index b32c52ed19d7..f0c61f3b2a82 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -560,9 +560,9 @@ static inline int mcs_find_endpoints(struct mcs_cb *mcs, | |||
560 | return ret; | 560 | return ret; |
561 | } | 561 | } |
562 | 562 | ||
563 | static void mcs_speed_work(void *arg) | 563 | static void mcs_speed_work(struct work_struct *work) |
564 | { | 564 | { |
565 | struct mcs_cb *mcs = arg; | 565 | struct mcs_cb *mcs = container_of(work, struct mcs_cb, work); |
566 | struct net_device *netdev = mcs->netdev; | 566 | struct net_device *netdev = mcs->netdev; |
567 | 567 | ||
568 | mcs_speed_change(mcs); | 568 | mcs_speed_change(mcs); |
@@ -927,7 +927,7 @@ static int mcs_probe(struct usb_interface *intf, | |||
927 | irda_qos_bits_to_value(&mcs->qos); | 927 | irda_qos_bits_to_value(&mcs->qos); |
928 | 928 | ||
929 | /* Speed change work initialisation*/ | 929 | /* Speed change work initialisation*/ |
930 | INIT_WORK(&mcs->work, mcs_speed_work, mcs); | 930 | INIT_WORK(&mcs->work, mcs_speed_work); |
931 | 931 | ||
932 | /* Override the network functions we need to use */ | 932 | /* Override the network functions we need to use */ |
933 | ndev->hard_start_xmit = mcs_hard_xmit; | 933 | ndev->hard_start_xmit = mcs_hard_xmit; |
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h index 9fa294a546d6..2a57bc67ce35 100644 --- a/drivers/net/irda/sir-dev.h +++ b/drivers/net/irda/sir-dev.h | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | struct sir_fsm { | 23 | struct sir_fsm { |
24 | struct semaphore sem; | 24 | struct semaphore sem; |
25 | struct work_struct work; | 25 | struct delayed_work work; |
26 | unsigned state, substate; | 26 | unsigned state, substate; |
27 | int param; | 27 | int param; |
28 | int result; | 28 | int result; |
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 3b5854d10c17..17b0c3ab6201 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c | |||
@@ -100,9 +100,9 @@ static int sirdev_tx_complete_fsm(struct sir_dev *dev) | |||
100 | * Both must be unlocked/restarted on completion - but only on final exit. | 100 | * Both must be unlocked/restarted on completion - but only on final exit. |
101 | */ | 101 | */ |
102 | 102 | ||
103 | static void sirdev_config_fsm(void *data) | 103 | static void sirdev_config_fsm(struct work_struct *work) |
104 | { | 104 | { |
105 | struct sir_dev *dev = data; | 105 | struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work); |
106 | struct sir_fsm *fsm = &dev->fsm; | 106 | struct sir_fsm *fsm = &dev->fsm; |
107 | int next_state; | 107 | int next_state; |
108 | int ret = -1; | 108 | int ret = -1; |
@@ -309,8 +309,8 @@ int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned par | |||
309 | fsm->param = param; | 309 | fsm->param = param; |
310 | fsm->result = 0; | 310 | fsm->result = 0; |
311 | 311 | ||
312 | INIT_WORK(&fsm->work, sirdev_config_fsm, dev); | 312 | INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm); |
313 | queue_work(irda_sir_wq, &fsm->work); | 313 | queue_delayed_work(irda_sir_wq, &fsm->work, 0); |
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
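Once a work item becomes a delayed_work, it has to go through the delayed queueing helpers even when no delay is wanted; the sir_dev hunk above uses queue_delayed_work(..., 0) for exactly that. A small sketch of the same idea with an invented FSM wrapper:

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        /* Hypothetical FSM wrapper; work.work mirrors the fsm.work.work lookup above. */
        struct foo_fsm {
                struct delayed_work work;       /* was: struct work_struct */
                int state;
        };

        static void foo_fsm_step(struct work_struct *work)
        {
                struct foo_fsm *fsm = container_of(work, struct foo_fsm, work.work);

                fsm->state++;
        }

        static int foo_fsm_kick(struct workqueue_struct *wq, struct foo_fsm *fsm)
        {
                INIT_DELAYED_WORK(&fsm->work, foo_fsm_step);
                /* delay 0 keeps the old queue_work() "run now" behaviour */
                return queue_delayed_work(wq, &fsm->work, 0);
        }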
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c index 2284e2ce1692..d6f4f185bf37 100644 --- a/drivers/net/iseries_veth.c +++ b/drivers/net/iseries_veth.c | |||
@@ -166,7 +166,7 @@ struct veth_msg { | |||
166 | 166 | ||
167 | struct veth_lpar_connection { | 167 | struct veth_lpar_connection { |
168 | HvLpIndex remote_lp; | 168 | HvLpIndex remote_lp; |
169 | struct work_struct statemachine_wq; | 169 | struct delayed_work statemachine_wq; |
170 | struct veth_msg *msgs; | 170 | struct veth_msg *msgs; |
171 | int num_events; | 171 | int num_events; |
172 | struct veth_cap_data local_caps; | 172 | struct veth_cap_data local_caps; |
@@ -456,7 +456,7 @@ static struct kobj_type veth_port_ktype = { | |||
456 | 456 | ||
457 | static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) | 457 | static inline void veth_kick_statemachine(struct veth_lpar_connection *cnx) |
458 | { | 458 | { |
459 | schedule_work(&cnx->statemachine_wq); | 459 | schedule_delayed_work(&cnx->statemachine_wq, 0); |
460 | } | 460 | } |
461 | 461 | ||
462 | static void veth_take_cap(struct veth_lpar_connection *cnx, | 462 | static void veth_take_cap(struct veth_lpar_connection *cnx, |
@@ -638,9 +638,11 @@ static int veth_process_caps(struct veth_lpar_connection *cnx) | |||
638 | } | 638 | } |
639 | 639 | ||
640 | /* FIXME: The gotos here are a bit dubious */ | 640 | /* FIXME: The gotos here are a bit dubious */ |
641 | static void veth_statemachine(void *p) | 641 | static void veth_statemachine(struct work_struct *work) |
642 | { | 642 | { |
643 | struct veth_lpar_connection *cnx = (struct veth_lpar_connection *)p; | 643 | struct veth_lpar_connection *cnx = |
644 | container_of(work, struct veth_lpar_connection, | ||
645 | statemachine_wq.work); | ||
644 | int rlp = cnx->remote_lp; | 646 | int rlp = cnx->remote_lp; |
645 | int rc; | 647 | int rc; |
646 | 648 | ||
@@ -827,7 +829,7 @@ static int veth_init_connection(u8 rlp) | |||
827 | 829 | ||
828 | cnx->remote_lp = rlp; | 830 | cnx->remote_lp = rlp; |
829 | spin_lock_init(&cnx->lock); | 831 | spin_lock_init(&cnx->lock); |
830 | INIT_WORK(&cnx->statemachine_wq, veth_statemachine, cnx); | 832 | INIT_DELAYED_WORK(&cnx->statemachine_wq, veth_statemachine); |
831 | 833 | ||
832 | init_timer(&cnx->ack_timer); | 834 | init_timer(&cnx->ack_timer); |
833 | cnx->ack_timer.function = veth_timed_ack; | 835 | cnx->ack_timer.function = veth_timed_ack; |
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index 7b127212e62b..e628126c9c49 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c | |||
@@ -106,7 +106,7 @@ static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter); | |||
106 | static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); | 106 | static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); |
107 | void ixgb_set_ethtool_ops(struct net_device *netdev); | 107 | void ixgb_set_ethtool_ops(struct net_device *netdev); |
108 | static void ixgb_tx_timeout(struct net_device *dev); | 108 | static void ixgb_tx_timeout(struct net_device *dev); |
109 | static void ixgb_tx_timeout_task(struct net_device *dev); | 109 | static void ixgb_tx_timeout_task(struct work_struct *work); |
110 | static void ixgb_vlan_rx_register(struct net_device *netdev, | 110 | static void ixgb_vlan_rx_register(struct net_device *netdev, |
111 | struct vlan_group *grp); | 111 | struct vlan_group *grp); |
112 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); | 112 | static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); |
@@ -489,8 +489,7 @@ ixgb_probe(struct pci_dev *pdev, | |||
489 | adapter->watchdog_timer.function = &ixgb_watchdog; | 489 | adapter->watchdog_timer.function = &ixgb_watchdog; |
490 | adapter->watchdog_timer.data = (unsigned long)adapter; | 490 | adapter->watchdog_timer.data = (unsigned long)adapter; |
491 | 491 | ||
492 | INIT_WORK(&adapter->tx_timeout_task, | 492 | INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); |
493 | (void (*)(void *))ixgb_tx_timeout_task, netdev); | ||
494 | 493 | ||
495 | strcpy(netdev->name, "eth%d"); | 494 | strcpy(netdev->name, "eth%d"); |
496 | if((err = register_netdev(netdev))) | 495 | if((err = register_netdev(netdev))) |
@@ -1493,9 +1492,10 @@ ixgb_tx_timeout(struct net_device *netdev) | |||
1493 | } | 1492 | } |
1494 | 1493 | ||
1495 | static void | 1494 | static void |
1496 | ixgb_tx_timeout_task(struct net_device *netdev) | 1495 | ixgb_tx_timeout_task(struct work_struct *work) |
1497 | { | 1496 | { |
1498 | struct ixgb_adapter *adapter = netdev_priv(netdev); | 1497 | struct ixgb_adapter *adapter = |
1498 | container_of(work, struct ixgb_adapter, tx_timeout_task); | ||
1499 | 1499 | ||
1500 | adapter->tx_timeout_count++; | 1500 | adapter->tx_timeout_count++; |
1501 | ixgb_down(adapter, TRUE); | 1501 | ixgb_down(adapter, TRUE); |
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index 9997081c6dae..d9f48bb04b05 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c | |||
@@ -277,9 +277,11 @@ static void mv643xx_eth_tx_timeout(struct net_device *dev) | |||
277 | * | 277 | * |
278 | * Actual routine to reset the adapter when a timeout on Tx has occurred | 278 | * Actual routine to reset the adapter when a timeout on Tx has occurred |
279 | */ | 279 | */ |
280 | static void mv643xx_eth_tx_timeout_task(struct net_device *dev) | 280 | static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) |
281 | { | 281 | { |
282 | struct mv643xx_private *mp = netdev_priv(dev); | 282 | struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, |
283 | tx_timeout_task); | ||
284 | struct net_device *dev = mp->mii.dev; /* yuck */ | ||
283 | 285 | ||
284 | if (!netif_running(dev)) | 286 | if (!netif_running(dev)) |
285 | return; | 287 | return; |
@@ -1360,8 +1362,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1360 | #endif | 1362 | #endif |
1361 | 1363 | ||
1362 | /* Configure the timeout task */ | 1364 | /* Configure the timeout task */ |
1363 | INIT_WORK(&mp->tx_timeout_task, | 1365 | INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task); |
1364 | (void (*)(void *))mv643xx_eth_tx_timeout_task, dev); | ||
1365 | 1366 | ||
1366 | spin_lock_init(&mp->lock); | 1367 | spin_lock_init(&mp->lock); |
1367 | 1368 | ||
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 36350e6db1c1..38df42802386 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -2615,9 +2615,10 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) | |||
2615 | * This watchdog is used to check whether the board has suffered | 2615 | * This watchdog is used to check whether the board has suffered |
2616 | * from a parity error and needs to be recovered. | 2616 | * from a parity error and needs to be recovered. |
2617 | */ | 2617 | */ |
2618 | static void myri10ge_watchdog(void *arg) | 2618 | static void myri10ge_watchdog(struct work_struct *work) |
2619 | { | 2619 | { |
2620 | struct myri10ge_priv *mgp = arg; | 2620 | struct myri10ge_priv *mgp = |
2621 | container_of(work, struct myri10ge_priv, watchdog_work); | ||
2621 | u32 reboot; | 2622 | u32 reboot; |
2622 | int status; | 2623 | int status; |
2623 | u16 cmd, vendor; | 2624 | u16 cmd, vendor; |
@@ -2887,7 +2888,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2887 | (unsigned long)mgp); | 2888 | (unsigned long)mgp); |
2888 | 2889 | ||
2889 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); | 2890 | SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops); |
2890 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp); | 2891 | INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog); |
2891 | status = register_netdev(netdev); | 2892 | status = register_netdev(netdev); |
2892 | if (status != 0) { | 2893 | if (status != 0) { |
2893 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); | 2894 | dev_err(&pdev->dev, "register_netdev failed: %d\n", status); |
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index d925053fe597..9c588af8ab74 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h | |||
@@ -714,6 +714,7 @@ struct netxen_adapter { | |||
714 | spinlock_t lock; | 714 | spinlock_t lock; |
715 | struct work_struct watchdog_task; | 715 | struct work_struct watchdog_task; |
716 | struct work_struct tx_timeout_task; | 716 | struct work_struct tx_timeout_task; |
717 | struct net_device *netdev; | ||
717 | struct timer_list watchdog_timer; | 718 | struct timer_list watchdog_timer; |
718 | 719 | ||
719 | u32 curr_window; | 720 | u32 curr_window; |
@@ -921,7 +922,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | |||
921 | struct netxen_port *port); | 922 | struct netxen_port *port); |
922 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter); | 923 | int netxen_nic_rx_has_work(struct netxen_adapter *adapter); |
923 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter); | 924 | int netxen_nic_tx_has_work(struct netxen_adapter *adapter); |
924 | void netxen_watchdog_task(unsigned long v); | 925 | void netxen_watchdog_task(struct work_struct *work); |
925 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, | 926 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, |
926 | u32 ringid); | 927 | u32 ringid); |
927 | void netxen_process_cmd_ring(unsigned long data); | 928 | void netxen_process_cmd_ring(unsigned long data); |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 0dca029bc3e5..eae18236aefa 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -710,12 +710,13 @@ static inline int netxen_nic_check_temp(struct netxen_adapter *adapter) | |||
710 | return rv; | 710 | return rv; |
711 | } | 711 | } |
712 | 712 | ||
713 | void netxen_watchdog_task(unsigned long v) | 713 | void netxen_watchdog_task(struct work_struct *work) |
714 | { | 714 | { |
715 | int port_num; | 715 | int port_num; |
716 | struct netxen_port *port; | 716 | struct netxen_port *port; |
717 | struct net_device *netdev; | 717 | struct net_device *netdev; |
718 | struct netxen_adapter *adapter = (struct netxen_adapter *)v; | 718 | struct netxen_adapter *adapter = |
719 | container_of(work, struct netxen_adapter, watchdog_task); | ||
719 | 720 | ||
720 | if (netxen_nic_check_temp(adapter)) | 721 | if (netxen_nic_check_temp(adapter)) |
721 | return; | 722 | return; |
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 1cb662d5bd76..df0bb36a1cfb 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c | |||
@@ -64,7 +64,7 @@ static int netxen_nic_open(struct net_device *netdev); | |||
64 | static int netxen_nic_close(struct net_device *netdev); | 64 | static int netxen_nic_close(struct net_device *netdev); |
65 | static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); | 65 | static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *); |
66 | static void netxen_tx_timeout(struct net_device *netdev); | 66 | static void netxen_tx_timeout(struct net_device *netdev); |
67 | static void netxen_tx_timeout_task(struct net_device *netdev); | 67 | static void netxen_tx_timeout_task(struct work_struct *work); |
68 | static void netxen_watchdog(unsigned long); | 68 | static void netxen_watchdog(unsigned long); |
69 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); | 69 | static int netxen_handle_int(struct netxen_adapter *, struct net_device *); |
70 | static int netxen_nic_ioctl(struct net_device *netdev, | 70 | static int netxen_nic_ioctl(struct net_device *netdev, |
@@ -274,8 +274,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
274 | adapter->ahw.xg_linkup = 0; | 274 | adapter->ahw.xg_linkup = 0; |
275 | adapter->watchdog_timer.function = &netxen_watchdog; | 275 | adapter->watchdog_timer.function = &netxen_watchdog; |
276 | adapter->watchdog_timer.data = (unsigned long)adapter; | 276 | adapter->watchdog_timer.data = (unsigned long)adapter; |
277 | INIT_WORK(&adapter->watchdog_task, | 277 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); |
278 | (void (*)(void *))netxen_watchdog_task, adapter); | ||
279 | adapter->ahw.pdev = pdev; | 278 | adapter->ahw.pdev = pdev; |
280 | adapter->proc_cmd_buf_counter = 0; | 279 | adapter->proc_cmd_buf_counter = 0; |
281 | pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); | 280 | pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); |
@@ -379,8 +378,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
379 | dev_addr); | 378 | dev_addr); |
380 | } | 379 | } |
381 | } | 380 | } |
382 | INIT_WORK(&adapter->tx_timeout_task, | 381 | adapter->netdev = netdev; |
383 | (void (*)(void *))netxen_tx_timeout_task, netdev); | 382 | INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task); |
384 | netif_carrier_off(netdev); | 383 | netif_carrier_off(netdev); |
385 | netif_stop_queue(netdev); | 384 | netif_stop_queue(netdev); |
386 | 385 | ||
@@ -938,18 +937,20 @@ static void netxen_tx_timeout(struct net_device *netdev) | |||
938 | schedule_work(&adapter->tx_timeout_task); | 937 | schedule_work(&adapter->tx_timeout_task); |
939 | } | 938 | } |
940 | 939 | ||
941 | static void netxen_tx_timeout_task(struct net_device *netdev) | 940 | static void netxen_tx_timeout_task(struct work_struct *work) |
942 | { | 941 | { |
943 | struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); | 942 | struct netxen_adapter *adapter = |
943 | container_of(work, struct netxen_adapter, tx_timeout_task); | ||
944 | struct net_device *netdev = adapter->netdev; | ||
944 | unsigned long flags; | 945 | unsigned long flags; |
945 | 946 | ||
946 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | 947 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", |
947 | netxen_nic_driver_name, netdev->name); | 948 | netxen_nic_driver_name, netdev->name); |
948 | 949 | ||
949 | spin_lock_irqsave(&port->adapter->lock, flags); | 950 | spin_lock_irqsave(&adapter->lock, flags); |
950 | netxen_nic_close(netdev); | 951 | netxen_nic_close(netdev); |
951 | netxen_nic_open(netdev); | 952 | netxen_nic_open(netdev); |
952 | spin_unlock_irqrestore(&port->adapter->lock, flags); | 953 | spin_unlock_irqrestore(&adapter->lock, flags); |
953 | netdev->trans_start = jiffies; | 954 | netdev->trans_start = jiffies; |
954 | netif_wake_queue(netdev); | 955 | netif_wake_queue(netdev); |
955 | } | 956 | } |
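Since the handler no longer receives an arbitrary data pointer, work functions that used to be handed the net_device now need a back-pointer kept in the private structure; that is what the new adapter->netdev assignment in probe above provides. A short sketch of the idiom, with hypothetical names:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_adapter {
        struct net_device *netdev;      /* back-pointer stored at probe time */
        struct work_struct tx_timeout_task;
};

static void foo_tx_timeout_task(struct work_struct *work)
{
        struct foo_adapter *adapter =
                container_of(work, struct foo_adapter, tx_timeout_task);
        struct net_device *netdev = adapter->netdev;

        netif_wake_queue(netdev);       /* reach the device through the back-pointer */
}

static void foo_probe_init(struct foo_adapter *adapter, struct net_device *netdev)
{
        adapter->netdev = netdev;       /* must be set before the work can run */
        INIT_WORK(&adapter->tx_timeout_task, foo_tx_timeout_task);
}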
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c index b0127c71a5b6..312e0e331712 100644 --- a/drivers/net/ns83820.c +++ b/drivers/net/ns83820.c | |||
@@ -427,6 +427,7 @@ struct ns83820 { | |||
427 | u8 __iomem *base; | 427 | u8 __iomem *base; |
428 | 428 | ||
429 | struct pci_dev *pci_dev; | 429 | struct pci_dev *pci_dev; |
430 | struct net_device *ndev; | ||
430 | 431 | ||
431 | #ifdef NS83820_VLAN_ACCEL_SUPPORT | 432 | #ifdef NS83820_VLAN_ACCEL_SUPPORT |
432 | struct vlan_group *vlgrp; | 433 | struct vlan_group *vlgrp; |
@@ -631,10 +632,10 @@ static void fastcall rx_refill_atomic(struct net_device *ndev) | |||
631 | } | 632 | } |
632 | 633 | ||
633 | /* REFILL */ | 634 | /* REFILL */ |
634 | static inline void queue_refill(void *_dev) | 635 | static inline void queue_refill(struct work_struct *work) |
635 | { | 636 | { |
636 | struct net_device *ndev = _dev; | 637 | struct ns83820 *dev = container_of(work, struct ns83820, tq_refill); |
637 | struct ns83820 *dev = PRIV(ndev); | 638 | struct net_device *ndev = dev->ndev; |
638 | 639 | ||
639 | rx_refill(ndev, GFP_KERNEL); | 640 | rx_refill(ndev, GFP_KERNEL); |
640 | if (dev->rx_info.up) | 641 | if (dev->rx_info.up) |
@@ -1841,6 +1842,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1841 | 1842 | ||
1842 | ndev = alloc_etherdev(sizeof(struct ns83820)); | 1843 | ndev = alloc_etherdev(sizeof(struct ns83820)); |
1843 | dev = PRIV(ndev); | 1844 | dev = PRIV(ndev); |
1845 | dev->ndev = ndev; | ||
1844 | err = -ENOMEM; | 1846 | err = -ENOMEM; |
1845 | if (!dev) | 1847 | if (!dev) |
1846 | goto out; | 1848 | goto out; |
@@ -1853,7 +1855,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev, const struct pci_ | |||
1853 | SET_MODULE_OWNER(ndev); | 1855 | SET_MODULE_OWNER(ndev); |
1854 | SET_NETDEV_DEV(ndev, &pci_dev->dev); | 1856 | SET_NETDEV_DEV(ndev, &pci_dev->dev); |
1855 | 1857 | ||
1856 | INIT_WORK(&dev->tq_refill, queue_refill, ndev); | 1858 | INIT_WORK(&dev->tq_refill, queue_refill); |
1857 | tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); | 1859 | tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev); |
1858 | 1860 | ||
1859 | err = pci_enable_device(pci_dev); | 1861 | err = pci_enable_device(pci_dev); |
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index 69813406782d..8478dca3d8d1 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c | |||
@@ -332,6 +332,7 @@ static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); | |||
332 | */ | 332 | */ |
333 | 333 | ||
334 | typedef struct local_info_t { | 334 | typedef struct local_info_t { |
335 | struct net_device *dev; | ||
335 | struct pcmcia_device *p_dev; | 336 | struct pcmcia_device *p_dev; |
336 | dev_node_t node; | 337 | dev_node_t node; |
337 | struct net_device_stats stats; | 338 | struct net_device_stats stats; |
@@ -353,7 +354,7 @@ typedef struct local_info_t { | |||
353 | */ | 354 | */ |
354 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); | 355 | static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); |
355 | static void do_tx_timeout(struct net_device *dev); | 356 | static void do_tx_timeout(struct net_device *dev); |
356 | static void xirc2ps_tx_timeout_task(void *data); | 357 | static void xirc2ps_tx_timeout_task(struct work_struct *work); |
357 | static struct net_device_stats *do_get_stats(struct net_device *dev); | 358 | static struct net_device_stats *do_get_stats(struct net_device *dev); |
358 | static void set_addresses(struct net_device *dev); | 359 | static void set_addresses(struct net_device *dev); |
359 | static void set_multicast_list(struct net_device *dev); | 360 | static void set_multicast_list(struct net_device *dev); |
@@ -567,6 +568,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
567 | if (!dev) | 568 | if (!dev) |
568 | return -ENOMEM; | 569 | return -ENOMEM; |
569 | local = netdev_priv(dev); | 570 | local = netdev_priv(dev); |
571 | local->dev = dev; | ||
570 | local->p_dev = link; | 572 | local->p_dev = link; |
571 | link->priv = dev; | 573 | link->priv = dev; |
572 | 574 | ||
@@ -591,7 +593,7 @@ xirc2ps_probe(struct pcmcia_device *link) | |||
591 | #ifdef HAVE_TX_TIMEOUT | 593 | #ifdef HAVE_TX_TIMEOUT |
592 | dev->tx_timeout = do_tx_timeout; | 594 | dev->tx_timeout = do_tx_timeout; |
593 | dev->watchdog_timeo = TX_TIMEOUT; | 595 | dev->watchdog_timeo = TX_TIMEOUT; |
594 | INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev); | 596 | INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task); |
595 | #endif | 597 | #endif |
596 | 598 | ||
597 | return xirc2ps_config(link); | 599 | return xirc2ps_config(link); |
@@ -1324,9 +1326,11 @@ xirc2ps_interrupt(int irq, void *dev_id) | |||
1324 | /*====================================================================*/ | 1326 | /*====================================================================*/ |
1325 | 1327 | ||
1326 | static void | 1328 | static void |
1327 | xirc2ps_tx_timeout_task(void *data) | 1329 | xirc2ps_tx_timeout_task(struct work_struct *work) |
1328 | { | 1330 | { |
1329 | struct net_device *dev = data; | 1331 | local_info_t *local = |
1332 | container_of(work, local_info_t, tx_timeout_task); | ||
1333 | struct net_device *dev = local->dev; | ||
1330 | /* reset the card */ | 1334 | /* reset the card */ |
1331 | do_reset(dev,1); | 1335 | do_reset(dev,1); |
1332 | dev->trans_start = jiffies; | 1336 | dev->trans_start = jiffies; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 88237bdb5255..4044bb1ada86 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -397,7 +397,7 @@ out_unlock: | |||
397 | EXPORT_SYMBOL(phy_start_aneg); | 397 | EXPORT_SYMBOL(phy_start_aneg); |
398 | 398 | ||
399 | 399 | ||
400 | static void phy_change(void *data); | 400 | static void phy_change(struct work_struct *work); |
401 | static void phy_timer(unsigned long data); | 401 | static void phy_timer(unsigned long data); |
402 | 402 | ||
403 | /* phy_start_machine: | 403 | /* phy_start_machine: |
@@ -555,7 +555,7 @@ int phy_start_interrupts(struct phy_device *phydev) | |||
555 | { | 555 | { |
556 | int err = 0; | 556 | int err = 0; |
557 | 557 | ||
558 | INIT_WORK(&phydev->phy_queue, phy_change, phydev); | 558 | INIT_WORK(&phydev->phy_queue, phy_change); |
559 | 559 | ||
560 | if (request_irq(phydev->irq, phy_interrupt, | 560 | if (request_irq(phydev->irq, phy_interrupt, |
561 | IRQF_SHARED, | 561 | IRQF_SHARED, |
@@ -598,10 +598,11 @@ EXPORT_SYMBOL(phy_stop_interrupts); | |||
598 | 598 | ||
599 | 599 | ||
600 | /* Scheduled by the phy_interrupt/timer to handle PHY changes */ | 600 | /* Scheduled by the phy_interrupt/timer to handle PHY changes */ |
601 | static void phy_change(void *data) | 601 | static void phy_change(struct work_struct *work) |
602 | { | 602 | { |
603 | int err; | 603 | int err; |
604 | struct phy_device *phydev = data; | 604 | struct phy_device *phydev = |
605 | container_of(work, struct phy_device, phy_queue); | ||
605 | 606 | ||
606 | err = phy_disable_interrupts(phydev); | 607 | err = phy_disable_interrupts(phydev); |
607 | 608 | ||
diff --git a/drivers/net/plip.c b/drivers/net/plip.c index 71afb274498f..6bb085f54437 100644 --- a/drivers/net/plip.c +++ b/drivers/net/plip.c | |||
@@ -138,9 +138,9 @@ static const unsigned int net_debug = NET_DEBUG; | |||
138 | #define PLIP_NIBBLE_WAIT 3000 | 138 | #define PLIP_NIBBLE_WAIT 3000 |
139 | 139 | ||
140 | /* Bottom halves */ | 140 | /* Bottom halves */ |
141 | static void plip_kick_bh(struct net_device *dev); | 141 | static void plip_kick_bh(struct work_struct *work); |
142 | static void plip_bh(struct net_device *dev); | 142 | static void plip_bh(struct work_struct *work); |
143 | static void plip_timer_bh(struct net_device *dev); | 143 | static void plip_timer_bh(struct work_struct *work); |
144 | 144 | ||
145 | /* Interrupt handler */ | 145 | /* Interrupt handler */ |
146 | static void plip_interrupt(int irq, void *dev_id); | 146 | static void plip_interrupt(int irq, void *dev_id); |
@@ -207,9 +207,10 @@ struct plip_local { | |||
207 | 207 | ||
208 | struct net_local { | 208 | struct net_local { |
209 | struct net_device_stats enet_stats; | 209 | struct net_device_stats enet_stats; |
210 | struct net_device *dev; | ||
210 | struct work_struct immediate; | 211 | struct work_struct immediate; |
211 | struct work_struct deferred; | 212 | struct delayed_work deferred; |
212 | struct work_struct timer; | 213 | struct delayed_work timer; |
213 | struct plip_local snd_data; | 214 | struct plip_local snd_data; |
214 | struct plip_local rcv_data; | 215 | struct plip_local rcv_data; |
215 | struct pardevice *pardev; | 216 | struct pardevice *pardev; |
@@ -306,11 +307,11 @@ plip_init_netdev(struct net_device *dev) | |||
306 | nl->nibble = PLIP_NIBBLE_WAIT; | 307 | nl->nibble = PLIP_NIBBLE_WAIT; |
307 | 308 | ||
308 | /* Initialize task queue structures */ | 309 | /* Initialize task queue structures */ |
309 | INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev); | 310 | INIT_WORK(&nl->immediate, plip_bh); |
310 | INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev); | 311 | INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh); |
311 | 312 | ||
312 | if (dev->irq == -1) | 313 | if (dev->irq == -1) |
313 | INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev); | 314 | INIT_DELAYED_WORK(&nl->timer, plip_timer_bh); |
314 | 315 | ||
315 | spin_lock_init(&nl->lock); | 316 | spin_lock_init(&nl->lock); |
316 | } | 317 | } |
@@ -319,9 +320,10 @@ plip_init_netdev(struct net_device *dev) | |||
319 | This routine is kicked by do_timer(). | 320 | This routine is kicked by do_timer(). |
320 | Request `plip_bh' to be invoked. */ | 321 | Request `plip_bh' to be invoked. */ |
321 | static void | 322 | static void |
322 | plip_kick_bh(struct net_device *dev) | 323 | plip_kick_bh(struct work_struct *work) |
323 | { | 324 | { |
324 | struct net_local *nl = netdev_priv(dev); | 325 | struct net_local *nl = |
326 | container_of(work, struct net_local, deferred.work); | ||
325 | 327 | ||
326 | if (nl->is_deferred) | 328 | if (nl->is_deferred) |
327 | schedule_work(&nl->immediate); | 329 | schedule_work(&nl->immediate); |
@@ -362,9 +364,9 @@ static const plip_func connection_state_table[] = | |||
362 | 364 | ||
363 | /* Bottom half handler of PLIP. */ | 365 | /* Bottom half handler of PLIP. */ |
364 | static void | 366 | static void |
365 | plip_bh(struct net_device *dev) | 367 | plip_bh(struct work_struct *work) |
366 | { | 368 | { |
367 | struct net_local *nl = netdev_priv(dev); | 369 | struct net_local *nl = container_of(work, struct net_local, immediate); |
368 | struct plip_local *snd = &nl->snd_data; | 370 | struct plip_local *snd = &nl->snd_data; |
369 | struct plip_local *rcv = &nl->rcv_data; | 371 | struct plip_local *rcv = &nl->rcv_data; |
370 | plip_func f; | 372 | plip_func f; |
@@ -372,20 +374,21 @@ plip_bh(struct net_device *dev) | |||
372 | 374 | ||
373 | nl->is_deferred = 0; | 375 | nl->is_deferred = 0; |
374 | f = connection_state_table[nl->connection]; | 376 | f = connection_state_table[nl->connection]; |
375 | if ((r = (*f)(dev, nl, snd, rcv)) != OK | 377 | if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK |
376 | && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) { | 378 | && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) { |
377 | nl->is_deferred = 1; | 379 | nl->is_deferred = 1; |
378 | schedule_delayed_work(&nl->deferred, 1); | 380 | schedule_delayed_work(&nl->deferred, 1); |
379 | } | 381 | } |
380 | } | 382 | } |
381 | 383 | ||
382 | static void | 384 | static void |
383 | plip_timer_bh(struct net_device *dev) | 385 | plip_timer_bh(struct work_struct *work) |
384 | { | 386 | { |
385 | struct net_local *nl = netdev_priv(dev); | 387 | struct net_local *nl = |
388 | container_of(work, struct net_local, timer.work); | ||
386 | 389 | ||
387 | if (!(atomic_read (&nl->kill_timer))) { | 390 | if (!(atomic_read (&nl->kill_timer))) { |
388 | plip_interrupt (-1, dev); | 391 | plip_interrupt (-1, nl->dev); |
389 | 392 | ||
390 | schedule_delayed_work(&nl->timer, 1); | 393 | schedule_delayed_work(&nl->timer, 1); |
391 | } | 394 | } |
@@ -1284,6 +1287,7 @@ static void plip_attach (struct parport *port) | |||
1284 | } | 1287 | } |
1285 | 1288 | ||
1286 | nl = netdev_priv(dev); | 1289 | nl = netdev_priv(dev); |
1290 | nl->dev = dev; | ||
1287 | nl->pardev = parport_register_device(port, name, plip_preempt, | 1291 | nl->pardev = parport_register_device(port, name, plip_preempt, |
1288 | plip_wakeup, plip_interrupt, | 1292 | plip_wakeup, plip_interrupt, |
1289 | 0, dev); | 1293 | 0, dev); |
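Work items that are re-armed through schedule_delayed_work() become struct delayed_work under the new API, as in the plip conversion above; the handler then reaches the container through the embedded .work member. A minimal sketch, again with made-up names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_local {
        struct delayed_work deferred;
        int is_deferred;
};

static void foo_kick(struct work_struct *work)
{
        /* 'work' is &foo->deferred.work, so go through the .work member. */
        struct foo_local *foo =
                container_of(work, struct foo_local, deferred.work);

        if (foo->is_deferred)
                schedule_delayed_work(&foo->deferred, 1);       /* re-arm after one jiffy */
}

static void foo_init(struct foo_local *foo)
{
        INIT_DELAYED_WORK(&foo->deferred, foo_kick);
        schedule_delayed_work(&foo->deferred, 1);
}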
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index ec640f6229ae..d79d141a601d 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c | |||
@@ -2008,7 +2008,7 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id) | |||
2008 | "%s: Another function issued a reset to the " | 2008 | "%s: Another function issued a reset to the " |
2009 | "chip. ISR value = %x.\n", ndev->name, value); | 2009 | "chip. ISR value = %x.\n", ndev->name, value); |
2010 | } | 2010 | } |
2011 | queue_work(qdev->workqueue, &qdev->reset_work); | 2011 | queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); |
2012 | spin_unlock(&qdev->adapter_lock); | 2012 | spin_unlock(&qdev->adapter_lock); |
2013 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { | 2013 | } else if (value & ISP_IMR_DISABLE_CMPL_INT) { |
2014 | ql_disable_interrupts(qdev); | 2014 | ql_disable_interrupts(qdev); |
@@ -3182,11 +3182,13 @@ static void ql3xxx_tx_timeout(struct net_device *ndev) | |||
3182 | /* | 3182 | /* |
3183 | * Wake up the worker to process this event. | 3183 | * Wake up the worker to process this event. |
3184 | */ | 3184 | */ |
3185 | queue_work(qdev->workqueue, &qdev->tx_timeout_work); | 3185 | queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); |
3186 | } | 3186 | } |
3187 | 3187 | ||
3188 | static void ql_reset_work(struct ql3_adapter *qdev) | 3188 | static void ql_reset_work(struct work_struct *work) |
3189 | { | 3189 | { |
3190 | struct ql3_adapter *qdev = | ||
3191 | container_of(work, struct ql3_adapter, reset_work.work); | ||
3190 | struct net_device *ndev = qdev->ndev; | 3192 | struct net_device *ndev = qdev->ndev; |
3191 | u32 value; | 3193 | u32 value; |
3192 | struct ql_tx_buf_cb *tx_cb; | 3194 | struct ql_tx_buf_cb *tx_cb; |
@@ -3278,9 +3280,12 @@ static void ql_reset_work(struct ql3_adapter *qdev) | |||
3278 | } | 3280 | } |
3279 | } | 3281 | } |
3280 | 3282 | ||
3281 | static void ql_tx_timeout_work(struct ql3_adapter *qdev) | 3283 | static void ql_tx_timeout_work(struct work_struct *work) |
3282 | { | 3284 | { |
3283 | ql_cycle_adapter(qdev,QL_DO_RESET); | 3285 | struct ql3_adapter *qdev = |
3286 | container_of(work, struct ql3_adapter, tx_timeout_work.work); | ||
3287 | |||
3288 | ql_cycle_adapter(qdev, QL_DO_RESET); | ||
3284 | } | 3289 | } |
3285 | 3290 | ||
3286 | static void ql_get_board_info(struct ql3_adapter *qdev) | 3291 | static void ql_get_board_info(struct ql3_adapter *qdev) |
@@ -3459,9 +3464,8 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, | |||
3459 | netif_stop_queue(ndev); | 3464 | netif_stop_queue(ndev); |
3460 | 3465 | ||
3461 | qdev->workqueue = create_singlethread_workqueue(ndev->name); | 3466 | qdev->workqueue = create_singlethread_workqueue(ndev->name); |
3462 | INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev); | 3467 | INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); |
3463 | INIT_WORK(&qdev->tx_timeout_work, | 3468 | INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); |
3464 | (void (*)(void *))ql_tx_timeout_work, qdev); | ||
3465 | 3469 | ||
3466 | init_timer(&qdev->adapter_timer); | 3470 | init_timer(&qdev->adapter_timer); |
3467 | qdev->adapter_timer.function = ql3xxx_timer; | 3471 | qdev->adapter_timer.function = ql3xxx_timer; |
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 65da2c0bfda6..ea94de7fd071 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h | |||
@@ -1186,8 +1186,8 @@ struct ql3_adapter { | |||
1186 | u32 numPorts; | 1186 | u32 numPorts; |
1187 | struct net_device_stats stats; | 1187 | struct net_device_stats stats; |
1188 | struct workqueue_struct *workqueue; | 1188 | struct workqueue_struct *workqueue; |
1189 | struct work_struct reset_work; | 1189 | struct delayed_work reset_work; |
1190 | struct work_struct tx_timeout_work; | 1190 | struct delayed_work tx_timeout_work; |
1191 | u32 max_frame_size; | 1191 | u32 max_frame_size; |
1192 | }; | 1192 | }; |
1193 | 1193 | ||
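Where a driver dispatches the items through its own workqueue, as qla3xxx does above, queue_work() on a converted item becomes queue_delayed_work() with a delay of 0, which keeps the old "run as soon as possible" behaviour. A sketch under the same naming assumptions as before:

#include <linux/workqueue.h>

struct foo_adapter {
        struct workqueue_struct *workqueue;
        struct delayed_work reset_work;
};

static void foo_reset_work(struct work_struct *work)
{
        struct foo_adapter *qdev =
                container_of(work, struct foo_adapter, reset_work.work);

        /* ... reset the hardware ... */
}

static void foo_trigger_reset(struct foo_adapter *qdev)
{
        /* Previously: queue_work(qdev->workqueue, &qdev->reset_work);
         * a zero delay preserves the immediate-dispatch semantics. */
        queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
}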
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 45d3ca431957..85a392fab5cc 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -424,6 +424,7 @@ struct ring_info { | |||
424 | struct rtl8169_private { | 424 | struct rtl8169_private { |
425 | void __iomem *mmio_addr; /* memory map physical address */ | 425 | void __iomem *mmio_addr; /* memory map physical address */ |
426 | struct pci_dev *pci_dev; /* Index of PCI device */ | 426 | struct pci_dev *pci_dev; /* Index of PCI device */ |
427 | struct net_device *dev; | ||
427 | struct net_device_stats stats; /* statistics of net device */ | 428 | struct net_device_stats stats; /* statistics of net device */ |
428 | spinlock_t lock; /* spin lock flag */ | 429 | spinlock_t lock; /* spin lock flag */ |
429 | u32 msg_enable; | 430 | u32 msg_enable; |
@@ -455,7 +456,7 @@ struct rtl8169_private { | |||
455 | void (*phy_reset_enable)(void __iomem *); | 456 | void (*phy_reset_enable)(void __iomem *); |
456 | unsigned int (*phy_reset_pending)(void __iomem *); | 457 | unsigned int (*phy_reset_pending)(void __iomem *); |
457 | unsigned int (*link_ok)(void __iomem *); | 458 | unsigned int (*link_ok)(void __iomem *); |
458 | struct work_struct task; | 459 | struct delayed_work task; |
459 | unsigned wol_enabled : 1; | 460 | unsigned wol_enabled : 1; |
460 | }; | 461 | }; |
461 | 462 | ||
@@ -1510,6 +1511,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1510 | SET_MODULE_OWNER(dev); | 1511 | SET_MODULE_OWNER(dev); |
1511 | SET_NETDEV_DEV(dev, &pdev->dev); | 1512 | SET_NETDEV_DEV(dev, &pdev->dev); |
1512 | tp = netdev_priv(dev); | 1513 | tp = netdev_priv(dev); |
1514 | tp->dev = dev; | ||
1513 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); | 1515 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
1514 | 1516 | ||
1515 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 1517 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
@@ -1782,7 +1784,7 @@ static int rtl8169_open(struct net_device *dev) | |||
1782 | if (retval < 0) | 1784 | if (retval < 0) |
1783 | goto err_free_rx; | 1785 | goto err_free_rx; |
1784 | 1786 | ||
1785 | INIT_WORK(&tp->task, NULL, dev); | 1787 | INIT_DELAYED_WORK(&tp->task, NULL); |
1786 | 1788 | ||
1787 | rtl8169_hw_start(dev); | 1789 | rtl8169_hw_start(dev); |
1788 | 1790 | ||
@@ -2105,11 +2107,11 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) | |||
2105 | tp->cur_tx = tp->dirty_tx = 0; | 2107 | tp->cur_tx = tp->dirty_tx = 0; |
2106 | } | 2108 | } |
2107 | 2109 | ||
2108 | static void rtl8169_schedule_work(struct net_device *dev, void (*task)(void *)) | 2110 | static void rtl8169_schedule_work(struct net_device *dev, work_func_t task) |
2109 | { | 2111 | { |
2110 | struct rtl8169_private *tp = netdev_priv(dev); | 2112 | struct rtl8169_private *tp = netdev_priv(dev); |
2111 | 2113 | ||
2112 | PREPARE_WORK(&tp->task, task, dev); | 2114 | PREPARE_DELAYED_WORK(&tp->task, task); |
2113 | schedule_delayed_work(&tp->task, 4); | 2115 | schedule_delayed_work(&tp->task, 4); |
2114 | } | 2116 | } |
2115 | 2117 | ||
@@ -2128,9 +2130,11 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev) | |||
2128 | netif_poll_enable(dev); | 2130 | netif_poll_enable(dev); |
2129 | } | 2131 | } |
2130 | 2132 | ||
2131 | static void rtl8169_reinit_task(void *_data) | 2133 | static void rtl8169_reinit_task(struct work_struct *work) |
2132 | { | 2134 | { |
2133 | struct net_device *dev = _data; | 2135 | struct rtl8169_private *tp = |
2136 | container_of(work, struct rtl8169_private, task.work); | ||
2137 | struct net_device *dev = tp->dev; | ||
2134 | int ret; | 2138 | int ret; |
2135 | 2139 | ||
2136 | if (netif_running(dev)) { | 2140 | if (netif_running(dev)) { |
@@ -2153,10 +2157,11 @@ static void rtl8169_reinit_task(void *_data) | |||
2153 | } | 2157 | } |
2154 | } | 2158 | } |
2155 | 2159 | ||
2156 | static void rtl8169_reset_task(void *_data) | 2160 | static void rtl8169_reset_task(struct work_struct *work) |
2157 | { | 2161 | { |
2158 | struct net_device *dev = _data; | 2162 | struct rtl8169_private *tp = |
2159 | struct rtl8169_private *tp = netdev_priv(dev); | 2163 | container_of(work, struct rtl8169_private, task.work); |
2164 | struct net_device *dev = tp->dev; | ||
2160 | 2165 | ||
2161 | if (!netif_running(dev)) | 2166 | if (!netif_running(dev)) |
2162 | return; | 2167 | return; |
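r8169 passes its handler around as a value, so the helper above now takes the work_func_t typedef (a pointer to a function taking struct work_struct *) and re-binds the delayed work with PREPARE_DELAYED_WORK() before scheduling it. A sketch of that pattern, assuming PREPARE_DELAYED_WORK is available as in kernels of this vintage, with illustrative names:

#include <linux/workqueue.h>

struct foo_private {
        struct delayed_work task;
};

static void foo_reset_task(struct work_struct *work)
{
        struct foo_private *tp =
                container_of(work, struct foo_private, task.work);

        /* ... re-initialise the hardware ... */
}

/* Accepts any handler with the new prototype and schedules it to run later. */
static void foo_schedule_work(struct foo_private *tp, work_func_t task)
{
        PREPARE_DELAYED_WORK(&tp->task, task);
        schedule_delayed_work(&tp->task, 4);    /* run roughly 4 jiffies from now */
}

A call such as foo_schedule_work(tp, foo_reset_task) then mirrors what rtl8169_schedule_work() does with its reset and reinit tasks.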
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 33569ec9dbfc..250cdbeefdfd 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
@@ -5872,9 +5872,9 @@ static void s2io_tasklet(unsigned long dev_addr) | |||
5872 | * Description: Sets the link status for the adapter | 5872 | * Description: Sets the link status for the adapter |
5873 | */ | 5873 | */ |
5874 | 5874 | ||
5875 | static void s2io_set_link(unsigned long data) | 5875 | static void s2io_set_link(struct work_struct *work) |
5876 | { | 5876 | { |
5877 | nic_t *nic = (nic_t *) data; | 5877 | nic_t *nic = container_of(work, nic_t, set_link_task); |
5878 | struct net_device *dev = nic->dev; | 5878 | struct net_device *dev = nic->dev; |
5879 | XENA_dev_config_t __iomem *bar0 = nic->bar0; | 5879 | XENA_dev_config_t __iomem *bar0 = nic->bar0; |
5880 | register u64 val64; | 5880 | register u64 val64; |
@@ -6379,10 +6379,10 @@ static int s2io_card_up(nic_t * sp) | |||
6379 | * spin lock. | 6379 | * spin lock. |
6380 | */ | 6380 | */ |
6381 | 6381 | ||
6382 | static void s2io_restart_nic(unsigned long data) | 6382 | static void s2io_restart_nic(struct work_struct *work) |
6383 | { | 6383 | { |
6384 | struct net_device *dev = (struct net_device *) data; | 6384 | nic_t *sp = container_of(work, nic_t, rst_timer_task); |
6385 | nic_t *sp = dev->priv; | 6385 | struct net_device *dev = sp->dev; |
6386 | 6386 | ||
6387 | s2io_card_down(sp); | 6387 | s2io_card_down(sp); |
6388 | if (s2io_card_up(sp)) { | 6388 | if (s2io_card_up(sp)) { |
@@ -6992,10 +6992,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
6992 | 6992 | ||
6993 | dev->tx_timeout = &s2io_tx_watchdog; | 6993 | dev->tx_timeout = &s2io_tx_watchdog; |
6994 | dev->watchdog_timeo = WATCH_DOG_TIMEOUT; | 6994 | dev->watchdog_timeo = WATCH_DOG_TIMEOUT; |
6995 | INIT_WORK(&sp->rst_timer_task, | 6995 | INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); |
6996 | (void (*)(void *)) s2io_restart_nic, dev); | 6996 | INIT_WORK(&sp->set_link_task, s2io_set_link); |
6997 | INIT_WORK(&sp->set_link_task, | ||
6998 | (void (*)(void *)) s2io_set_link, sp); | ||
6999 | 6997 | ||
7000 | pci_save_state(sp->pdev); | 6998 | pci_save_state(sp->pdev); |
7001 | 6999 | ||
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 12b719f4d00f..3b0bafd273c8 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
@@ -1000,7 +1000,7 @@ s2io_msix_fifo_handle(int irq, void *dev_id); | |||
1000 | static irqreturn_t s2io_isr(int irq, void *dev_id); | 1000 | static irqreturn_t s2io_isr(int irq, void *dev_id); |
1001 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); | 1001 | static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); |
1002 | static const struct ethtool_ops netdev_ethtool_ops; | 1002 | static const struct ethtool_ops netdev_ethtool_ops; |
1003 | static void s2io_set_link(unsigned long data); | 1003 | static void s2io_set_link(struct work_struct *work); |
1004 | static int s2io_set_swapper(nic_t * sp); | 1004 | static int s2io_set_swapper(nic_t * sp); |
1005 | static void s2io_card_down(nic_t *nic); | 1005 | static void s2io_card_down(nic_t *nic); |
1006 | static int s2io_card_up(nic_t *nic); | 1006 | static int s2io_card_up(nic_t *nic); |
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index aaba458584fb..b70ed79d4121 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
@@ -280,6 +280,7 @@ enum sis190_feature { | |||
280 | struct sis190_private { | 280 | struct sis190_private { |
281 | void __iomem *mmio_addr; | 281 | void __iomem *mmio_addr; |
282 | struct pci_dev *pci_dev; | 282 | struct pci_dev *pci_dev; |
283 | struct net_device *dev; | ||
283 | struct net_device_stats stats; | 284 | struct net_device_stats stats; |
284 | spinlock_t lock; | 285 | spinlock_t lock; |
285 | u32 rx_buf_sz; | 286 | u32 rx_buf_sz; |
@@ -897,10 +898,11 @@ static void sis190_hw_start(struct net_device *dev) | |||
897 | netif_start_queue(dev); | 898 | netif_start_queue(dev); |
898 | } | 899 | } |
899 | 900 | ||
900 | static void sis190_phy_task(void * data) | 901 | static void sis190_phy_task(struct work_struct *work) |
901 | { | 902 | { |
902 | struct net_device *dev = data; | 903 | struct sis190_private *tp = |
903 | struct sis190_private *tp = netdev_priv(dev); | 904 | container_of(work, struct sis190_private, phy_task); |
905 | struct net_device *dev = tp->dev; | ||
904 | void __iomem *ioaddr = tp->mmio_addr; | 906 | void __iomem *ioaddr = tp->mmio_addr; |
905 | int phy_id = tp->mii_if.phy_id; | 907 | int phy_id = tp->mii_if.phy_id; |
906 | u16 val; | 908 | u16 val; |
@@ -1047,7 +1049,7 @@ static int sis190_open(struct net_device *dev) | |||
1047 | if (rc < 0) | 1049 | if (rc < 0) |
1048 | goto err_free_rx_1; | 1050 | goto err_free_rx_1; |
1049 | 1051 | ||
1050 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | 1052 | INIT_WORK(&tp->phy_task, sis190_phy_task); |
1051 | 1053 | ||
1052 | sis190_request_timer(dev); | 1054 | sis190_request_timer(dev); |
1053 | 1055 | ||
@@ -1436,6 +1438,7 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) | |||
1436 | SET_NETDEV_DEV(dev, &pdev->dev); | 1438 | SET_NETDEV_DEV(dev, &pdev->dev); |
1437 | 1439 | ||
1438 | tp = netdev_priv(dev); | 1440 | tp = netdev_priv(dev); |
1441 | tp->dev = dev; | ||
1439 | tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); | 1442 | tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); |
1440 | 1443 | ||
1441 | rc = pci_enable_device(pdev); | 1444 | rc = pci_enable_device(pdev); |
@@ -1798,7 +1801,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev, | |||
1798 | 1801 | ||
1799 | sis190_init_rxfilter(dev); | 1802 | sis190_init_rxfilter(dev); |
1800 | 1803 | ||
1801 | INIT_WORK(&tp->phy_task, sis190_phy_task, dev); | 1804 | INIT_WORK(&tp->phy_task, sis190_phy_task); |
1802 | 1805 | ||
1803 | dev->open = sis190_open; | 1806 | dev->open = sis190_open; |
1804 | dev->stop = sis190_close; | 1807 | dev->stop = sis190_close; |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 5513907e8393..b60f0451f6cd 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -1327,10 +1327,11 @@ static void xm_check_link(struct net_device *dev) | |||
1327 | * Since internal PHY is wired to a level triggered pin, can't | 1327 | * Since internal PHY is wired to a level triggered pin, can't |
1328 | * get an interrupt when carrier is detected. | 1328 | * get an interrupt when carrier is detected. |
1329 | */ | 1329 | */ |
1330 | static void xm_link_timer(void *arg) | 1330 | static void xm_link_timer(struct work_struct *work) |
1331 | { | 1331 | { |
1332 | struct net_device *dev = arg; | 1332 | struct skge_port *skge = |
1333 | struct skge_port *skge = netdev_priv(arg); | 1333 | container_of(work, struct skge_port, link_thread.work); |
1334 | struct net_device *dev = skge->netdev; | ||
1334 | struct skge_hw *hw = skge->hw; | 1335 | struct skge_hw *hw = skge->hw; |
1335 | int port = skge->port; | 1336 | int port = skge->port; |
1336 | 1337 | ||
@@ -3072,9 +3073,9 @@ static void skge_error_irq(struct skge_hw *hw) | |||
3072 | * because accessing phy registers requires spin wait which might | 3073 | * because accessing phy registers requires spin wait which might |
3073 | * cause excess interrupt latency. | 3074 | * cause excess interrupt latency. |
3074 | */ | 3075 | */ |
3075 | static void skge_extirq(void *arg) | 3076 | static void skge_extirq(struct work_struct *work) |
3076 | { | 3077 | { |
3077 | struct skge_hw *hw = arg; | 3078 | struct skge_hw *hw = container_of(work, struct skge_hw, phy_work); |
3078 | int port; | 3079 | int port; |
3079 | 3080 | ||
3080 | mutex_lock(&hw->phy_mutex); | 3081 | mutex_lock(&hw->phy_mutex); |
@@ -3456,7 +3457,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3456 | skge->port = port; | 3457 | skge->port = port; |
3457 | 3458 | ||
3458 | /* Only used for Genesis XMAC */ | 3459 | /* Only used for Genesis XMAC */ |
3459 | INIT_WORK(&skge->link_thread, xm_link_timer, dev); | 3460 | INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer); |
3460 | 3461 | ||
3461 | if (hw->chip_id != CHIP_ID_GENESIS) { | 3462 | if (hw->chip_id != CHIP_ID_GENESIS) { |
3462 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 3463 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
@@ -3543,7 +3544,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3543 | 3544 | ||
3544 | hw->pdev = pdev; | 3545 | hw->pdev = pdev; |
3545 | mutex_init(&hw->phy_mutex); | 3546 | mutex_init(&hw->phy_mutex); |
3546 | INIT_WORK(&hw->phy_work, skge_extirq, hw); | 3547 | INIT_WORK(&hw->phy_work, skge_extirq); |
3547 | spin_lock_init(&hw->hw_lock); | 3548 | spin_lock_init(&hw->hw_lock); |
3548 | 3549 | ||
3549 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | 3550 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 537c0aaa1db8..23e5275d920c 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -2456,7 +2456,7 @@ struct skge_port { | |||
2456 | 2456 | ||
2457 | struct net_device_stats net_stats; | 2457 | struct net_device_stats net_stats; |
2458 | 2458 | ||
2459 | struct work_struct link_thread; | 2459 | struct delayed_work link_thread; |
2460 | enum pause_control flow_control; | 2460 | enum pause_control flow_control; |
2461 | enum pause_status flow_status; | 2461 | enum pause_status flow_status; |
2462 | u8 rx_csum; | 2462 | u8 rx_csum; |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 95b6478f55c6..e62a9586fb95 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -210,6 +210,7 @@ struct smc_local { | |||
210 | 210 | ||
211 | /* work queue */ | 211 | /* work queue */ |
212 | struct work_struct phy_configure; | 212 | struct work_struct phy_configure; |
213 | struct net_device *dev; | ||
213 | int work_pending; | 214 | int work_pending; |
214 | 215 | ||
215 | spinlock_t lock; | 216 | spinlock_t lock; |
@@ -1114,10 +1115,11 @@ static void smc_phy_check_media(struct net_device *dev, int init) | |||
1114 | * of autonegotiation.) If the RPC ANEG bit is cleared, the selection | 1115 | * of autonegotiation.) If the RPC ANEG bit is cleared, the selection |
1115 | * is controlled by the RPC SPEED and RPC DPLX bits. | 1116 | * is controlled by the RPC SPEED and RPC DPLX bits. |
1116 | */ | 1117 | */ |
1117 | static void smc_phy_configure(void *data) | 1118 | static void smc_phy_configure(struct work_struct *work) |
1118 | { | 1119 | { |
1119 | struct net_device *dev = data; | 1120 | struct smc_local *lp = |
1120 | struct smc_local *lp = netdev_priv(dev); | 1121 | container_of(work, struct smc_local, phy_configure); |
1122 | struct net_device *dev = lp->dev; | ||
1121 | void __iomem *ioaddr = lp->base; | 1123 | void __iomem *ioaddr = lp->base; |
1122 | int phyaddr = lp->mii.phy_id; | 1124 | int phyaddr = lp->mii.phy_id; |
1123 | int my_phy_caps; /* My PHY capabilities */ | 1125 | int my_phy_caps; /* My PHY capabilities */ |
@@ -1592,7 +1594,7 @@ smc_open(struct net_device *dev) | |||
1592 | 1594 | ||
1593 | /* Configure the PHY, initialize the link state */ | 1595 | /* Configure the PHY, initialize the link state */ |
1594 | if (lp->phy_type != 0) | 1596 | if (lp->phy_type != 0) |
1595 | smc_phy_configure(dev); | 1597 | smc_phy_configure(&lp->phy_configure); |
1596 | else { | 1598 | else { |
1597 | spin_lock_irq(&lp->lock); | 1599 | spin_lock_irq(&lp->lock); |
1598 | smc_10bt_check_media(dev, 1); | 1600 | smc_10bt_check_media(dev, 1); |
@@ -1972,7 +1974,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr) | |||
1972 | #endif | 1974 | #endif |
1973 | 1975 | ||
1974 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); | 1976 | tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev); |
1975 | INIT_WORK(&lp->phy_configure, smc_phy_configure, dev); | 1977 | INIT_WORK(&lp->phy_configure, smc_phy_configure); |
1978 | lp->dev = dev; | ||
1976 | lp->mii.phy_id_mask = 0x1f; | 1979 | lp->mii.phy_id_mask = 0x1f; |
1977 | lp->mii.reg_num_mask = 0x1f; | 1980 | lp->mii.reg_num_mask = 0x1f; |
1978 | lp->mii.force_media = 0; | 1981 | lp->mii.force_media = 0; |
@@ -2322,7 +2325,7 @@ static int smc_drv_resume(struct platform_device *dev) | |||
2322 | smc_reset(ndev); | 2325 | smc_reset(ndev); |
2323 | smc_enable(ndev); | 2326 | smc_enable(ndev); |
2324 | if (lp->phy_type != 0) | 2327 | if (lp->phy_type != 0) |
2325 | smc_phy_configure(ndev); | 2328 | smc_phy_configure(&lp->phy_configure); |
2326 | netif_device_attach(ndev); | 2329 | netif_device_attach(ndev); |
2327 | } | 2330 | } |
2328 | } | 2331 | } |
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c index 13e0a43e423b..ebb6aa39f9c7 100644 --- a/drivers/net/spider_net.c +++ b/drivers/net/spider_net.c | |||
@@ -1939,10 +1939,11 @@ spider_net_stop(struct net_device *netdev) | |||
1939 | * called as task when tx hangs, resets interface (if interface is up) | 1939 | * called as task when tx hangs, resets interface (if interface is up) |
1940 | */ | 1940 | */ |
1941 | static void | 1941 | static void |
1942 | spider_net_tx_timeout_task(void *data) | 1942 | spider_net_tx_timeout_task(struct work_struct *work) |
1943 | { | 1943 | { |
1944 | struct net_device *netdev = data; | 1944 | struct spider_net_card *card = |
1945 | struct spider_net_card *card = netdev_priv(netdev); | 1945 | container_of(work, struct spider_net_card, tx_timeout_task); |
1946 | struct net_device *netdev = card->netdev; | ||
1946 | 1947 | ||
1947 | if (!(netdev->flags & IFF_UP)) | 1948 | if (!(netdev->flags & IFF_UP)) |
1948 | goto out; | 1949 | goto out; |
@@ -2116,7 +2117,7 @@ spider_net_alloc_card(void) | |||
2116 | card = netdev_priv(netdev); | 2117 | card = netdev_priv(netdev); |
2117 | card->netdev = netdev; | 2118 | card->netdev = netdev; |
2118 | card->msg_enable = SPIDER_NET_DEFAULT_MSG; | 2119 | card->msg_enable = SPIDER_NET_DEFAULT_MSG; |
2119 | INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task, netdev); | 2120 | INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task); |
2120 | init_waitqueue_head(&card->waitq); | 2121 | init_waitqueue_head(&card->waitq); |
2121 | atomic_set(&card->tx_timeout_task_counter, 0); | 2122 | atomic_set(&card->tx_timeout_task_counter, 0); |
2122 | 2123 | ||
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index cf44e72399b9..785e4a535f9e 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -2282,9 +2282,9 @@ static void gem_do_stop(struct net_device *dev, int wol) | |||
2282 | } | 2282 | } |
2283 | } | 2283 | } |
2284 | 2284 | ||
2285 | static void gem_reset_task(void *data) | 2285 | static void gem_reset_task(struct work_struct *work) |
2286 | { | 2286 | { |
2287 | struct gem *gp = (struct gem *) data; | 2287 | struct gem *gp = container_of(work, struct gem, reset_task); |
2288 | 2288 | ||
2289 | mutex_lock(&gp->pm_mutex); | 2289 | mutex_lock(&gp->pm_mutex); |
2290 | 2290 | ||
@@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3044 | gp->link_timer.function = gem_link_timer; | 3044 | gp->link_timer.function = gem_link_timer; |
3045 | gp->link_timer.data = (unsigned long) gp; | 3045 | gp->link_timer.data = (unsigned long) gp; |
3046 | 3046 | ||
3047 | INIT_WORK(&gp->reset_task, gem_reset_task, gp); | 3047 | INIT_WORK(&gp->reset_task, gem_reset_task); |
3048 | 3048 | ||
3049 | gp->lstate = link_down; | 3049 | gp->lstate = link_down; |
3050 | gp->timer_ticks = 0; | 3050 | gp->timer_ticks = 0; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index c20bb998e0e5..d9123c9adc1e 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -3654,9 +3654,9 @@ static void tg3_poll_controller(struct net_device *dev) | |||
3654 | } | 3654 | } |
3655 | #endif | 3655 | #endif |
3656 | 3656 | ||
3657 | static void tg3_reset_task(void *_data) | 3657 | static void tg3_reset_task(struct work_struct *work) |
3658 | { | 3658 | { |
3659 | struct tg3 *tp = _data; | 3659 | struct tg3 *tp = container_of(work, struct tg3, reset_task); |
3660 | unsigned int restart_timer; | 3660 | unsigned int restart_timer; |
3661 | 3661 | ||
3662 | tg3_full_lock(tp, 0); | 3662 | tg3_full_lock(tp, 0); |
@@ -11734,7 +11734,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
11734 | #endif | 11734 | #endif |
11735 | spin_lock_init(&tp->lock); | 11735 | spin_lock_init(&tp->lock); |
11736 | spin_lock_init(&tp->indirect_lock); | 11736 | spin_lock_init(&tp->indirect_lock); |
11737 | INIT_WORK(&tp->reset_task, tg3_reset_task, tp); | 11737 | INIT_WORK(&tp->reset_task, tg3_reset_task); |
11738 | 11738 | ||
11739 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); | 11739 | tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len); |
11740 | if (tp->regs == 0UL) { | 11740 | if (tp->regs == 0UL) { |
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c index e14f5a00f65a..f85f00251123 100644 --- a/drivers/net/tlan.c +++ b/drivers/net/tlan.c | |||
@@ -296,6 +296,7 @@ static void TLan_SetMulticastList( struct net_device *); | |||
296 | static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); | 296 | static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); |
297 | static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); | 297 | static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); |
298 | static void TLan_tx_timeout( struct net_device *dev); | 298 | static void TLan_tx_timeout( struct net_device *dev); |
299 | static void TLan_tx_timeout_work(struct work_struct *work); | ||
299 | static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); | 300 | static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); |
300 | 301 | ||
301 | static u32 TLan_HandleInvalid( struct net_device *, u16 ); | 302 | static u32 TLan_HandleInvalid( struct net_device *, u16 ); |
@@ -562,6 +563,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
562 | priv = netdev_priv(dev); | 563 | priv = netdev_priv(dev); |
563 | 564 | ||
564 | priv->pciDev = pdev; | 565 | priv->pciDev = pdev; |
566 | priv->dev = dev; | ||
565 | 567 | ||
566 | /* Is this a PCI device? */ | 568 | /* Is this a PCI device? */ |
567 | if (pdev) { | 569 | if (pdev) { |
@@ -634,7 +636,7 @@ static int __devinit TLan_probe1(struct pci_dev *pdev, | |||
634 | 636 | ||
635 | /* This will be used when we get an adapter error from | 637 | /* This will be used when we get an adapter error from |
636 | * within our irq handler */ | 638 | * within our irq handler */ |
637 | INIT_WORK(&priv->tlan_tqueue, (void *)(void*)TLan_tx_timeout, dev); | 639 | INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work); |
638 | 640 | ||
639 | spin_lock_init(&priv->lock); | 641 | spin_lock_init(&priv->lock); |
640 | 642 | ||
@@ -1040,6 +1042,25 @@ static void TLan_tx_timeout(struct net_device *dev) | |||
1040 | } | 1042 | } |
1041 | 1043 | ||
1042 | 1044 | ||
1045 | /*************************************************************** | ||
1046 | * TLan_tx_timeout_work | ||
1047 | * | ||
1048 | * Returns: nothing | ||
1049 | * | ||
1050 | * Params: | ||
1051 | * work work item of device which timed out | ||
1052 | * | ||
1053 | **************************************************************/ | ||
1054 | |||
1055 | static void TLan_tx_timeout_work(struct work_struct *work) | ||
1056 | { | ||
1057 | TLanPrivateInfo *priv = | ||
1058 | container_of(work, TLanPrivateInfo, tlan_tqueue); | ||
1059 | |||
1060 | TLan_tx_timeout(priv->dev); | ||
1061 | } | ||
1062 | |||
1063 | |||
1043 | 1064 | ||
1044 | /*************************************************************** | 1065 | /*************************************************************** |
1045 | * TLan_StartTx | 1066 | * TLan_StartTx |
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h index a44e2f2ef62a..41ce0b665937 100644 --- a/drivers/net/tlan.h +++ b/drivers/net/tlan.h | |||
@@ -170,6 +170,7 @@ typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; | |||
170 | typedef struct tlan_private_tag { | 170 | typedef struct tlan_private_tag { |
171 | struct net_device *nextDevice; | 171 | struct net_device *nextDevice; |
172 | struct pci_dev *pciDev; | 172 | struct pci_dev *pciDev; |
173 | struct net_device *dev; | ||
173 | void *dmaStorage; | 174 | void *dmaStorage; |
174 | dma_addr_t dmaStorageDMA; | 175 | dma_addr_t dmaStorageDMA; |
175 | unsigned int dmaSize; | 176 | unsigned int dmaSize; |
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c index fa3a2bb105ad..942b839ccc5b 100644 --- a/drivers/net/tulip/21142.c +++ b/drivers/net/tulip/21142.c | |||
@@ -26,10 +26,11 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; | |||
26 | 26 | ||
27 | /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list | 27 | /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list |
28 | of available transceivers. */ | 28 | of available transceivers. */ |
29 | void t21142_media_task(void *data) | 29 | void t21142_media_task(struct work_struct *work) |
30 | { | 30 | { |
31 | struct net_device *dev = data; | 31 | struct tulip_private *tp = |
32 | struct tulip_private *tp = netdev_priv(dev); | 32 | container_of(work, struct tulip_private, media_work); |
33 | struct net_device *dev = tp->dev; | ||
33 | void __iomem *ioaddr = tp->base_addr; | 34 | void __iomem *ioaddr = tp->base_addr; |
34 | int csr12 = ioread32(ioaddr + CSR12); | 35 | int csr12 = ioread32(ioaddr + CSR12); |
35 | int next_tick = 60*HZ; | 36 | int next_tick = 60*HZ; |
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c index 066e5d6bcbd8..df326fe1cc8f 100644 --- a/drivers/net/tulip/timer.c +++ b/drivers/net/tulip/timer.c | |||
@@ -18,10 +18,11 @@ | |||
18 | #include "tulip.h" | 18 | #include "tulip.h" |
19 | 19 | ||
20 | 20 | ||
21 | void tulip_media_task(void *data) | 21 | void tulip_media_task(struct work_struct *work) |
22 | { | 22 | { |
23 | struct net_device *dev = data; | 23 | struct tulip_private *tp = |
24 | struct tulip_private *tp = netdev_priv(dev); | 24 | container_of(work, struct tulip_private, media_work); |
25 | struct net_device *dev = tp->dev; | ||
25 | void __iomem *ioaddr = tp->base_addr; | 26 | void __iomem *ioaddr = tp->base_addr; |
26 | u32 csr12 = ioread32(ioaddr + CSR12); | 27 | u32 csr12 = ioread32(ioaddr + CSR12); |
27 | int next_tick = 2*HZ; | 28 | int next_tick = 2*HZ; |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index ad107f45c7b1..25f25da76917 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -44,7 +44,7 @@ struct tulip_chip_table { | |||
44 | int valid_intrs; /* CSR7 interrupt enable settings */ | 44 | int valid_intrs; /* CSR7 interrupt enable settings */ |
45 | int flags; | 45 | int flags; |
46 | void (*media_timer) (unsigned long); | 46 | void (*media_timer) (unsigned long); |
47 | void (*media_task) (void *); | 47 | work_func_t media_task; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | 50 | ||
@@ -392,6 +392,7 @@ struct tulip_private { | |||
392 | int csr12_shadow; | 392 | int csr12_shadow; |
393 | int pad0; /* Used for 8-byte alignment */ | 393 | int pad0; /* Used for 8-byte alignment */ |
394 | struct work_struct media_work; | 394 | struct work_struct media_work; |
395 | struct net_device *dev; | ||
395 | }; | 396 | }; |
396 | 397 | ||
397 | 398 | ||
@@ -406,7 +407,7 @@ struct eeprom_fixup { | |||
406 | 407 | ||
407 | /* 21142.c */ | 408 | /* 21142.c */ |
408 | extern u16 t21142_csr14[]; | 409 | extern u16 t21142_csr14[]; |
409 | void t21142_media_task(void *data); | 410 | void t21142_media_task(struct work_struct *work); |
410 | void t21142_start_nway(struct net_device *dev); | 411 | void t21142_start_nway(struct net_device *dev); |
411 | void t21142_lnk_change(struct net_device *dev, int csr5); | 412 | void t21142_lnk_change(struct net_device *dev, int csr5); |
412 | 413 | ||
@@ -444,7 +445,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5); | |||
444 | void pnic_timer(unsigned long data); | 445 | void pnic_timer(unsigned long data); |
445 | 446 | ||
446 | /* timer.c */ | 447 | /* timer.c */ |
447 | void tulip_media_task(void *data); | 448 | void tulip_media_task(struct work_struct *work); |
448 | void mxic_timer(unsigned long data); | 449 | void mxic_timer(unsigned long data); |
449 | void comet_timer(unsigned long data); | 450 | void comet_timer(unsigned long data); |
450 | 451 | ||
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index 0aee618f883c..5a35354aa523 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -1367,6 +1367,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1367 | * it is zeroed and aligned in alloc_etherdev | 1367 | * it is zeroed and aligned in alloc_etherdev |
1368 | */ | 1368 | */ |
1369 | tp = netdev_priv(dev); | 1369 | tp = netdev_priv(dev); |
1370 | tp->dev = dev; | ||
1370 | 1371 | ||
1371 | tp->rx_ring = pci_alloc_consistent(pdev, | 1372 | tp->rx_ring = pci_alloc_consistent(pdev, |
1372 | sizeof(struct tulip_rx_desc) * RX_RING_SIZE + | 1373 | sizeof(struct tulip_rx_desc) * RX_RING_SIZE + |
@@ -1389,7 +1390,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1389 | tp->timer.data = (unsigned long)dev; | 1390 | tp->timer.data = (unsigned long)dev; |
1390 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; | 1391 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; |
1391 | 1392 | ||
1392 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev); | 1393 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); |
1393 | 1394 | ||
1394 | dev->base_addr = (unsigned long)ioaddr; | 1395 | dev->base_addr = (unsigned long)ioaddr; |
1395 | 1396 | ||
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 931cbdf6d791..b2a23aed4428 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c | |||
@@ -125,8 +125,8 @@ static int cpc_tty_write_room(struct tty_struct *tty); | |||
125 | static int cpc_tty_chars_in_buffer(struct tty_struct *tty); | 125 | static int cpc_tty_chars_in_buffer(struct tty_struct *tty); |
126 | static void cpc_tty_flush_buffer(struct tty_struct *tty); | 126 | static void cpc_tty_flush_buffer(struct tty_struct *tty); |
127 | static void cpc_tty_hangup(struct tty_struct *tty); | 127 | static void cpc_tty_hangup(struct tty_struct *tty); |
128 | static void cpc_tty_rx_work(void *data); | 128 | static void cpc_tty_rx_work(struct work_struct *work); |
129 | static void cpc_tty_tx_work(void *data); | 129 | static void cpc_tty_tx_work(struct work_struct *work); |
130 | static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); | 130 | static int cpc_tty_send_to_card(pc300dev_t *dev,void *buf, int len); |
131 | static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); | 131 | static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx); |
132 | static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); | 132 | static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); |
@@ -261,8 +261,8 @@ void cpc_tty_init(pc300dev_t *pc300dev) | |||
261 | cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; | 261 | cpc_tty->tty_minor = port + CPC_TTY_MINOR_START; |
262 | cpc_tty->pc300dev = pc300dev; | 262 | cpc_tty->pc300dev = pc300dev; |
263 | 263 | ||
264 | INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work, (void *)cpc_tty); | 264 | INIT_WORK(&cpc_tty->tty_tx_work, cpc_tty_tx_work); |
265 | INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work, (void *)port); | 265 | INIT_WORK(&cpc_tty->tty_rx_work, cpc_tty_rx_work); |
266 | 266 | ||
267 | cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; | 267 | cpc_tty->buf_rx.first = cpc_tty->buf_rx.last = NULL; |
268 | 268 | ||
@@ -659,21 +659,23 @@ static void cpc_tty_hangup(struct tty_struct *tty) | |||
659 | * o call the line disc. read | 659 | * o call the line disc. read |
660 | * o free memory | 660 | * o free memory |
661 | */ | 661 | */ |
662 | static void cpc_tty_rx_work(void * data) | 662 | static void cpc_tty_rx_work(struct work_struct *work) |
663 | { | 663 | { |
664 | st_cpc_tty_area *cpc_tty; | ||
664 | unsigned long port; | 665 | unsigned long port; |
665 | int i, j; | 666 | int i, j; |
666 | st_cpc_tty_area *cpc_tty; | ||
667 | volatile st_cpc_rx_buf *buf; | 667 | volatile st_cpc_rx_buf *buf; |
668 | char flags=0,flg_rx=1; | 668 | char flags=0,flg_rx=1; |
669 | struct tty_ldisc *ld; | 669 | struct tty_ldisc *ld; |
670 | 670 | ||
671 | if (cpc_tty_cnt == 0) return; | 671 | if (cpc_tty_cnt == 0) return; |
672 | |||
673 | 672 | ||
674 | for (i=0; (i < 4) && flg_rx ; i++) { | 673 | for (i=0; (i < 4) && flg_rx ; i++) { |
675 | flg_rx = 0; | 674 | flg_rx = 0; |
676 | port = (unsigned long)data; | 675 | |
676 | cpc_tty = container_of(work, st_cpc_tty_area, tty_rx_work); | ||
677 | port = cpc_tty - cpc_tty_area; | ||
678 | |||
677 | for (j=0; j < CPC_TTY_NPORTS; j++) { | 679 | for (j=0; j < CPC_TTY_NPORTS; j++) { |
678 | cpc_tty = &cpc_tty_area[port]; | 680 | cpc_tty = &cpc_tty_area[port]; |
679 | 681 | ||
@@ -882,9 +884,10 @@ void cpc_tty_receive(pc300dev_t *pc300dev) | |||
882 | * o if need call line discipline wakeup | 884 | * o if need call line discipline wakeup |
883 | * o call wake_up_interruptible | 885 | * o call wake_up_interruptible |
884 | */ | 886 | */ |
885 | static void cpc_tty_tx_work(void *data) | 887 | static void cpc_tty_tx_work(struct work_struct *work) |
886 | { | 888 | { |
887 | st_cpc_tty_area *cpc_tty = (st_cpc_tty_area *) data; | 889 | st_cpc_tty_area *cpc_tty = |
890 | container_of(work, st_cpc_tty_area, tty_tx_work); | ||
888 | struct tty_struct *tty; | 891 | struct tty_struct *tty; |
889 | 892 | ||
890 | CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); | 893 | CPC_TTY_DBG("%s: cpc_tty_tx_work init\n",cpc_tty->name); |
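In pc300_tty the receive worker used to be handed the port number directly; after the conversion it gets the element pointer back from container_of() and derives the index by pointer arithmetic against the static port array. A small sketch of that idiom (all names hypothetical):

#include <linux/kernel.h>
#include <linux/workqueue.h>

#define FOO_NPORTS 4

struct foo_port {
        struct work_struct rx_work;
        /* ... per-port receive state ... */
};

static struct foo_port foo_ports[FOO_NPORTS];

static void foo_rx_work(struct work_struct *work)
{
        struct foo_port *port = container_of(work, struct foo_port, rx_work);
        unsigned long idx = port - foo_ports;   /* index into the static array */

        /* ... drain the receive buffers of foo_ports[idx] ... */
}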
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h index 94dfb92fab5c..8286678513b9 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx.h +++ b/drivers/net/wireless/bcm43xx/bcm43xx.h | |||
@@ -819,7 +819,7 @@ struct bcm43xx_private { | |||
819 | struct tasklet_struct isr_tasklet; | 819 | struct tasklet_struct isr_tasklet; |
820 | 820 | ||
821 | /* Periodic tasks */ | 821 | /* Periodic tasks */ |
822 | struct work_struct periodic_work; | 822 | struct delayed_work periodic_work; |
823 | unsigned int periodic_state; | 823 | unsigned int periodic_state; |
824 | 824 | ||
825 | struct work_struct restart_work; | 825 | struct work_struct restart_work; |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index 5b3c27359a18..2ec2e5afce67 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -3215,9 +3215,10 @@ static void do_periodic_work(struct bcm43xx_private *bcm) | |||
3215 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); | 3215 | schedule_delayed_work(&bcm->periodic_work, HZ * 15); |
3216 | } | 3216 | } |
3217 | 3217 | ||
3218 | static void bcm43xx_periodic_work_handler(void *d) | 3218 | static void bcm43xx_periodic_work_handler(struct work_struct *work) |
3219 | { | 3219 | { |
3220 | struct bcm43xx_private *bcm = d; | 3220 | struct bcm43xx_private *bcm = |
3221 | container_of(work, struct bcm43xx_private, periodic_work.work); | ||
3221 | struct net_device *net_dev = bcm->net_dev; | 3222 | struct net_device *net_dev = bcm->net_dev; |
3222 | unsigned long flags; | 3223 | unsigned long flags; |
3223 | u32 savedirqs = 0; | 3224 | u32 savedirqs = 0; |
@@ -3279,11 +3280,11 @@ void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) | |||
3279 | 3280 | ||
3280 | void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) | 3281 | void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) |
3281 | { | 3282 | { |
3282 | struct work_struct *work = &(bcm->periodic_work); | 3283 | struct delayed_work *work = &bcm->periodic_work; |
3283 | 3284 | ||
3284 | assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); | 3285 | assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); |
3285 | INIT_WORK(work, bcm43xx_periodic_work_handler, bcm); | 3286 | INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler); |
3286 | schedule_work(work); | 3287 | schedule_delayed_work(work, 0); |
3287 | } | 3288 | } |
3288 | 3289 | ||
3289 | static void bcm43xx_security_init(struct bcm43xx_private *bcm) | 3290 | static void bcm43xx_security_init(struct bcm43xx_private *bcm) |
@@ -3635,7 +3636,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) | |||
3635 | bcm43xx_periodic_tasks_setup(bcm); | 3636 | bcm43xx_periodic_tasks_setup(bcm); |
3636 | 3637 | ||
3637 | /*FIXME: This should be handled by softmac instead. */ | 3638 | /*FIXME: This should be handled by softmac instead. */ |
3638 | schedule_work(&bcm->softmac->associnfo.work); | 3639 | schedule_delayed_work(&bcm->softmac->associnfo.work, 0); |
3639 | 3640 | ||
3640 | out: | 3641 | out: |
3641 | mutex_unlock(&(bcm)->mutex); | 3642 | mutex_unlock(&(bcm)->mutex); |
@@ -4182,9 +4183,10 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev) | |||
4182 | /* Hard-reset the chip. Do not call this directly. | 4183 | /* Hard-reset the chip. Do not call this directly. |
4183 | * Use bcm43xx_controller_restart() | 4184 | * Use bcm43xx_controller_restart() |
4184 | */ | 4185 | */ |
4185 | static void bcm43xx_chip_reset(void *_bcm) | 4186 | static void bcm43xx_chip_reset(struct work_struct *work) |
4186 | { | 4187 | { |
4187 | struct bcm43xx_private *bcm = _bcm; | 4188 | struct bcm43xx_private *bcm = |
4189 | container_of(work, struct bcm43xx_private, restart_work); | ||
4188 | struct bcm43xx_phyinfo *phy; | 4190 | struct bcm43xx_phyinfo *phy; |
4189 | int err = -ENODEV; | 4191 | int err = -ENODEV; |
4190 | 4192 | ||
@@ -4211,7 +4213,7 @@ void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason) | |||
4211 | if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) | 4213 | if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) |
4212 | return; | 4214 | return; |
4213 | printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); | 4215 | printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); |
4214 | INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); | 4216 | INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset); |
4215 | schedule_work(&bcm->restart_work); | 4217 | schedule_work(&bcm->restart_work); |
4216 | } | 4218 | } |
4217 | 4219 | ||
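The bcm43xx hunks above show the core of this workqueue conversion: a periodically rescheduled item becomes a struct delayed_work, the handler takes a struct work_struct * and recovers its owning private structure with container_of(), and schedule_work() calls become schedule_delayed_work(..., 0). The following minimal sketch of that pattern is not part of the patch; struct foo_priv, foo_periodic and the 15-second period are illustrative names only.

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct delayed_work periodic_work;	/* was: struct work_struct */
	unsigned int periodic_state;
};

/* New-style handler: receives the work item, not a void * cookie. */
static void foo_periodic(struct work_struct *work)
{
	/* For delayed work, container_of() must go through the embedded
	 * work_struct, hence the ".work" member. */
	struct foo_priv *priv =
		container_of(work, struct foo_priv, periodic_work.work);

	priv->periodic_state++;
	/* Re-arm, as do_periodic_work() does with HZ * 15. */
	schedule_delayed_work(&priv->periodic_work, 15 * HZ);
}

static void foo_periodic_setup(struct foo_priv *priv)
{
	/* Two-argument INIT_DELAYED_WORK: no data pointer any more. */
	INIT_DELAYED_WORK(&priv->periodic_work, foo_periodic);
	/* An immediate run on a delayed_work is expressed as a zero delay. */
	schedule_delayed_work(&priv->periodic_work, 0);
}
```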
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h index e663518bd570..e89c890d16fd 100644 --- a/drivers/net/wireless/hostap/hostap.h +++ b/drivers/net/wireless/hostap/hostap.h | |||
@@ -35,7 +35,7 @@ int hostap_80211_get_hdrlen(u16 fc); | |||
35 | struct net_device_stats *hostap_get_stats(struct net_device *dev); | 35 | struct net_device_stats *hostap_get_stats(struct net_device *dev); |
36 | void hostap_setup_dev(struct net_device *dev, local_info_t *local, | 36 | void hostap_setup_dev(struct net_device *dev, local_info_t *local, |
37 | int main_dev); | 37 | int main_dev); |
38 | void hostap_set_multicast_list_queue(void *data); | 38 | void hostap_set_multicast_list_queue(struct work_struct *work); |
39 | int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); | 39 | int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked); |
40 | int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); | 40 | int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked); |
41 | void hostap_cleanup(local_info_t *local); | 41 | void hostap_cleanup(local_info_t *local); |
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index ba13125024cb..08bc57a4b895 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c | |||
@@ -49,10 +49,10 @@ MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs " | |||
49 | static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); | 49 | static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); |
50 | static void hostap_event_expired_sta(struct net_device *dev, | 50 | static void hostap_event_expired_sta(struct net_device *dev, |
51 | struct sta_info *sta); | 51 | struct sta_info *sta); |
52 | static void handle_add_proc_queue(void *data); | 52 | static void handle_add_proc_queue(struct work_struct *work); |
53 | 53 | ||
54 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 54 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
55 | static void handle_wds_oper_queue(void *data); | 55 | static void handle_wds_oper_queue(struct work_struct *work); |
56 | static void prism2_send_mgmt(struct net_device *dev, | 56 | static void prism2_send_mgmt(struct net_device *dev, |
57 | u16 type_subtype, char *body, | 57 | u16 type_subtype, char *body, |
58 | int body_len, u8 *addr, u16 tx_cb_idx); | 58 | int body_len, u8 *addr, u16 tx_cb_idx); |
@@ -807,7 +807,7 @@ void hostap_init_data(local_info_t *local) | |||
807 | INIT_LIST_HEAD(&ap->sta_list); | 807 | INIT_LIST_HEAD(&ap->sta_list); |
808 | 808 | ||
809 | /* Initialize task queue structure for AP management */ | 809 | /* Initialize task queue structure for AP management */ |
810 | INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap); | 810 | INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); |
811 | 811 | ||
812 | ap->tx_callback_idx = | 812 | ap->tx_callback_idx = |
813 | hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); | 813 | hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); |
@@ -815,7 +815,7 @@ void hostap_init_data(local_info_t *local) | |||
815 | printk(KERN_WARNING "%s: failed to register TX callback for " | 815 | printk(KERN_WARNING "%s: failed to register TX callback for " |
816 | "AP\n", local->dev->name); | 816 | "AP\n", local->dev->name); |
817 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 817 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
818 | INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local); | 818 | INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); |
819 | 819 | ||
820 | ap->tx_callback_auth = | 820 | ap->tx_callback_auth = |
821 | hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); | 821 | hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); |
@@ -1062,9 +1062,10 @@ static int prism2_sta_proc_read(char *page, char **start, off_t off, | |||
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | 1064 | ||
1065 | static void handle_add_proc_queue(void *data) | 1065 | static void handle_add_proc_queue(struct work_struct *work) |
1066 | { | 1066 | { |
1067 | struct ap_data *ap = (struct ap_data *) data; | 1067 | struct ap_data *ap = container_of(work, struct ap_data, |
1068 | add_sta_proc_queue); | ||
1068 | struct sta_info *sta; | 1069 | struct sta_info *sta; |
1069 | char name[20]; | 1070 | char name[20]; |
1070 | struct add_sta_proc_data *entry, *prev; | 1071 | struct add_sta_proc_data *entry, *prev; |
@@ -1952,9 +1953,11 @@ static void handle_pspoll(local_info_t *local, | |||
1952 | 1953 | ||
1953 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT | 1954 | #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT |
1954 | 1955 | ||
1955 | static void handle_wds_oper_queue(void *data) | 1956 | static void handle_wds_oper_queue(struct work_struct *work) |
1956 | { | 1957 | { |
1957 | local_info_t *local = data; | 1958 | struct ap_data *ap = container_of(work, struct ap_data, |
1959 | wds_oper_queue); | ||
1960 | local_info_t *local = ap->local; | ||
1958 | struct wds_oper_data *entry, *prev; | 1961 | struct wds_oper_data *entry, *prev; |
1959 | 1962 | ||
1960 | spin_lock_bh(&local->lock); | 1963 | spin_lock_bh(&local->lock); |
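The hostap_ap.c hunks use the non-delayed variant of the same idiom: container_of() is applied to the work_struct itself, and context that used to arrive as the INIT_WORK data argument (ap->local in handle_wds_oper_queue) is now reached through the containing object. A hedged sketch, with bar_data and its fields as stand-in names, not the driver's own:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct bar_data {
	struct work_struct oper_queue;
	void *owner;			/* stand-in for ap->local */
};

static void bar_handle_oper_queue(struct work_struct *work)
{
	/* Plain work_struct: no ".work" suffix in container_of(). */
	struct bar_data *bar = container_of(work, struct bar_data, oper_queue);
	void *owner = bar->owner;	/* previously the INIT_WORK data arg */

	(void)owner;			/* ... process queued entries ... */
}

static void bar_init(struct bar_data *bar)
{
	INIT_WORK(&bar->oper_queue, bar_handle_oper_queue);
}
```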
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c index ed00ebb6e7f4..c19e68636a1c 100644 --- a/drivers/net/wireless/hostap/hostap_hw.c +++ b/drivers/net/wireless/hostap/hostap_hw.c | |||
@@ -1645,9 +1645,9 @@ static void prism2_schedule_reset(local_info_t *local) | |||
1645 | 1645 | ||
1646 | /* Called only as scheduled task after noticing card timeout in interrupt | 1646 | /* Called only as scheduled task after noticing card timeout in interrupt |
1647 | * context */ | 1647 | * context */ |
1648 | static void handle_reset_queue(void *data) | 1648 | static void handle_reset_queue(struct work_struct *work) |
1649 | { | 1649 | { |
1650 | local_info_t *local = (local_info_t *) data; | 1650 | local_info_t *local = container_of(work, local_info_t, reset_queue); |
1651 | 1651 | ||
1652 | printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); | 1652 | printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); |
1653 | prism2_hw_reset(local->dev); | 1653 | prism2_hw_reset(local->dev); |
@@ -2896,9 +2896,10 @@ static void hostap_passive_scan(unsigned long data) | |||
2896 | 2896 | ||
2897 | /* Called only as a scheduled task when communications quality values should | 2897 | /* Called only as a scheduled task when communications quality values should |
2898 | * be updated. */ | 2898 | * be updated. */ |
2899 | static void handle_comms_qual_update(void *data) | 2899 | static void handle_comms_qual_update(struct work_struct *work) |
2900 | { | 2900 | { |
2901 | local_info_t *local = data; | 2901 | local_info_t *local = |
2902 | container_of(work, local_info_t, comms_qual_update); | ||
2902 | prism2_update_comms_qual(local->dev); | 2903 | prism2_update_comms_qual(local->dev); |
2903 | } | 2904 | } |
2904 | 2905 | ||
@@ -3050,9 +3051,9 @@ static int prism2_set_tim(struct net_device *dev, int aid, int set) | |||
3050 | } | 3051 | } |
3051 | 3052 | ||
3052 | 3053 | ||
3053 | static void handle_set_tim_queue(void *data) | 3054 | static void handle_set_tim_queue(struct work_struct *work) |
3054 | { | 3055 | { |
3055 | local_info_t *local = (local_info_t *) data; | 3056 | local_info_t *local = container_of(work, local_info_t, set_tim_queue); |
3056 | struct set_tim_data *entry; | 3057 | struct set_tim_data *entry; |
3057 | u16 val; | 3058 | u16 val; |
3058 | 3059 | ||
@@ -3209,15 +3210,15 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, | |||
3209 | local->scan_channel_mask = 0xffff; | 3210 | local->scan_channel_mask = 0xffff; |
3210 | 3211 | ||
3211 | /* Initialize task queue structures */ | 3212 | /* Initialize task queue structures */ |
3212 | INIT_WORK(&local->reset_queue, handle_reset_queue, local); | 3213 | INIT_WORK(&local->reset_queue, handle_reset_queue); |
3213 | INIT_WORK(&local->set_multicast_list_queue, | 3214 | INIT_WORK(&local->set_multicast_list_queue, |
3214 | hostap_set_multicast_list_queue, local->dev); | 3215 | hostap_set_multicast_list_queue); |
3215 | 3216 | ||
3216 | INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local); | 3217 | INIT_WORK(&local->set_tim_queue, handle_set_tim_queue); |
3217 | INIT_LIST_HEAD(&local->set_tim_list); | 3218 | INIT_LIST_HEAD(&local->set_tim_list); |
3218 | spin_lock_init(&local->set_tim_lock); | 3219 | spin_lock_init(&local->set_tim_lock); |
3219 | 3220 | ||
3220 | INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local); | 3221 | INIT_WORK(&local->comms_qual_update, handle_comms_qual_update); |
3221 | 3222 | ||
3222 | /* Initialize tasklets for handling hardware IRQ related operations | 3223 | /* Initialize tasklets for handling hardware IRQ related operations |
3223 | * outside hw IRQ handler */ | 3224 | * outside hw IRQ handler */ |
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c index 50f72d831cf4..5fd2b1ad7f5e 100644 --- a/drivers/net/wireless/hostap/hostap_info.c +++ b/drivers/net/wireless/hostap/hostap_info.c | |||
@@ -474,9 +474,9 @@ static void handle_info_queue_scanresults(local_info_t *local) | |||
474 | 474 | ||
475 | /* Called only as scheduled task after receiving info frames (used to avoid | 475 | /* Called only as scheduled task after receiving info frames (used to avoid |
476 | * pending too much time in HW IRQ handler). */ | 476 | * pending too much time in HW IRQ handler). */ |
477 | static void handle_info_queue(void *data) | 477 | static void handle_info_queue(struct work_struct *work) |
478 | { | 478 | { |
479 | local_info_t *local = (local_info_t *) data; | 479 | local_info_t *local = container_of(work, local_info_t, info_queue); |
480 | 480 | ||
481 | if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, | 481 | if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, |
482 | &local->pending_info)) | 482 | &local->pending_info)) |
@@ -493,7 +493,7 @@ void hostap_info_init(local_info_t *local) | |||
493 | { | 493 | { |
494 | skb_queue_head_init(&local->info_list); | 494 | skb_queue_head_init(&local->info_list); |
495 | #ifndef PRISM2_NO_STATION_MODES | 495 | #ifndef PRISM2_NO_STATION_MODES |
496 | INIT_WORK(&local->info_queue, handle_info_queue, local); | 496 | INIT_WORK(&local->info_queue, handle_info_queue); |
497 | #endif /* PRISM2_NO_STATION_MODES */ | 497 | #endif /* PRISM2_NO_STATION_MODES */ |
498 | } | 498 | } |
499 | 499 | ||
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c index 53374fcba77e..0796be9d9e77 100644 --- a/drivers/net/wireless/hostap/hostap_main.c +++ b/drivers/net/wireless/hostap/hostap_main.c | |||
@@ -767,14 +767,14 @@ static int prism2_set_mac_address(struct net_device *dev, void *p) | |||
767 | 767 | ||
768 | /* TODO: to be further implemented as soon as Prism2 fully supports | 768 | /* TODO: to be further implemented as soon as Prism2 fully supports |
769 | * GroupAddresses and correct documentation is available */ | 769 | * GroupAddresses and correct documentation is available */ |
770 | void hostap_set_multicast_list_queue(void *data) | 770 | void hostap_set_multicast_list_queue(struct work_struct *work) |
771 | { | 771 | { |
772 | struct net_device *dev = (struct net_device *) data; | 772 | local_info_t *local = |
773 | container_of(work, local_info_t, set_multicast_list_queue); | ||
774 | struct net_device *dev = local->dev; | ||
773 | struct hostap_interface *iface; | 775 | struct hostap_interface *iface; |
774 | local_info_t *local; | ||
775 | 776 | ||
776 | iface = netdev_priv(dev); | 777 | iface = netdev_priv(dev); |
777 | local = iface->local; | ||
778 | if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, | 778 | if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, |
779 | local->is_promisc)) { | 779 | local->is_promisc)) { |
780 | printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", | 780 | printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", |
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c index 79607b8b877c..1bcd352a813b 100644 --- a/drivers/net/wireless/ipw2100.c +++ b/drivers/net/wireless/ipw2100.c | |||
@@ -316,7 +316,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv, | |||
316 | struct ipw2100_fw *fw); | 316 | struct ipw2100_fw *fw); |
317 | static int ipw2100_ucode_download(struct ipw2100_priv *priv, | 317 | static int ipw2100_ucode_download(struct ipw2100_priv *priv, |
318 | struct ipw2100_fw *fw); | 318 | struct ipw2100_fw *fw); |
319 | static void ipw2100_wx_event_work(struct ipw2100_priv *priv); | 319 | static void ipw2100_wx_event_work(struct work_struct *work); |
320 | static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); | 320 | static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); |
321 | static struct iw_handler_def ipw2100_wx_handler_def; | 321 | static struct iw_handler_def ipw2100_wx_handler_def; |
322 | 322 | ||
@@ -679,7 +679,8 @@ static void schedule_reset(struct ipw2100_priv *priv) | |||
679 | queue_delayed_work(priv->workqueue, &priv->reset_work, | 679 | queue_delayed_work(priv->workqueue, &priv->reset_work, |
680 | priv->reset_backoff * HZ); | 680 | priv->reset_backoff * HZ); |
681 | else | 681 | else |
682 | queue_work(priv->workqueue, &priv->reset_work); | 682 | queue_delayed_work(priv->workqueue, &priv->reset_work, |
683 | 0); | ||
683 | 684 | ||
684 | if (priv->reset_backoff < MAX_RESET_BACKOFF) | 685 | if (priv->reset_backoff < MAX_RESET_BACKOFF) |
685 | priv->reset_backoff++; | 686 | priv->reset_backoff++; |
@@ -1873,8 +1874,10 @@ static void ipw2100_down(struct ipw2100_priv *priv) | |||
1873 | netif_stop_queue(priv->net_dev); | 1874 | netif_stop_queue(priv->net_dev); |
1874 | } | 1875 | } |
1875 | 1876 | ||
1876 | static void ipw2100_reset_adapter(struct ipw2100_priv *priv) | 1877 | static void ipw2100_reset_adapter(struct work_struct *work) |
1877 | { | 1878 | { |
1879 | struct ipw2100_priv *priv = | ||
1880 | container_of(work, struct ipw2100_priv, reset_work.work); | ||
1878 | unsigned long flags; | 1881 | unsigned long flags; |
1879 | union iwreq_data wrqu = { | 1882 | union iwreq_data wrqu = { |
1880 | .ap_addr = { | 1883 | .ap_addr = { |
@@ -2071,9 +2074,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status) | |||
2071 | return; | 2074 | return; |
2072 | 2075 | ||
2073 | if (priv->status & STATUS_SECURITY_UPDATED) | 2076 | if (priv->status & STATUS_SECURITY_UPDATED) |
2074 | queue_work(priv->workqueue, &priv->security_work); | 2077 | queue_delayed_work(priv->workqueue, &priv->security_work, 0); |
2075 | 2078 | ||
2076 | queue_work(priv->workqueue, &priv->wx_event_work); | 2079 | queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0); |
2077 | } | 2080 | } |
2078 | 2081 | ||
2079 | static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) | 2082 | static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status) |
@@ -5524,8 +5527,11 @@ static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) | |||
5524 | return err; | 5527 | return err; |
5525 | } | 5528 | } |
5526 | 5529 | ||
5527 | static void ipw2100_security_work(struct ipw2100_priv *priv) | 5530 | static void ipw2100_security_work(struct work_struct *work) |
5528 | { | 5531 | { |
5532 | struct ipw2100_priv *priv = | ||
5533 | container_of(work, struct ipw2100_priv, security_work.work); | ||
5534 | |||
5529 | /* If we happen to have reconnected before we get a chance to | 5535 | /* If we happen to have reconnected before we get a chance to |
5530 | * process this, then update the security settings--which causes | 5536 | * process this, then update the security settings--which causes |
5531 | * a disassociation to occur */ | 5537 | * a disassociation to occur */ |
@@ -5748,7 +5754,7 @@ static int ipw2100_set_address(struct net_device *dev, void *p) | |||
5748 | 5754 | ||
5749 | priv->reset_backoff = 0; | 5755 | priv->reset_backoff = 0; |
5750 | mutex_unlock(&priv->action_mutex); | 5756 | mutex_unlock(&priv->action_mutex); |
5751 | ipw2100_reset_adapter(priv); | 5757 | ipw2100_reset_adapter(&priv->reset_work.work); |
5752 | return 0; | 5758 | return 0; |
5753 | 5759 | ||
5754 | done: | 5760 | done: |
@@ -5910,9 +5916,10 @@ static const struct ethtool_ops ipw2100_ethtool_ops = { | |||
5910 | .get_drvinfo = ipw_ethtool_get_drvinfo, | 5916 | .get_drvinfo = ipw_ethtool_get_drvinfo, |
5911 | }; | 5917 | }; |
5912 | 5918 | ||
5913 | static void ipw2100_hang_check(void *adapter) | 5919 | static void ipw2100_hang_check(struct work_struct *work) |
5914 | { | 5920 | { |
5915 | struct ipw2100_priv *priv = adapter; | 5921 | struct ipw2100_priv *priv = |
5922 | container_of(work, struct ipw2100_priv, hang_check.work); | ||
5916 | unsigned long flags; | 5923 | unsigned long flags; |
5917 | u32 rtc = 0xa5a5a5a5; | 5924 | u32 rtc = 0xa5a5a5a5; |
5918 | u32 len = sizeof(rtc); | 5925 | u32 len = sizeof(rtc); |
@@ -5952,9 +5959,10 @@ static void ipw2100_hang_check(void *adapter) | |||
5952 | spin_unlock_irqrestore(&priv->low_lock, flags); | 5959 | spin_unlock_irqrestore(&priv->low_lock, flags); |
5953 | } | 5960 | } |
5954 | 5961 | ||
5955 | static void ipw2100_rf_kill(void *adapter) | 5962 | static void ipw2100_rf_kill(struct work_struct *work) |
5956 | { | 5963 | { |
5957 | struct ipw2100_priv *priv = adapter; | 5964 | struct ipw2100_priv *priv = |
5965 | container_of(work, struct ipw2100_priv, rf_kill.work); | ||
5958 | unsigned long flags; | 5966 | unsigned long flags; |
5959 | 5967 | ||
5960 | spin_lock_irqsave(&priv->low_lock, flags); | 5968 | spin_lock_irqsave(&priv->low_lock, flags); |
@@ -6103,14 +6111,11 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, | |||
6103 | 6111 | ||
6104 | priv->workqueue = create_workqueue(DRV_NAME); | 6112 | priv->workqueue = create_workqueue(DRV_NAME); |
6105 | 6113 | ||
6106 | INIT_WORK(&priv->reset_work, | 6114 | INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); |
6107 | (void (*)(void *))ipw2100_reset_adapter, priv); | 6115 | INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work); |
6108 | INIT_WORK(&priv->security_work, | 6116 | INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); |
6109 | (void (*)(void *))ipw2100_security_work, priv); | 6117 | INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); |
6110 | INIT_WORK(&priv->wx_event_work, | 6118 | INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); |
6111 | (void (*)(void *))ipw2100_wx_event_work, priv); | ||
6112 | INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv); | ||
6113 | INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv); | ||
6114 | 6119 | ||
6115 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | 6120 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) |
6116 | ipw2100_irq_tasklet, (unsigned long)priv); | 6121 | ipw2100_irq_tasklet, (unsigned long)priv); |
@@ -8281,8 +8286,10 @@ static struct iw_handler_def ipw2100_wx_handler_def = { | |||
8281 | .get_wireless_stats = ipw2100_wx_wireless_stats, | 8286 | .get_wireless_stats = ipw2100_wx_wireless_stats, |
8282 | }; | 8287 | }; |
8283 | 8288 | ||
8284 | static void ipw2100_wx_event_work(struct ipw2100_priv *priv) | 8289 | static void ipw2100_wx_event_work(struct work_struct *work) |
8285 | { | 8290 | { |
8291 | struct ipw2100_priv *priv = | ||
8292 | container_of(work, struct ipw2100_priv, wx_event_work.work); | ||
8286 | union iwreq_data wrqu; | 8293 | union iwreq_data wrqu; |
8287 | int len = ETH_ALEN; | 8294 | int len = ETH_ALEN; |
8288 | 8295 | ||
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h index 55b7227198df..de7d384d38af 100644 --- a/drivers/net/wireless/ipw2100.h +++ b/drivers/net/wireless/ipw2100.h | |||
@@ -583,11 +583,11 @@ struct ipw2100_priv { | |||
583 | struct tasklet_struct irq_tasklet; | 583 | struct tasklet_struct irq_tasklet; |
584 | 584 | ||
585 | struct workqueue_struct *workqueue; | 585 | struct workqueue_struct *workqueue; |
586 | struct work_struct reset_work; | 586 | struct delayed_work reset_work; |
587 | struct work_struct security_work; | 587 | struct delayed_work security_work; |
588 | struct work_struct wx_event_work; | 588 | struct delayed_work wx_event_work; |
589 | struct work_struct hang_check; | 589 | struct delayed_work hang_check; |
590 | struct work_struct rf_kill; | 590 | struct delayed_work rf_kill; |
591 | 591 | ||
592 | u32 interrupts; | 592 | u32 interrupts; |
593 | int tx_interrupts; | 593 | int tx_interrupts; |
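As the ipw2100 changes illustrate, fields that are ever queued with a delay are retyped to struct delayed_work, and queue_work() on them becomes queue_delayed_work(..., 0) on the driver's private workqueue. A small sketch of that shape, under the assumption of an invented baz_priv structure (not the driver's):

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct baz_priv {
	struct workqueue_struct *workqueue;
	struct delayed_work reset_work;		/* was struct work_struct */
};

static void baz_reset(struct work_struct *work)
{
	struct baz_priv *priv =
		container_of(work, struct baz_priv, reset_work.work);

	(void)priv;		/* ... reset the adapter ... */
}

static void baz_schedule_reset(struct baz_priv *priv, unsigned long backoff)
{
	/* The old queue_work(wq, &work) becomes a zero-delay (or backed-off)
	 * queue_delayed_work(wq, &dwork, delay). */
	queue_delayed_work(priv->workqueue, &priv->reset_work, backoff * HZ);
}

static int baz_setup(struct baz_priv *priv)
{
	priv->workqueue = create_workqueue("baz");
	if (!priv->workqueue)
		return -ENOMEM;
	INIT_DELAYED_WORK(&priv->reset_work, baz_reset);
	return 0;
}
```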
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index c692d01a76ca..e82e56bb85e1 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c | |||
@@ -187,9 +187,9 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *); | |||
187 | static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); | 187 | static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *); |
188 | static void ipw_rx_queue_replenish(void *); | 188 | static void ipw_rx_queue_replenish(void *); |
189 | static int ipw_up(struct ipw_priv *); | 189 | static int ipw_up(struct ipw_priv *); |
190 | static void ipw_bg_up(void *); | 190 | static void ipw_bg_up(struct work_struct *work); |
191 | static void ipw_down(struct ipw_priv *); | 191 | static void ipw_down(struct ipw_priv *); |
192 | static void ipw_bg_down(void *); | 192 | static void ipw_bg_down(struct work_struct *work); |
193 | static int ipw_config(struct ipw_priv *); | 193 | static int ipw_config(struct ipw_priv *); |
194 | static int init_supported_rates(struct ipw_priv *priv, | 194 | static int init_supported_rates(struct ipw_priv *priv, |
195 | struct ipw_supported_rates *prates); | 195 | struct ipw_supported_rates *prates); |
@@ -862,11 +862,12 @@ static void ipw_led_link_on(struct ipw_priv *priv) | |||
862 | spin_unlock_irqrestore(&priv->lock, flags); | 862 | spin_unlock_irqrestore(&priv->lock, flags); |
863 | } | 863 | } |
864 | 864 | ||
865 | static void ipw_bg_led_link_on(void *data) | 865 | static void ipw_bg_led_link_on(struct work_struct *work) |
866 | { | 866 | { |
867 | struct ipw_priv *priv = data; | 867 | struct ipw_priv *priv = |
868 | container_of(work, struct ipw_priv, led_link_on.work); | ||
868 | mutex_lock(&priv->mutex); | 869 | mutex_lock(&priv->mutex); |
869 | ipw_led_link_on(data); | 870 | ipw_led_link_on(priv); |
870 | mutex_unlock(&priv->mutex); | 871 | mutex_unlock(&priv->mutex); |
871 | } | 872 | } |
872 | 873 | ||
@@ -906,11 +907,12 @@ static void ipw_led_link_off(struct ipw_priv *priv) | |||
906 | spin_unlock_irqrestore(&priv->lock, flags); | 907 | spin_unlock_irqrestore(&priv->lock, flags); |
907 | } | 908 | } |
908 | 909 | ||
909 | static void ipw_bg_led_link_off(void *data) | 910 | static void ipw_bg_led_link_off(struct work_struct *work) |
910 | { | 911 | { |
911 | struct ipw_priv *priv = data; | 912 | struct ipw_priv *priv = |
913 | container_of(work, struct ipw_priv, led_link_off.work); | ||
912 | mutex_lock(&priv->mutex); | 914 | mutex_lock(&priv->mutex); |
913 | ipw_led_link_off(data); | 915 | ipw_led_link_off(priv); |
914 | mutex_unlock(&priv->mutex); | 916 | mutex_unlock(&priv->mutex); |
915 | } | 917 | } |
916 | 918 | ||
@@ -985,11 +987,12 @@ static void ipw_led_activity_off(struct ipw_priv *priv) | |||
985 | spin_unlock_irqrestore(&priv->lock, flags); | 987 | spin_unlock_irqrestore(&priv->lock, flags); |
986 | } | 988 | } |
987 | 989 | ||
988 | static void ipw_bg_led_activity_off(void *data) | 990 | static void ipw_bg_led_activity_off(struct work_struct *work) |
989 | { | 991 | { |
990 | struct ipw_priv *priv = data; | 992 | struct ipw_priv *priv = |
993 | container_of(work, struct ipw_priv, led_act_off.work); | ||
991 | mutex_lock(&priv->mutex); | 994 | mutex_lock(&priv->mutex); |
992 | ipw_led_activity_off(data); | 995 | ipw_led_activity_off(priv); |
993 | mutex_unlock(&priv->mutex); | 996 | mutex_unlock(&priv->mutex); |
994 | } | 997 | } |
995 | 998 | ||
@@ -2228,11 +2231,12 @@ static void ipw_adapter_restart(void *adapter) | |||
2228 | } | 2231 | } |
2229 | } | 2232 | } |
2230 | 2233 | ||
2231 | static void ipw_bg_adapter_restart(void *data) | 2234 | static void ipw_bg_adapter_restart(struct work_struct *work) |
2232 | { | 2235 | { |
2233 | struct ipw_priv *priv = data; | 2236 | struct ipw_priv *priv = |
2237 | container_of(work, struct ipw_priv, adapter_restart); | ||
2234 | mutex_lock(&priv->mutex); | 2238 | mutex_lock(&priv->mutex); |
2235 | ipw_adapter_restart(data); | 2239 | ipw_adapter_restart(priv); |
2236 | mutex_unlock(&priv->mutex); | 2240 | mutex_unlock(&priv->mutex); |
2237 | } | 2241 | } |
2238 | 2242 | ||
@@ -2249,11 +2253,12 @@ static void ipw_scan_check(void *data) | |||
2249 | } | 2253 | } |
2250 | } | 2254 | } |
2251 | 2255 | ||
2252 | static void ipw_bg_scan_check(void *data) | 2256 | static void ipw_bg_scan_check(struct work_struct *work) |
2253 | { | 2257 | { |
2254 | struct ipw_priv *priv = data; | 2258 | struct ipw_priv *priv = |
2259 | container_of(work, struct ipw_priv, scan_check.work); | ||
2255 | mutex_lock(&priv->mutex); | 2260 | mutex_lock(&priv->mutex); |
2256 | ipw_scan_check(data); | 2261 | ipw_scan_check(priv); |
2257 | mutex_unlock(&priv->mutex); | 2262 | mutex_unlock(&priv->mutex); |
2258 | } | 2263 | } |
2259 | 2264 | ||
@@ -3831,17 +3836,19 @@ static int ipw_disassociate(void *data) | |||
3831 | return 1; | 3836 | return 1; |
3832 | } | 3837 | } |
3833 | 3838 | ||
3834 | static void ipw_bg_disassociate(void *data) | 3839 | static void ipw_bg_disassociate(struct work_struct *work) |
3835 | { | 3840 | { |
3836 | struct ipw_priv *priv = data; | 3841 | struct ipw_priv *priv = |
3842 | container_of(work, struct ipw_priv, disassociate); | ||
3837 | mutex_lock(&priv->mutex); | 3843 | mutex_lock(&priv->mutex); |
3838 | ipw_disassociate(data); | 3844 | ipw_disassociate(priv); |
3839 | mutex_unlock(&priv->mutex); | 3845 | mutex_unlock(&priv->mutex); |
3840 | } | 3846 | } |
3841 | 3847 | ||
3842 | static void ipw_system_config(void *data) | 3848 | static void ipw_system_config(struct work_struct *work) |
3843 | { | 3849 | { |
3844 | struct ipw_priv *priv = data; | 3850 | struct ipw_priv *priv = |
3851 | container_of(work, struct ipw_priv, system_config); | ||
3845 | 3852 | ||
3846 | #ifdef CONFIG_IPW2200_PROMISCUOUS | 3853 | #ifdef CONFIG_IPW2200_PROMISCUOUS |
3847 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { | 3854 | if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) { |
@@ -4208,11 +4215,12 @@ static void ipw_gather_stats(struct ipw_priv *priv) | |||
4208 | IPW_STATS_INTERVAL); | 4215 | IPW_STATS_INTERVAL); |
4209 | } | 4216 | } |
4210 | 4217 | ||
4211 | static void ipw_bg_gather_stats(void *data) | 4218 | static void ipw_bg_gather_stats(struct work_struct *work) |
4212 | { | 4219 | { |
4213 | struct ipw_priv *priv = data; | 4220 | struct ipw_priv *priv = |
4221 | container_of(work, struct ipw_priv, gather_stats.work); | ||
4214 | mutex_lock(&priv->mutex); | 4222 | mutex_lock(&priv->mutex); |
4215 | ipw_gather_stats(data); | 4223 | ipw_gather_stats(priv); |
4216 | mutex_unlock(&priv->mutex); | 4224 | mutex_unlock(&priv->mutex); |
4217 | } | 4225 | } |
4218 | 4226 | ||
@@ -4268,8 +4276,8 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv, | |||
4268 | if (!(priv->status & STATUS_ROAMING)) { | 4276 | if (!(priv->status & STATUS_ROAMING)) { |
4269 | priv->status |= STATUS_ROAMING; | 4277 | priv->status |= STATUS_ROAMING; |
4270 | if (!(priv->status & STATUS_SCANNING)) | 4278 | if (!(priv->status & STATUS_SCANNING)) |
4271 | queue_work(priv->workqueue, | 4279 | queue_delayed_work(priv->workqueue, |
4272 | &priv->request_scan); | 4280 | &priv->request_scan, 0); |
4273 | } | 4281 | } |
4274 | return; | 4282 | return; |
4275 | } | 4283 | } |
@@ -4607,8 +4615,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4607 | #ifdef CONFIG_IPW2200_MONITOR | 4615 | #ifdef CONFIG_IPW2200_MONITOR |
4608 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { | 4616 | if (priv->ieee->iw_mode == IW_MODE_MONITOR) { |
4609 | priv->status |= STATUS_SCAN_FORCED; | 4617 | priv->status |= STATUS_SCAN_FORCED; |
4610 | queue_work(priv->workqueue, | 4618 | queue_delayed_work(priv->workqueue, |
4611 | &priv->request_scan); | 4619 | &priv->request_scan, 0); |
4612 | break; | 4620 | break; |
4613 | } | 4621 | } |
4614 | priv->status &= ~STATUS_SCAN_FORCED; | 4622 | priv->status &= ~STATUS_SCAN_FORCED; |
@@ -4631,8 +4639,8 @@ static void ipw_rx_notification(struct ipw_priv *priv, | |||
4631 | /* Don't schedule if we aborted the scan */ | 4639 | /* Don't schedule if we aborted the scan */ |
4632 | priv->status &= ~STATUS_ROAMING; | 4640 | priv->status &= ~STATUS_ROAMING; |
4633 | } else if (priv->status & STATUS_SCAN_PENDING) | 4641 | } else if (priv->status & STATUS_SCAN_PENDING) |
4634 | queue_work(priv->workqueue, | 4642 | queue_delayed_work(priv->workqueue, |
4635 | &priv->request_scan); | 4643 | &priv->request_scan, 0); |
4636 | else if (priv->config & CFG_BACKGROUND_SCAN | 4644 | else if (priv->config & CFG_BACKGROUND_SCAN |
4637 | && priv->status & STATUS_ASSOCIATED) | 4645 | && priv->status & STATUS_ASSOCIATED) |
4638 | queue_delayed_work(priv->workqueue, | 4646 | queue_delayed_work(priv->workqueue, |
@@ -5055,11 +5063,12 @@ static void ipw_rx_queue_replenish(void *data) | |||
5055 | ipw_rx_queue_restock(priv); | 5063 | ipw_rx_queue_restock(priv); |
5056 | } | 5064 | } |
5057 | 5065 | ||
5058 | static void ipw_bg_rx_queue_replenish(void *data) | 5066 | static void ipw_bg_rx_queue_replenish(struct work_struct *work) |
5059 | { | 5067 | { |
5060 | struct ipw_priv *priv = data; | 5068 | struct ipw_priv *priv = |
5069 | container_of(work, struct ipw_priv, rx_replenish); | ||
5061 | mutex_lock(&priv->mutex); | 5070 | mutex_lock(&priv->mutex); |
5062 | ipw_rx_queue_replenish(data); | 5071 | ipw_rx_queue_replenish(priv); |
5063 | mutex_unlock(&priv->mutex); | 5072 | mutex_unlock(&priv->mutex); |
5064 | } | 5073 | } |
5065 | 5074 | ||
@@ -5489,9 +5498,10 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv, | |||
5489 | return 1; | 5498 | return 1; |
5490 | } | 5499 | } |
5491 | 5500 | ||
5492 | static void ipw_merge_adhoc_network(void *data) | 5501 | static void ipw_merge_adhoc_network(struct work_struct *work) |
5493 | { | 5502 | { |
5494 | struct ipw_priv *priv = data; | 5503 | struct ipw_priv *priv = |
5504 | container_of(work, struct ipw_priv, merge_networks); | ||
5495 | struct ieee80211_network *network = NULL; | 5505 | struct ieee80211_network *network = NULL; |
5496 | struct ipw_network_match match = { | 5506 | struct ipw_network_match match = { |
5497 | .network = priv->assoc_network | 5507 | .network = priv->assoc_network |
@@ -5948,11 +5958,12 @@ static void ipw_adhoc_check(void *data) | |||
5948 | priv->assoc_request.beacon_interval); | 5958 | priv->assoc_request.beacon_interval); |
5949 | } | 5959 | } |
5950 | 5960 | ||
5951 | static void ipw_bg_adhoc_check(void *data) | 5961 | static void ipw_bg_adhoc_check(struct work_struct *work) |
5952 | { | 5962 | { |
5953 | struct ipw_priv *priv = data; | 5963 | struct ipw_priv *priv = |
5964 | container_of(work, struct ipw_priv, adhoc_check.work); | ||
5954 | mutex_lock(&priv->mutex); | 5965 | mutex_lock(&priv->mutex); |
5955 | ipw_adhoc_check(data); | 5966 | ipw_adhoc_check(priv); |
5956 | mutex_unlock(&priv->mutex); | 5967 | mutex_unlock(&priv->mutex); |
5957 | } | 5968 | } |
5958 | 5969 | ||
@@ -6299,19 +6310,26 @@ done: | |||
6299 | return err; | 6310 | return err; |
6300 | } | 6311 | } |
6301 | 6312 | ||
6302 | static int ipw_request_passive_scan(struct ipw_priv *priv) { | 6313 | static void ipw_request_passive_scan(struct work_struct *work) |
6303 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | 6314 | { |
6315 | struct ipw_priv *priv = | ||
6316 | container_of(work, struct ipw_priv, request_passive_scan); | ||
6317 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE); | ||
6304 | } | 6318 | } |
6305 | 6319 | ||
6306 | static int ipw_request_scan(struct ipw_priv *priv) { | 6320 | static void ipw_request_scan(struct work_struct *work) |
6307 | return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | 6321 | { |
6322 | struct ipw_priv *priv = | ||
6323 | container_of(work, struct ipw_priv, request_scan.work); | ||
6324 | ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE); | ||
6308 | } | 6325 | } |
6309 | 6326 | ||
6310 | static void ipw_bg_abort_scan(void *data) | 6327 | static void ipw_bg_abort_scan(struct work_struct *work) |
6311 | { | 6328 | { |
6312 | struct ipw_priv *priv = data; | 6329 | struct ipw_priv *priv = |
6330 | container_of(work, struct ipw_priv, abort_scan); | ||
6313 | mutex_lock(&priv->mutex); | 6331 | mutex_lock(&priv->mutex); |
6314 | ipw_abort_scan(data); | 6332 | ipw_abort_scan(priv); |
6315 | mutex_unlock(&priv->mutex); | 6333 | mutex_unlock(&priv->mutex); |
6316 | } | 6334 | } |
6317 | 6335 | ||
@@ -7084,9 +7102,10 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv, | |||
7084 | /* | 7102 | /* |
7085 | * background support to run QoS activate functionality | 7103 | * background support to run QoS activate functionality |
7086 | */ | 7104 | */ |
7087 | static void ipw_bg_qos_activate(void *data) | 7105 | static void ipw_bg_qos_activate(struct work_struct *work) |
7088 | { | 7106 | { |
7089 | struct ipw_priv *priv = data; | 7107 | struct ipw_priv *priv = |
7108 | container_of(work, struct ipw_priv, qos_activate); | ||
7090 | 7109 | ||
7091 | if (priv == NULL) | 7110 | if (priv == NULL) |
7092 | return; | 7111 | return; |
@@ -7394,11 +7413,12 @@ static void ipw_roam(void *data) | |||
7394 | priv->status &= ~STATUS_ROAMING; | 7413 | priv->status &= ~STATUS_ROAMING; |
7395 | } | 7414 | } |
7396 | 7415 | ||
7397 | static void ipw_bg_roam(void *data) | 7416 | static void ipw_bg_roam(struct work_struct *work) |
7398 | { | 7417 | { |
7399 | struct ipw_priv *priv = data; | 7418 | struct ipw_priv *priv = |
7419 | container_of(work, struct ipw_priv, roam); | ||
7400 | mutex_lock(&priv->mutex); | 7420 | mutex_lock(&priv->mutex); |
7401 | ipw_roam(data); | 7421 | ipw_roam(priv); |
7402 | mutex_unlock(&priv->mutex); | 7422 | mutex_unlock(&priv->mutex); |
7403 | } | 7423 | } |
7404 | 7424 | ||
@@ -7479,8 +7499,8 @@ static int ipw_associate(void *data) | |||
7479 | &priv->request_scan, | 7499 | &priv->request_scan, |
7480 | SCAN_INTERVAL); | 7500 | SCAN_INTERVAL); |
7481 | else | 7501 | else |
7482 | queue_work(priv->workqueue, | 7502 | queue_delayed_work(priv->workqueue, |
7483 | &priv->request_scan); | 7503 | &priv->request_scan, 0); |
7484 | } | 7504 | } |
7485 | 7505 | ||
7486 | return 0; | 7506 | return 0; |
@@ -7491,11 +7511,12 @@ static int ipw_associate(void *data) | |||
7491 | return 1; | 7511 | return 1; |
7492 | } | 7512 | } |
7493 | 7513 | ||
7494 | static void ipw_bg_associate(void *data) | 7514 | static void ipw_bg_associate(struct work_struct *work) |
7495 | { | 7515 | { |
7496 | struct ipw_priv *priv = data; | 7516 | struct ipw_priv *priv = |
7517 | container_of(work, struct ipw_priv, associate); | ||
7497 | mutex_lock(&priv->mutex); | 7518 | mutex_lock(&priv->mutex); |
7498 | ipw_associate(data); | 7519 | ipw_associate(priv); |
7499 | mutex_unlock(&priv->mutex); | 7520 | mutex_unlock(&priv->mutex); |
7500 | } | 7521 | } |
7501 | 7522 | ||
@@ -9410,7 +9431,7 @@ static int ipw_wx_set_scan(struct net_device *dev, | |||
9410 | 9431 | ||
9411 | IPW_DEBUG_WX("Start scan\n"); | 9432 | IPW_DEBUG_WX("Start scan\n"); |
9412 | 9433 | ||
9413 | queue_work(priv->workqueue, &priv->request_scan); | 9434 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); |
9414 | 9435 | ||
9415 | return 0; | 9436 | return 0; |
9416 | } | 9437 | } |
@@ -10547,11 +10568,12 @@ static void ipw_rf_kill(void *adapter) | |||
10547 | spin_unlock_irqrestore(&priv->lock, flags); | 10568 | spin_unlock_irqrestore(&priv->lock, flags); |
10548 | } | 10569 | } |
10549 | 10570 | ||
10550 | static void ipw_bg_rf_kill(void *data) | 10571 | static void ipw_bg_rf_kill(struct work_struct *work) |
10551 | { | 10572 | { |
10552 | struct ipw_priv *priv = data; | 10573 | struct ipw_priv *priv = |
10574 | container_of(work, struct ipw_priv, rf_kill.work); | ||
10553 | mutex_lock(&priv->mutex); | 10575 | mutex_lock(&priv->mutex); |
10554 | ipw_rf_kill(data); | 10576 | ipw_rf_kill(priv); |
10555 | mutex_unlock(&priv->mutex); | 10577 | mutex_unlock(&priv->mutex); |
10556 | } | 10578 | } |
10557 | 10579 | ||
@@ -10582,11 +10604,12 @@ static void ipw_link_up(struct ipw_priv *priv) | |||
10582 | queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); | 10604 | queue_delayed_work(priv->workqueue, &priv->request_scan, HZ); |
10583 | } | 10605 | } |
10584 | 10606 | ||
10585 | static void ipw_bg_link_up(void *data) | 10607 | static void ipw_bg_link_up(struct work_struct *work) |
10586 | { | 10608 | { |
10587 | struct ipw_priv *priv = data; | 10609 | struct ipw_priv *priv = |
10610 | container_of(work, struct ipw_priv, link_up); | ||
10588 | mutex_lock(&priv->mutex); | 10611 | mutex_lock(&priv->mutex); |
10589 | ipw_link_up(data); | 10612 | ipw_link_up(priv); |
10590 | mutex_unlock(&priv->mutex); | 10613 | mutex_unlock(&priv->mutex); |
10591 | } | 10614 | } |
10592 | 10615 | ||
@@ -10606,15 +10629,16 @@ static void ipw_link_down(struct ipw_priv *priv) | |||
10606 | 10629 | ||
10607 | if (!(priv->status & STATUS_EXIT_PENDING)) { | 10630 | if (!(priv->status & STATUS_EXIT_PENDING)) { |
10608 | /* Queue up another scan... */ | 10631 | /* Queue up another scan... */ |
10609 | queue_work(priv->workqueue, &priv->request_scan); | 10632 | queue_delayed_work(priv->workqueue, &priv->request_scan, 0); |
10610 | } | 10633 | } |
10611 | } | 10634 | } |
10612 | 10635 | ||
10613 | static void ipw_bg_link_down(void *data) | 10636 | static void ipw_bg_link_down(struct work_struct *work) |
10614 | { | 10637 | { |
10615 | struct ipw_priv *priv = data; | 10638 | struct ipw_priv *priv = |
10639 | container_of(work, struct ipw_priv, link_down); | ||
10616 | mutex_lock(&priv->mutex); | 10640 | mutex_lock(&priv->mutex); |
10617 | ipw_link_down(data); | 10641 | ipw_link_down(priv); |
10618 | mutex_unlock(&priv->mutex); | 10642 | mutex_unlock(&priv->mutex); |
10619 | } | 10643 | } |
10620 | 10644 | ||
@@ -10626,38 +10650,30 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv) | |||
10626 | init_waitqueue_head(&priv->wait_command_queue); | 10650 | init_waitqueue_head(&priv->wait_command_queue); |
10627 | init_waitqueue_head(&priv->wait_state); | 10651 | init_waitqueue_head(&priv->wait_state); |
10628 | 10652 | ||
10629 | INIT_WORK(&priv->adhoc_check, ipw_bg_adhoc_check, priv); | 10653 | INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check); |
10630 | INIT_WORK(&priv->associate, ipw_bg_associate, priv); | 10654 | INIT_WORK(&priv->associate, ipw_bg_associate); |
10631 | INIT_WORK(&priv->disassociate, ipw_bg_disassociate, priv); | 10655 | INIT_WORK(&priv->disassociate, ipw_bg_disassociate); |
10632 | INIT_WORK(&priv->system_config, ipw_system_config, priv); | 10656 | INIT_WORK(&priv->system_config, ipw_system_config); |
10633 | INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish, priv); | 10657 | INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish); |
10634 | INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart, priv); | 10658 | INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart); |
10635 | INIT_WORK(&priv->rf_kill, ipw_bg_rf_kill, priv); | 10659 | INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill); |
10636 | INIT_WORK(&priv->up, (void (*)(void *))ipw_bg_up, priv); | 10660 | INIT_WORK(&priv->up, ipw_bg_up); |
10637 | INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); | 10661 | INIT_WORK(&priv->down, ipw_bg_down); |
10638 | INIT_WORK(&priv->request_scan, | 10662 | INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan); |
10639 | (void (*)(void *))ipw_request_scan, priv); | 10663 | INIT_WORK(&priv->request_passive_scan, ipw_request_passive_scan); |
10640 | INIT_WORK(&priv->request_passive_scan, | 10664 | INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats); |
10641 | (void (*)(void *))ipw_request_passive_scan, priv); | 10665 | INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan); |
10642 | INIT_WORK(&priv->gather_stats, | 10666 | INIT_WORK(&priv->roam, ipw_bg_roam); |
10643 | (void (*)(void *))ipw_bg_gather_stats, priv); | 10667 | INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check); |
10644 | INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); | 10668 | INIT_WORK(&priv->link_up, ipw_bg_link_up); |
10645 | INIT_WORK(&priv->roam, ipw_bg_roam, priv); | 10669 | INIT_WORK(&priv->link_down, ipw_bg_link_down); |
10646 | INIT_WORK(&priv->scan_check, ipw_bg_scan_check, priv); | 10670 | INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on); |
10647 | INIT_WORK(&priv->link_up, (void (*)(void *))ipw_bg_link_up, priv); | 10671 | INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off); |
10648 | INIT_WORK(&priv->link_down, (void (*)(void *))ipw_bg_link_down, priv); | 10672 | INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off); |
10649 | INIT_WORK(&priv->led_link_on, (void (*)(void *))ipw_bg_led_link_on, | 10673 | INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network); |
10650 | priv); | ||
10651 | INIT_WORK(&priv->led_link_off, (void (*)(void *))ipw_bg_led_link_off, | ||
10652 | priv); | ||
10653 | INIT_WORK(&priv->led_act_off, (void (*)(void *))ipw_bg_led_activity_off, | ||
10654 | priv); | ||
10655 | INIT_WORK(&priv->merge_networks, | ||
10656 | (void (*)(void *))ipw_merge_adhoc_network, priv); | ||
10657 | 10674 | ||
10658 | #ifdef CONFIG_IPW2200_QOS | 10675 | #ifdef CONFIG_IPW2200_QOS |
10659 | INIT_WORK(&priv->qos_activate, (void (*)(void *))ipw_bg_qos_activate, | 10676 | INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate); |
10660 | priv); | ||
10661 | #endif /* CONFIG_IPW2200_QOS */ | 10677 | #endif /* CONFIG_IPW2200_QOS */ |
10662 | 10678 | ||
10663 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | 10679 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) |
@@ -11190,7 +11206,8 @@ static int ipw_up(struct ipw_priv *priv) | |||
11190 | 11206 | ||
11191 | /* If configure to try and auto-associate, kick | 11207 | /* If configure to try and auto-associate, kick |
11192 | * off a scan. */ | 11208 | * off a scan. */ |
11193 | queue_work(priv->workqueue, &priv->request_scan); | 11209 | queue_delayed_work(priv->workqueue, |
11210 | &priv->request_scan, 0); | ||
11194 | 11211 | ||
11195 | return 0; | 11212 | return 0; |
11196 | } | 11213 | } |
@@ -11211,11 +11228,12 @@ static int ipw_up(struct ipw_priv *priv) | |||
11211 | return -EIO; | 11228 | return -EIO; |
11212 | } | 11229 | } |
11213 | 11230 | ||
11214 | static void ipw_bg_up(void *data) | 11231 | static void ipw_bg_up(struct work_struct *work) |
11215 | { | 11232 | { |
11216 | struct ipw_priv *priv = data; | 11233 | struct ipw_priv *priv = |
11234 | container_of(work, struct ipw_priv, up); | ||
11217 | mutex_lock(&priv->mutex); | 11235 | mutex_lock(&priv->mutex); |
11218 | ipw_up(data); | 11236 | ipw_up(priv); |
11219 | mutex_unlock(&priv->mutex); | 11237 | mutex_unlock(&priv->mutex); |
11220 | } | 11238 | } |
11221 | 11239 | ||
@@ -11282,11 +11300,12 @@ static void ipw_down(struct ipw_priv *priv) | |||
11282 | ipw_led_radio_off(priv); | 11300 | ipw_led_radio_off(priv); |
11283 | } | 11301 | } |
11284 | 11302 | ||
11285 | static void ipw_bg_down(void *data) | 11303 | static void ipw_bg_down(struct work_struct *work) |
11286 | { | 11304 | { |
11287 | struct ipw_priv *priv = data; | 11305 | struct ipw_priv *priv = |
11306 | container_of(work, struct ipw_priv, down); | ||
11288 | mutex_lock(&priv->mutex); | 11307 | mutex_lock(&priv->mutex); |
11289 | ipw_down(data); | 11308 | ipw_down(priv); |
11290 | mutex_unlock(&priv->mutex); | 11309 | mutex_unlock(&priv->mutex); |
11291 | } | 11310 | } |
11292 | 11311 | ||
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h index dad5eedefbf1..626a240a87d8 100644 --- a/drivers/net/wireless/ipw2200.h +++ b/drivers/net/wireless/ipw2200.h | |||
@@ -1290,21 +1290,21 @@ struct ipw_priv { | |||
1290 | 1290 | ||
1291 | struct workqueue_struct *workqueue; | 1291 | struct workqueue_struct *workqueue; |
1292 | 1292 | ||
1293 | struct work_struct adhoc_check; | 1293 | struct delayed_work adhoc_check; |
1294 | struct work_struct associate; | 1294 | struct work_struct associate; |
1295 | struct work_struct disassociate; | 1295 | struct work_struct disassociate; |
1296 | struct work_struct system_config; | 1296 | struct work_struct system_config; |
1297 | struct work_struct rx_replenish; | 1297 | struct work_struct rx_replenish; |
1298 | struct work_struct request_scan; | 1298 | struct delayed_work request_scan; |
1299 | struct work_struct request_passive_scan; | 1299 | struct work_struct request_passive_scan; |
1300 | struct work_struct adapter_restart; | 1300 | struct work_struct adapter_restart; |
1301 | struct work_struct rf_kill; | 1301 | struct delayed_work rf_kill; |
1302 | struct work_struct up; | 1302 | struct work_struct up; |
1303 | struct work_struct down; | 1303 | struct work_struct down; |
1304 | struct work_struct gather_stats; | 1304 | struct delayed_work gather_stats; |
1305 | struct work_struct abort_scan; | 1305 | struct work_struct abort_scan; |
1306 | struct work_struct roam; | 1306 | struct work_struct roam; |
1307 | struct work_struct scan_check; | 1307 | struct delayed_work scan_check; |
1308 | struct work_struct link_up; | 1308 | struct work_struct link_up; |
1309 | struct work_struct link_down; | 1309 | struct work_struct link_down; |
1310 | 1310 | ||
@@ -1319,9 +1319,9 @@ struct ipw_priv { | |||
1319 | u32 led_ofdm_on; | 1319 | u32 led_ofdm_on; |
1320 | u32 led_ofdm_off; | 1320 | u32 led_ofdm_off; |
1321 | 1321 | ||
1322 | struct work_struct led_link_on; | 1322 | struct delayed_work led_link_on; |
1323 | struct work_struct led_link_off; | 1323 | struct delayed_work led_link_off; |
1324 | struct work_struct led_act_off; | 1324 | struct delayed_work led_act_off; |
1325 | struct work_struct merge_networks; | 1325 | struct work_struct merge_networks; |
1326 | 1326 | ||
1327 | struct ipw_cmd_log *cmdlog; | 1327 | struct ipw_cmd_log *cmdlog; |
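The ipw2200.h hunk shows that only the items scheduled with a non-zero delay change type; one-shot items stay plain work_struct, and because the two-argument INIT_* macros type-check the handler, the old (void (*)(void *)) casts disappear from ipw_setup_deferred_work(). A sketch of that mixed layout, with qux_priv and its handlers as illustrative names:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct qux_priv {
	struct work_struct  up;			/* one-shot: stays work_struct */
	struct delayed_work request_scan;	/* delayed: retyped */
};

static void qux_bg_up(struct work_struct *work)
{
	struct qux_priv *priv = container_of(work, struct qux_priv, up);

	(void)priv;		/* ... bring the interface up ... */
}

static void qux_request_scan(struct work_struct *work)
{
	struct qux_priv *priv =
		container_of(work, struct qux_priv, request_scan.work);

	(void)priv;		/* ... kick off a scan ... */
}

static void qux_setup_deferred_work(struct qux_priv *priv)
{
	/* Handlers are type-checked; no function-pointer casts needed. */
	INIT_WORK(&priv->up, qux_bg_up);
	INIT_DELAYED_WORK(&priv->request_scan, qux_request_scan);
}
```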
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index 336cabac13b3..936c888e03e1 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c | |||
@@ -980,9 +980,11 @@ static void print_linkstatus(struct net_device *dev, u16 status) | |||
980 | } | 980 | } |
981 | 981 | ||
982 | /* Search scan results for requested BSSID, join it if found */ | 982 | /* Search scan results for requested BSSID, join it if found */ |
983 | static void orinoco_join_ap(struct net_device *dev) | 983 | static void orinoco_join_ap(struct work_struct *work) |
984 | { | 984 | { |
985 | struct orinoco_private *priv = netdev_priv(dev); | 985 | struct orinoco_private *priv = |
986 | container_of(work, struct orinoco_private, join_work); | ||
987 | struct net_device *dev = priv->ndev; | ||
986 | struct hermes *hw = &priv->hw; | 988 | struct hermes *hw = &priv->hw; |
987 | int err; | 989 | int err; |
988 | unsigned long flags; | 990 | unsigned long flags; |
@@ -1055,9 +1057,11 @@ static void orinoco_join_ap(struct net_device *dev) | |||
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | /* Send new BSSID to userspace */ | 1059 | /* Send new BSSID to userspace */ |
1058 | static void orinoco_send_wevents(struct net_device *dev) | 1060 | static void orinoco_send_wevents(struct work_struct *work) |
1059 | { | 1061 | { |
1060 | struct orinoco_private *priv = netdev_priv(dev); | 1062 | struct orinoco_private *priv = |
1063 | container_of(work, struct orinoco_private, wevent_work); | ||
1064 | struct net_device *dev = priv->ndev; | ||
1061 | struct hermes *hw = &priv->hw; | 1065 | struct hermes *hw = &priv->hw; |
1062 | union iwreq_data wrqu; | 1066 | union iwreq_data wrqu; |
1063 | int err; | 1067 | int err; |
@@ -1864,9 +1868,11 @@ __orinoco_set_multicast_list(struct net_device *dev) | |||
1864 | 1868 | ||
1865 | /* This must be called from user context, without locks held - use | 1869 | /* This must be called from user context, without locks held - use |
1866 | * schedule_work() */ | 1870 | * schedule_work() */ |
1867 | static void orinoco_reset(struct net_device *dev) | 1871 | static void orinoco_reset(struct work_struct *work) |
1868 | { | 1872 | { |
1869 | struct orinoco_private *priv = netdev_priv(dev); | 1873 | struct orinoco_private *priv = |
1874 | container_of(work, struct orinoco_private, reset_work); | ||
1875 | struct net_device *dev = priv->ndev; | ||
1870 | struct hermes *hw = &priv->hw; | 1876 | struct hermes *hw = &priv->hw; |
1871 | int err; | 1877 | int err; |
1872 | unsigned long flags; | 1878 | unsigned long flags; |
@@ -2434,9 +2440,9 @@ struct net_device *alloc_orinocodev(int sizeof_card, | |||
2434 | priv->hw_unavailable = 1; /* orinoco_init() must clear this | 2440 | priv->hw_unavailable = 1; /* orinoco_init() must clear this |
2435 | * before anything else touches the | 2441 | * before anything else touches the |
2436 | * hardware */ | 2442 | * hardware */ |
2437 | INIT_WORK(&priv->reset_work, (void (*)(void *))orinoco_reset, dev); | 2443 | INIT_WORK(&priv->reset_work, orinoco_reset); |
2438 | INIT_WORK(&priv->join_work, (void (*)(void *))orinoco_join_ap, dev); | 2444 | INIT_WORK(&priv->join_work, orinoco_join_ap); |
2439 | INIT_WORK(&priv->wevent_work, (void (*)(void *))orinoco_send_wevents, dev); | 2445 | INIT_WORK(&priv->wevent_work, orinoco_send_wevents); |
2440 | 2446 | ||
2441 | netif_carrier_off(dev); | 2447 | netif_carrier_off(dev); |
2442 | priv->last_linkstatus = 0xffff; | 2448 | priv->last_linkstatus = 0xffff; |
@@ -3608,7 +3614,7 @@ static int orinoco_ioctl_reset(struct net_device *dev, | |||
3608 | printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); | 3614 | printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); |
3609 | 3615 | ||
3610 | /* Firmware reset */ | 3616 | /* Firmware reset */ |
3611 | orinoco_reset(dev); | 3617 | orinoco_reset(&priv->reset_work); |
3612 | } else { | 3618 | } else { |
3613 | printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); | 3619 | printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); |
3614 | 3620 | ||
@@ -4154,7 +4160,7 @@ static int orinoco_ioctl_commit(struct net_device *dev, | |||
4154 | return 0; | 4160 | return 0; |
4155 | 4161 | ||
4156 | if (priv->broken_disableport) { | 4162 | if (priv->broken_disableport) { |
4157 | orinoco_reset(dev); | 4163 | orinoco_reset(&priv->reset_work); |
4158 | return 0; | 4164 | return 0; |
4159 | } | 4165 | } |
4160 | 4166 | ||
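In the orinoco hunks the handlers used to receive the net_device directly; now they receive the work item and reach the device through a back-pointer in the private structure, and synchronous callers pass the address of the work item instead of the device. A minimal sketch assuming an invented quux_priv with such a back-pointer:

```c
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct quux_priv {
	struct net_device *ndev;	/* back-pointer kept in private data */
	struct work_struct reset_work;
};

static void quux_reset(struct work_struct *work)
{
	struct quux_priv *priv =
		container_of(work, struct quux_priv, reset_work);
	struct net_device *dev = priv->ndev;	/* was the old data argument */

	netif_carrier_off(dev);	/* ... firmware reset ... */
}

/* A direct caller no longer hands over the net_device; it passes the
 * work item's address, as the ioctl paths above do with &priv->reset_work. */
static void quux_force_reset(struct quux_priv *priv)
{
	quux_reset(&priv->reset_work);
}
```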
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c index 4a20e45de3ca..a87eb51886c8 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.c +++ b/drivers/net/wireless/prism54/isl_ioctl.c | |||
@@ -157,8 +157,9 @@ prism54_mib_init(islpci_private *priv) | |||
157 | * schedule_work(), thus we can as well use sleeping semaphore | 157 | * schedule_work(), thus we can as well use sleeping semaphore |
158 | * locking */ | 158 | * locking */ |
159 | void | 159 | void |
160 | prism54_update_stats(islpci_private *priv) | 160 | prism54_update_stats(struct work_struct *work) |
161 | { | 161 | { |
162 | islpci_private *priv = container_of(work, islpci_private, stats_work); | ||
162 | char *data; | 163 | char *data; |
163 | int j; | 164 | int j; |
164 | struct obj_bss bss, *bss2; | 165 | struct obj_bss bss, *bss2; |
@@ -2493,9 +2494,10 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid, | |||
2493 | * interrupt context, no locks held. | 2494 | * interrupt context, no locks held. |
2494 | */ | 2495 | */ |
2495 | void | 2496 | void |
2496 | prism54_process_trap(void *data) | 2497 | prism54_process_trap(struct work_struct *work) |
2497 | { | 2498 | { |
2498 | struct islpci_mgmtframe *frame = data; | 2499 | struct islpci_mgmtframe *frame = |
2500 | container_of(work, struct islpci_mgmtframe, ws); | ||
2499 | struct net_device *ndev = frame->ndev; | 2501 | struct net_device *ndev = frame->ndev; |
2500 | enum oid_num_t n = mgt_oidtonum(frame->header->oid); | 2502 | enum oid_num_t n = mgt_oidtonum(frame->header->oid); |
2501 | 2503 | ||
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h index e8183d30c52e..bcfbfb9281d2 100644 --- a/drivers/net/wireless/prism54/isl_ioctl.h +++ b/drivers/net/wireless/prism54/isl_ioctl.h | |||
@@ -31,12 +31,12 @@ | |||
31 | void prism54_mib_init(islpci_private *); | 31 | void prism54_mib_init(islpci_private *); |
32 | 32 | ||
33 | struct iw_statistics *prism54_get_wireless_stats(struct net_device *); | 33 | struct iw_statistics *prism54_get_wireless_stats(struct net_device *); |
34 | void prism54_update_stats(islpci_private *); | 34 | void prism54_update_stats(struct work_struct *); |
35 | 35 | ||
36 | void prism54_acl_init(struct islpci_acl *); | 36 | void prism54_acl_init(struct islpci_acl *); |
37 | void prism54_acl_clean(struct islpci_acl *); | 37 | void prism54_acl_clean(struct islpci_acl *); |
38 | 38 | ||
39 | void prism54_process_trap(void *); | 39 | void prism54_process_trap(struct work_struct *); |
40 | 40 | ||
41 | void prism54_wpa_bss_ie_init(islpci_private *priv); | 41 | void prism54_wpa_bss_ie_init(islpci_private *priv); |
42 | void prism54_wpa_bss_ie_clean(islpci_private *priv); | 42 | void prism54_wpa_bss_ie_clean(islpci_private *priv); |
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c index 1e0603ca436c..f057fd9fcd79 100644 --- a/drivers/net/wireless/prism54/islpci_dev.c +++ b/drivers/net/wireless/prism54/islpci_dev.c | |||
@@ -860,11 +860,10 @@ islpci_setup(struct pci_dev *pdev) | |||
860 | priv->state_off = 1; | 860 | priv->state_off = 1; |
861 | 861 | ||
862 | /* initialize workqueue's */ | 862 | /* initialize workqueue's */ |
863 | INIT_WORK(&priv->stats_work, | 863 | INIT_WORK(&priv->stats_work, prism54_update_stats); |
864 | (void (*)(void *)) prism54_update_stats, priv); | ||
865 | priv->stats_timestamp = 0; | 864 | priv->stats_timestamp = 0; |
866 | 865 | ||
867 | INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake, priv); | 866 | INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake); |
868 | priv->reset_task_pending = 0; | 867 | priv->reset_task_pending = 0; |
869 | 868 | ||
870 | /* allocate various memory areas */ | 869 | /* allocate various memory areas */ |
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c index 676d83813dc8..b1122912ee2d 100644 --- a/drivers/net/wireless/prism54/islpci_eth.c +++ b/drivers/net/wireless/prism54/islpci_eth.c | |||
@@ -480,9 +480,9 @@ islpci_eth_receive(islpci_private *priv) | |||
480 | } | 480 | } |
481 | 481 | ||
482 | void | 482 | void |
483 | islpci_do_reset_and_wake(void *data) | 483 | islpci_do_reset_and_wake(struct work_struct *work) |
484 | { | 484 | { |
485 | islpci_private *priv = data; | 485 | islpci_private *priv = container_of(work, islpci_private, reset_task); |
486 | 486 | ||
487 | islpci_reset(priv, 1); | 487 | islpci_reset(priv, 1); |
488 | priv->reset_task_pending = 0; | 488 | priv->reset_task_pending = 0; |
diff --git a/drivers/net/wireless/prism54/islpci_eth.h b/drivers/net/wireless/prism54/islpci_eth.h index 26789454067c..5bf820defbd0 100644 --- a/drivers/net/wireless/prism54/islpci_eth.h +++ b/drivers/net/wireless/prism54/islpci_eth.h | |||
@@ -67,6 +67,6 @@ void islpci_eth_cleanup_transmit(islpci_private *, isl38xx_control_block *); | |||
67 | int islpci_eth_transmit(struct sk_buff *, struct net_device *); | 67 | int islpci_eth_transmit(struct sk_buff *, struct net_device *); |
68 | int islpci_eth_receive(islpci_private *); | 68 | int islpci_eth_receive(islpci_private *); |
69 | void islpci_eth_tx_timeout(struct net_device *); | 69 | void islpci_eth_tx_timeout(struct net_device *); |
70 | void islpci_do_reset_and_wake(void *data); | 70 | void islpci_do_reset_and_wake(struct work_struct *); |
71 | 71 | ||
72 | #endif /* _ISL_GEN_H */ | 72 | #endif /* _ISL_GEN_H */ |
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c index 036a875054c9..2246f7930b4e 100644 --- a/drivers/net/wireless/prism54/islpci_mgt.c +++ b/drivers/net/wireless/prism54/islpci_mgt.c | |||
@@ -386,7 +386,7 @@ islpci_mgt_receive(struct net_device *ndev) | |||
386 | 386 | ||
387 | /* Create work to handle trap out of interrupt | 387 | /* Create work to handle trap out of interrupt |
388 | * context. */ | 388 | * context. */ |
389 | INIT_WORK(&frame->ws, prism54_process_trap, frame); | 389 | INIT_WORK(&frame->ws, prism54_process_trap); |
390 | schedule_work(&frame->ws); | 390 | schedule_work(&frame->ws); |
391 | 391 | ||
392 | } else { | 392 | } else { |
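
The prism54 hunks above all follow the same basic conversion: INIT_WORK() loses its third (data) argument, the handler's prototype changes from void (*)(void *) to void (*)(struct work_struct *), and the context that used to arrive as the void pointer is now recovered with container_of(), since the work_struct is embedded in the driver's private structure. A minimal sketch of the before/after shape, using a hypothetical struct my_priv rather than the islpci types:

#include <linux/kernel.h>       /* container_of() */
#include <linux/workqueue.h>

struct my_priv {
        struct work_struct reset_task;  /* embedded work item */
        int reset_pending;
};

/* New-style handler: the work item itself is the only argument. */
static void my_reset_task(struct work_struct *work)
{
        struct my_priv *priv = container_of(work, struct my_priv, reset_task);

        /* ... perform the reset using priv ... */
        priv->reset_pending = 0;
}

static void my_setup(struct my_priv *priv)
{
        /* Old API: INIT_WORK(&priv->reset_task, (void (*)(void *))my_reset_task, priv); */
        INIT_WORK(&priv->reset_task, my_reset_task);
}
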
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 2696f95b9278..f1573a9c2336 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c | |||
@@ -32,8 +32,8 @@ | |||
32 | 32 | ||
33 | static void ieee_init(struct ieee80211_device *ieee); | 33 | static void ieee_init(struct ieee80211_device *ieee); |
34 | static void softmac_init(struct ieee80211softmac_device *sm); | 34 | static void softmac_init(struct ieee80211softmac_device *sm); |
35 | static void set_rts_cts_work(void *d); | 35 | static void set_rts_cts_work(struct work_struct *work); |
36 | static void set_basic_rates_work(void *d); | 36 | static void set_basic_rates_work(struct work_struct *work); |
37 | 37 | ||
38 | static void housekeeping_init(struct zd_mac *mac); | 38 | static void housekeeping_init(struct zd_mac *mac); |
39 | static void housekeeping_enable(struct zd_mac *mac); | 39 | static void housekeeping_enable(struct zd_mac *mac); |
@@ -48,8 +48,8 @@ int zd_mac_init(struct zd_mac *mac, | |||
48 | memset(mac, 0, sizeof(*mac)); | 48 | memset(mac, 0, sizeof(*mac)); |
49 | spin_lock_init(&mac->lock); | 49 | spin_lock_init(&mac->lock); |
50 | mac->netdev = netdev; | 50 | mac->netdev = netdev; |
51 | INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work, mac); | 51 | INIT_DELAYED_WORK(&mac->set_rts_cts_work, set_rts_cts_work); |
52 | INIT_WORK(&mac->set_basic_rates_work, set_basic_rates_work, mac); | 52 | INIT_DELAYED_WORK(&mac->set_basic_rates_work, set_basic_rates_work); |
53 | 53 | ||
54 | ieee_init(ieee); | 54 | ieee_init(ieee); |
55 | softmac_init(ieee80211_priv(netdev)); | 55 | softmac_init(ieee80211_priv(netdev)); |
@@ -366,9 +366,10 @@ static void try_enable_tx(struct zd_mac *mac) | |||
366 | spin_unlock_irqrestore(&mac->lock, flags); | 366 | spin_unlock_irqrestore(&mac->lock, flags); |
367 | } | 367 | } |
368 | 368 | ||
369 | static void set_rts_cts_work(void *d) | 369 | static void set_rts_cts_work(struct work_struct *work) |
370 | { | 370 | { |
371 | struct zd_mac *mac = d; | 371 | struct zd_mac *mac = |
372 | container_of(work, struct zd_mac, set_rts_cts_work.work); | ||
372 | unsigned long flags; | 373 | unsigned long flags; |
373 | u8 rts_rate; | 374 | u8 rts_rate; |
374 | unsigned int short_preamble; | 375 | unsigned int short_preamble; |
@@ -387,9 +388,10 @@ static void set_rts_cts_work(void *d) | |||
387 | try_enable_tx(mac); | 388 | try_enable_tx(mac); |
388 | } | 389 | } |
389 | 390 | ||
390 | static void set_basic_rates_work(void *d) | 391 | static void set_basic_rates_work(struct work_struct *work) |
391 | { | 392 | { |
392 | struct zd_mac *mac = d; | 393 | struct zd_mac *mac = |
394 | container_of(work, struct zd_mac, set_basic_rates_work.work); | ||
393 | unsigned long flags; | 395 | unsigned long flags; |
394 | u16 basic_rates; | 396 | u16 basic_rates; |
395 | 397 | ||
@@ -467,12 +469,13 @@ static void bssinfo_change(struct net_device *netdev, u32 changes) | |||
467 | if (need_set_rts_cts && !mac->updating_rts_rate) { | 469 | if (need_set_rts_cts && !mac->updating_rts_rate) { |
468 | mac->updating_rts_rate = 1; | 470 | mac->updating_rts_rate = 1; |
469 | netif_stop_queue(mac->netdev); | 471 | netif_stop_queue(mac->netdev); |
470 | queue_work(zd_workqueue, &mac->set_rts_cts_work); | 472 | queue_delayed_work(zd_workqueue, &mac->set_rts_cts_work, 0); |
471 | } | 473 | } |
472 | if (need_set_rates && !mac->updating_basic_rates) { | 474 | if (need_set_rates && !mac->updating_basic_rates) { |
473 | mac->updating_basic_rates = 1; | 475 | mac->updating_basic_rates = 1; |
474 | netif_stop_queue(mac->netdev); | 476 | netif_stop_queue(mac->netdev); |
475 | queue_work(zd_workqueue, &mac->set_basic_rates_work); | 477 | queue_delayed_work(zd_workqueue, &mac->set_basic_rates_work, |
478 | 0); | ||
476 | } | 479 | } |
477 | spin_unlock_irqrestore(&mac->lock, flags); | 480 | spin_unlock_irqrestore(&mac->lock, flags); |
478 | } | 481 | } |
@@ -1182,9 +1185,10 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev) | |||
1182 | 1185 | ||
1183 | #define LINK_LED_WORK_DELAY HZ | 1186 | #define LINK_LED_WORK_DELAY HZ |
1184 | 1187 | ||
1185 | static void link_led_handler(void *p) | 1188 | static void link_led_handler(struct work_struct *work) |
1186 | { | 1189 | { |
1187 | struct zd_mac *mac = p; | 1190 | struct zd_mac *mac = |
1191 | container_of(work, struct zd_mac, housekeeping.link_led_work.work); | ||
1188 | struct zd_chip *chip = &mac->chip; | 1192 | struct zd_chip *chip = &mac->chip; |
1189 | struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); | 1193 | struct ieee80211softmac_device *sm = ieee80211_priv(mac->netdev); |
1190 | int is_associated; | 1194 | int is_associated; |
@@ -1205,7 +1209,7 @@ static void link_led_handler(void *p) | |||
1205 | 1209 | ||
1206 | static void housekeeping_init(struct zd_mac *mac) | 1210 | static void housekeeping_init(struct zd_mac *mac) |
1207 | { | 1211 | { |
1208 | INIT_WORK(&mac->housekeeping.link_led_work, link_led_handler, mac); | 1212 | INIT_DELAYED_WORK(&mac->housekeeping.link_led_work, link_led_handler); |
1209 | } | 1213 | } |
1210 | 1214 | ||
1211 | static void housekeeping_enable(struct zd_mac *mac) | 1215 | static void housekeeping_enable(struct zd_mac *mac) |
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h index 5dcfb251f02e..d4e8b870409d 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zd1211rw/zd_mac.h | |||
@@ -119,7 +119,7 @@ struct rx_status { | |||
119 | #define ZD_RX_ERROR 0x80 | 119 | #define ZD_RX_ERROR 0x80 |
120 | 120 | ||
121 | struct housekeeping { | 121 | struct housekeeping { |
122 | struct work_struct link_led_work; | 122 | struct delayed_work link_led_work; |
123 | }; | 123 | }; |
124 | 124 | ||
125 | #define ZD_MAC_STATS_BUFFER_SIZE 16 | 125 | #define ZD_MAC_STATS_BUFFER_SIZE 16 |
@@ -133,8 +133,8 @@ struct zd_mac { | |||
133 | struct iw_statistics iw_stats; | 133 | struct iw_statistics iw_stats; |
134 | 134 | ||
135 | struct housekeeping housekeeping; | 135 | struct housekeeping housekeeping; |
136 | struct work_struct set_rts_cts_work; | 136 | struct delayed_work set_rts_cts_work; |
137 | struct work_struct set_basic_rates_work; | 137 | struct delayed_work set_basic_rates_work; |
138 | 138 | ||
139 | unsigned int stats_count; | 139 | unsigned int stats_count; |
140 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; | 140 | u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE]; |
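
The zd1211rw conversion shows the second recurring pattern: work items that are queued with a delay become struct delayed_work (which embeds a work_struct in its .work member), the handler reaches the containing structure via container_of(work, type, field.work), and queue_work()/schedule_work() calls become the _delayed_ variants with a delay of 0. A hedged sketch with invented names (struct my_mac, my_led_handler, my_wq):

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_mac {
        struct delayed_work led_work;   /* was: struct work_struct */
        int led_on;
};

static struct workqueue_struct *my_wq;

static void my_led_handler(struct work_struct *work)
{
        /* delayed_work embeds the work_struct in its .work member */
        struct my_mac *mac = container_of(work, struct my_mac, led_work.work);

        mac->led_on ^= 1;
        /* re-arm: run again in one second */
        queue_delayed_work(my_wq, &mac->led_work, HZ);
}

static void my_mac_init(struct my_mac *mac)
{
        INIT_DELAYED_WORK(&mac->led_work, my_led_handler);
        /* was: queue_work(my_wq, &mac->led_work); */
        queue_delayed_work(my_wq, &mac->led_work, 0);
}
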
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index fc4bc9b94c74..a83c3db7d18f 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c | |||
@@ -29,7 +29,7 @@ | |||
29 | 29 | ||
30 | struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; | 30 | struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned; |
31 | 31 | ||
32 | static void wq_sync_buffer(void *); | 32 | static void wq_sync_buffer(struct work_struct *work); |
33 | 33 | ||
34 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) | 34 | #define DEFAULT_TIMER_EXPIRE (HZ / 10) |
35 | static int work_enabled; | 35 | static int work_enabled; |
@@ -65,7 +65,7 @@ int alloc_cpu_buffers(void) | |||
65 | b->sample_received = 0; | 65 | b->sample_received = 0; |
66 | b->sample_lost_overflow = 0; | 66 | b->sample_lost_overflow = 0; |
67 | b->cpu = i; | 67 | b->cpu = i; |
68 | INIT_WORK(&b->work, wq_sync_buffer, b); | 68 | INIT_DELAYED_WORK(&b->work, wq_sync_buffer); |
69 | } | 69 | } |
70 | return 0; | 70 | return 0; |
71 | 71 | ||
@@ -282,9 +282,10 @@ void oprofile_add_trace(unsigned long pc) | |||
282 | * By using schedule_delayed_work_on and then schedule_delayed_work | 282 | * By using schedule_delayed_work_on and then schedule_delayed_work |
283 | * we guarantee this will stay on the correct cpu | 283 | * we guarantee this will stay on the correct cpu |
284 | */ | 284 | */ |
285 | static void wq_sync_buffer(void * data) | 285 | static void wq_sync_buffer(struct work_struct *work) |
286 | { | 286 | { |
287 | struct oprofile_cpu_buffer * b = data; | 287 | struct oprofile_cpu_buffer * b = |
288 | container_of(work, struct oprofile_cpu_buffer, work.work); | ||
288 | if (b->cpu != smp_processor_id()) { | 289 | if (b->cpu != smp_processor_id()) { |
289 | printk("WQ on CPU%d, prefer CPU%d\n", | 290 | printk("WQ on CPU%d, prefer CPU%d\n", |
290 | smp_processor_id(), b->cpu); | 291 | smp_processor_id(), b->cpu); |
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index 09abb80e0570..49900d9e3235 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h | |||
@@ -43,7 +43,7 @@ struct oprofile_cpu_buffer { | |||
43 | unsigned long sample_lost_overflow; | 43 | unsigned long sample_lost_overflow; |
44 | unsigned long backtrace_aborted; | 44 | unsigned long backtrace_aborted; |
45 | int cpu; | 45 | int cpu; |
46 | struct work_struct work; | 46 | struct delayed_work work; |
47 | } ____cacheline_aligned; | 47 | } ____cacheline_aligned; |
48 | 48 | ||
49 | extern struct oprofile_cpu_buffer cpu_buffer[]; | 49 | extern struct oprofile_cpu_buffer cpu_buffer[]; |
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index ea2087c34149..50757695844f 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h | |||
@@ -70,7 +70,7 @@ struct slot { | |||
70 | struct hotplug_slot *hotplug_slot; | 70 | struct hotplug_slot *hotplug_slot; |
71 | struct list_head slot_list; | 71 | struct list_head slot_list; |
72 | char name[SLOT_NAME_SIZE]; | 72 | char name[SLOT_NAME_SIZE]; |
73 | struct work_struct work; /* work for button event */ | 73 | struct delayed_work work; /* work for button event */ |
74 | struct mutex lock; | 74 | struct mutex lock; |
75 | }; | 75 | }; |
76 | 76 | ||
@@ -187,7 +187,7 @@ extern int shpchp_configure_device(struct slot *p_slot); | |||
187 | extern int shpchp_unconfigure_device(struct slot *p_slot); | 187 | extern int shpchp_unconfigure_device(struct slot *p_slot); |
188 | extern void shpchp_remove_ctrl_files(struct controller *ctrl); | 188 | extern void shpchp_remove_ctrl_files(struct controller *ctrl); |
189 | extern void cleanup_slots(struct controller *ctrl); | 189 | extern void cleanup_slots(struct controller *ctrl); |
190 | extern void queue_pushbutton_work(void *data); | 190 | extern void queue_pushbutton_work(struct work_struct *work); |
191 | 191 | ||
192 | 192 | ||
193 | #ifdef CONFIG_ACPI | 193 | #ifdef CONFIG_ACPI |
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index 235c18a22393..4eac85b3d90e 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c | |||
@@ -159,7 +159,7 @@ static int init_slots(struct controller *ctrl) | |||
159 | goto error_info; | 159 | goto error_info; |
160 | 160 | ||
161 | slot->number = sun; | 161 | slot->number = sun; |
162 | INIT_WORK(&slot->work, queue_pushbutton_work, slot); | 162 | INIT_DELAYED_WORK(&slot->work, queue_pushbutton_work); |
163 | 163 | ||
164 | /* register this slot with the hotplug pci core */ | 164 | /* register this slot with the hotplug pci core */ |
165 | hotplug_slot->private = slot; | 165 | hotplug_slot->private = slot; |
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index c39901dbff20..158ac7836096 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include "../pci.h" | 36 | #include "../pci.h" |
37 | #include "shpchp.h" | 37 | #include "shpchp.h" |
38 | 38 | ||
39 | static void interrupt_event_handler(void *data); | 39 | static void interrupt_event_handler(struct work_struct *work); |
40 | static int shpchp_enable_slot(struct slot *p_slot); | 40 | static int shpchp_enable_slot(struct slot *p_slot); |
41 | static int shpchp_disable_slot(struct slot *p_slot); | 41 | static int shpchp_disable_slot(struct slot *p_slot); |
42 | 42 | ||
@@ -50,7 +50,7 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) | |||
50 | 50 | ||
51 | info->event_type = event_type; | 51 | info->event_type = event_type; |
52 | info->p_slot = p_slot; | 52 | info->p_slot = p_slot; |
53 | INIT_WORK(&info->work, interrupt_event_handler, info); | 53 | INIT_WORK(&info->work, interrupt_event_handler); |
54 | 54 | ||
55 | schedule_work(&info->work); | 55 | schedule_work(&info->work); |
56 | 56 | ||
@@ -408,9 +408,10 @@ struct pushbutton_work_info { | |||
408 | * Handles all pending events and exits. | 408 | * Handles all pending events and exits. |
409 | * | 409 | * |
410 | */ | 410 | */ |
411 | static void shpchp_pushbutton_thread(void *data) | 411 | static void shpchp_pushbutton_thread(struct work_struct *work) |
412 | { | 412 | { |
413 | struct pushbutton_work_info *info = data; | 413 | struct pushbutton_work_info *info = |
414 | container_of(work, struct pushbutton_work_info, work); | ||
414 | struct slot *p_slot = info->p_slot; | 415 | struct slot *p_slot = info->p_slot; |
415 | 416 | ||
416 | mutex_lock(&p_slot->lock); | 417 | mutex_lock(&p_slot->lock); |
@@ -436,9 +437,9 @@ static void shpchp_pushbutton_thread(void *data) | |||
436 | kfree(info); | 437 | kfree(info); |
437 | } | 438 | } |
438 | 439 | ||
439 | void queue_pushbutton_work(void *data) | 440 | void queue_pushbutton_work(struct work_struct *work) |
440 | { | 441 | { |
441 | struct slot *p_slot = data; | 442 | struct slot *p_slot = container_of(work, struct slot, work.work); |
442 | struct pushbutton_work_info *info; | 443 | struct pushbutton_work_info *info; |
443 | 444 | ||
444 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 445 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
@@ -447,7 +448,7 @@ void queue_pushbutton_work(void *data) | |||
447 | return; | 448 | return; |
448 | } | 449 | } |
449 | info->p_slot = p_slot; | 450 | info->p_slot = p_slot; |
450 | INIT_WORK(&info->work, shpchp_pushbutton_thread, info); | 451 | INIT_WORK(&info->work, shpchp_pushbutton_thread); |
451 | 452 | ||
452 | mutex_lock(&p_slot->lock); | 453 | mutex_lock(&p_slot->lock); |
453 | switch (p_slot->state) { | 454 | switch (p_slot->state) { |
@@ -541,9 +542,9 @@ static void handle_button_press_event(struct slot *p_slot) | |||
541 | } | 542 | } |
542 | } | 543 | } |
543 | 544 | ||
544 | static void interrupt_event_handler(void *data) | 545 | static void interrupt_event_handler(struct work_struct *work) |
545 | { | 546 | { |
546 | struct event_info *info = data; | 547 | struct event_info *info = container_of(work, struct event_info, work); |
547 | struct slot *p_slot = info->p_slot; | 548 | struct slot *p_slot = info->p_slot; |
548 | 549 | ||
549 | mutex_lock(&p_slot->lock); | 550 | mutex_lock(&p_slot->lock); |
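
shpchp uses a third common shape: a short-lived work item allocated per event. The payload struct embeds a work_struct, is filled in and INIT_WORK()'ed at queue time, and the handler recovers it with container_of() and frees it when done. Sketch only, with invented names (struct my_event, my_queue_event):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_event {
        struct work_struct work;
        u32 event_type;
};

static void my_event_handler(struct work_struct *work)
{
        struct my_event *ev = container_of(work, struct my_event, work);

        /* ... handle ev->event_type ... */
        kfree(ev);
}

static int my_queue_event(u32 event_type)
{
        struct my_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

        if (!ev)
                return -ENOMEM;
        ev->event_type = event_type;
        INIT_WORK(&ev->work, my_event_handler);
        schedule_work(&ev->work);
        return 0;
}
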
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 04c43ef529ac..55866b6b26fa 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c | |||
@@ -160,7 +160,7 @@ static struct aer_rpc* aer_alloc_rpc(struct pcie_device *dev) | |||
160 | rpc->e_lock = SPIN_LOCK_UNLOCKED; | 160 | rpc->e_lock = SPIN_LOCK_UNLOCKED; |
161 | 161 | ||
162 | rpc->rpd = dev; | 162 | rpc->rpd = dev; |
163 | INIT_WORK(&rpc->dpc_handler, aer_isr, (void *)dev); | 163 | INIT_WORK(&rpc->dpc_handler, aer_isr); |
164 | rpc->prod_idx = rpc->cons_idx = 0; | 164 | rpc->prod_idx = rpc->cons_idx = 0; |
165 | mutex_init(&rpc->rpc_mutex); | 165 | mutex_init(&rpc->rpc_mutex); |
166 | init_waitqueue_head(&rpc->wait_release); | 166 | init_waitqueue_head(&rpc->wait_release); |
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index daf0cad88fc8..3c0a58f64dd8 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h | |||
@@ -118,7 +118,7 @@ extern struct bus_type pcie_port_bus_type; | |||
118 | extern void aer_enable_rootport(struct aer_rpc *rpc); | 118 | extern void aer_enable_rootport(struct aer_rpc *rpc); |
119 | extern void aer_delete_rootport(struct aer_rpc *rpc); | 119 | extern void aer_delete_rootport(struct aer_rpc *rpc); |
120 | extern int aer_init(struct pcie_device *dev); | 120 | extern int aer_init(struct pcie_device *dev); |
121 | extern void aer_isr(void *context); | 121 | extern void aer_isr(struct work_struct *work); |
122 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); | 122 | extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info); |
123 | extern int aer_osc_setup(struct pci_dev *dev); | 123 | extern int aer_osc_setup(struct pci_dev *dev); |
124 | 124 | ||
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 1c7e660d6535..08e13033ced8 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c | |||
@@ -690,14 +690,14 @@ static void aer_isr_one_error(struct pcie_device *p_device, | |||
690 | 690 | ||
691 | /** | 691 | /** |
692 | * aer_isr - consume errors detected by root port | 692 | * aer_isr - consume errors detected by root port |
693 | * @context: pointer to a private data of pcie device | 693 | * @work: definition of this work item |
694 | * | 694 | * |
695 | * Invoked, as DPC, when root port records new detected error | 695 | * Invoked, as DPC, when root port records new detected error |
696 | **/ | 696 | **/ |
697 | void aer_isr(void *context) | 697 | void aer_isr(struct work_struct *work) |
698 | { | 698 | { |
699 | struct pcie_device *p_device = (struct pcie_device *) context; | 699 | struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler); |
700 | struct aer_rpc *rpc = get_service_data(p_device); | 700 | struct pcie_device *p_device = rpc->rpd; |
701 | struct aer_err_source *e_src; | 701 | struct aer_err_source *e_src; |
702 | 702 | ||
703 | mutex_lock(&rpc->rpc_mutex); | 703 | mutex_lock(&rpc->rpc_mutex); |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 45df12eda3c5..7355eb455a88 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -675,9 +675,10 @@ static int pcmcia_card_add(struct pcmcia_socket *s) | |||
675 | } | 675 | } |
676 | 676 | ||
677 | 677 | ||
678 | static void pcmcia_delayed_add_device(void *data) | 678 | static void pcmcia_delayed_add_device(struct work_struct *work) |
679 | { | 679 | { |
680 | struct pcmcia_socket *s = data; | 680 | struct pcmcia_socket *s = |
681 | container_of(work, struct pcmcia_socket, device_add); | ||
681 | ds_dbg(1, "adding additional device to %d\n", s->sock); | 682 | ds_dbg(1, "adding additional device to %d\n", s->sock); |
682 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); | 683 | pcmcia_device_add(s, s->pcmcia_state.mfc_pfc); |
683 | s->pcmcia_state.device_add_pending = 0; | 684 | s->pcmcia_state.device_add_pending = 0; |
@@ -1349,7 +1350,7 @@ static int __devinit pcmcia_bus_add_socket(struct class_device *class_dev, | |||
1349 | init_waitqueue_head(&socket->queue); | 1350 | init_waitqueue_head(&socket->queue); |
1350 | #endif | 1351 | #endif |
1351 | INIT_LIST_HEAD(&socket->devices_list); | 1352 | INIT_LIST_HEAD(&socket->devices_list); |
1352 | INIT_WORK(&socket->device_add, pcmcia_delayed_add_device, socket); | 1353 | INIT_WORK(&socket->device_add, pcmcia_delayed_add_device); |
1353 | memset(&socket->pcmcia_state, 0, sizeof(u8)); | 1354 | memset(&socket->pcmcia_state, 0, sizeof(u8)); |
1354 | socket->device_count = 0; | 1355 | socket->device_count = 0; |
1355 | 1356 | ||
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 814b9e1873f5..828b329e08e0 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -53,9 +53,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
53 | * Routine to poll RTC seconds field for change as often as possible, | 53 | * Routine to poll RTC seconds field for change as often as possible, |
54 | * after first RTC_UIE use timer to reduce polling | 54 | * after first RTC_UIE use timer to reduce polling |
55 | */ | 55 | */ |
56 | static void rtc_uie_task(void *data) | 56 | static void rtc_uie_task(struct work_struct *work) |
57 | { | 57 | { |
58 | struct rtc_device *rtc = data; | 58 | struct rtc_device *rtc = |
59 | container_of(work, struct rtc_device, uie_task); | ||
59 | struct rtc_time tm; | 60 | struct rtc_time tm; |
60 | int num = 0; | 61 | int num = 0; |
61 | int err; | 62 | int err; |
@@ -411,7 +412,7 @@ static int rtc_dev_add_device(struct class_device *class_dev, | |||
411 | spin_lock_init(&rtc->irq_lock); | 412 | spin_lock_init(&rtc->irq_lock); |
412 | init_waitqueue_head(&rtc->irq_queue); | 413 | init_waitqueue_head(&rtc->irq_queue); |
413 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | 414 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL |
414 | INIT_WORK(&rtc->uie_task, rtc_uie_task, rtc); | 415 | INIT_WORK(&rtc->uie_task, rtc_uie_task); |
415 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); | 416 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); |
416 | #endif | 417 | #endif |
417 | 418 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a2cef57d7bcb..2af2d9b53d18 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -54,7 +54,7 @@ static void dasd_flush_request_queue(struct dasd_device *); | |||
54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | 54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); |
55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); | 55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); |
56 | static void dasd_tasklet(struct dasd_device *); | 56 | static void dasd_tasklet(struct dasd_device *); |
57 | static void do_kick_device(void *data); | 57 | static void do_kick_device(struct work_struct *); |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * SECTION: Operations on the device structure. | 60 | * SECTION: Operations on the device structure. |
@@ -100,7 +100,7 @@ dasd_alloc_device(void) | |||
100 | (unsigned long) device); | 100 | (unsigned long) device); |
101 | INIT_LIST_HEAD(&device->ccw_queue); | 101 | INIT_LIST_HEAD(&device->ccw_queue); |
102 | init_timer(&device->timer); | 102 | init_timer(&device->timer); |
103 | INIT_WORK(&device->kick_work, do_kick_device, device); | 103 | INIT_WORK(&device->kick_work, do_kick_device); |
104 | device->state = DASD_STATE_NEW; | 104 | device->state = DASD_STATE_NEW; |
105 | device->target = DASD_STATE_NEW; | 105 | device->target = DASD_STATE_NEW; |
106 | 106 | ||
@@ -407,11 +407,9 @@ dasd_change_state(struct dasd_device *device) | |||
407 | * event daemon. | 407 | * event daemon. |
408 | */ | 408 | */ |
409 | static void | 409 | static void |
410 | do_kick_device(void *data) | 410 | do_kick_device(struct work_struct *work) |
411 | { | 411 | { |
412 | struct dasd_device *device; | 412 | struct dasd_device *device = container_of(work, struct dasd_device, kick_work); |
413 | |||
414 | device = (struct dasd_device *) data; | ||
415 | dasd_change_state(device); | 413 | dasd_change_state(device); |
416 | dasd_schedule_bh(device); | 414 | dasd_schedule_bh(device); |
417 | dasd_put_device(device); | 415 | dasd_put_device(device); |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index ad7f7e1c0163..26cf2f5ae2e7 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -334,7 +334,7 @@ static LIST_HEAD(slow_subchannels_head); | |||
334 | static DEFINE_SPINLOCK(slow_subchannel_lock); | 334 | static DEFINE_SPINLOCK(slow_subchannel_lock); |
335 | 335 | ||
336 | static void | 336 | static void |
337 | css_trigger_slow_path(void) | 337 | css_trigger_slow_path(struct work_struct *unused) |
338 | { | 338 | { |
339 | CIO_TRACE_EVENT(4, "slowpath"); | 339 | CIO_TRACE_EVENT(4, "slowpath"); |
340 | 340 | ||
@@ -359,8 +359,7 @@ css_trigger_slow_path(void) | |||
359 | spin_unlock_irq(&slow_subchannel_lock); | 359 | spin_unlock_irq(&slow_subchannel_lock); |
360 | } | 360 | } |
361 | 361 | ||
362 | typedef void (*workfunc)(void *); | 362 | DECLARE_WORK(slow_path_work, css_trigger_slow_path); |
363 | DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL); | ||
364 | struct workqueue_struct *slow_path_wq; | 363 | struct workqueue_struct *slow_path_wq; |
365 | 364 | ||
366 | /* Reprobe subchannel if unregistered. */ | 365 | /* Reprobe subchannel if unregistered. */ |
@@ -397,7 +396,7 @@ static int reprobe_subchannel(struct subchannel_id schid, void *data) | |||
397 | } | 396 | } |
398 | 397 | ||
399 | /* Work function used to reprobe all unregistered subchannels. */ | 398 | /* Work function used to reprobe all unregistered subchannels. */ |
400 | static void reprobe_all(void *data) | 399 | static void reprobe_all(struct work_struct *unused) |
401 | { | 400 | { |
402 | int ret; | 401 | int ret; |
403 | 402 | ||
@@ -413,7 +412,7 @@ static void reprobe_all(void *data) | |||
413 | need_reprobe); | 412 | need_reprobe); |
414 | } | 413 | } |
415 | 414 | ||
416 | DECLARE_WORK(css_reprobe_work, reprobe_all, NULL); | 415 | DECLARE_WORK(css_reprobe_work, reprobe_all); |
417 | 416 | ||
418 | /* Schedule reprobing of all unregistered subchannels. */ | 417 | /* Schedule reprobing of all unregistered subchannels. */ |
419 | void css_schedule_reprobe(void) | 418 | void css_schedule_reprobe(void) |
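
For the statically declared items in css.c the work function carries no per-item data at all, so the old (workfunc) cast and the NULL data argument simply disappear: DECLARE_WORK() now takes only the name and the handler, and the handler accepts (and may ignore) the struct work_struct pointer. Roughly, with hypothetical names:

#include <linux/workqueue.h>

static void my_scan(struct work_struct *unused)
{
        /* rescan everything; no per-item context needed */
}

/* Old API: DECLARE_WORK(my_scan_work, (void (*)(void *))my_scan, NULL); */
static DECLARE_WORK(my_scan_work, my_scan);

static void my_trigger_scan(void)
{
        schedule_work(&my_scan_work);
}
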
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 6a54334ffe09..e4dc947e74e9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include "ap_bus.h" | 37 | #include "ap_bus.h" |
38 | 38 | ||
39 | /* Some prototypes. */ | 39 | /* Some prototypes. */ |
40 | static void ap_scan_bus(void *); | 40 | static void ap_scan_bus(struct work_struct *); |
41 | static void ap_poll_all(unsigned long); | 41 | static void ap_poll_all(unsigned long); |
42 | static void ap_poll_timeout(unsigned long); | 42 | static void ap_poll_timeout(unsigned long); |
43 | static int ap_poll_thread_start(void); | 43 | static int ap_poll_thread_start(void); |
@@ -71,7 +71,7 @@ static struct device *ap_root_device = NULL; | |||
71 | static struct workqueue_struct *ap_work_queue; | 71 | static struct workqueue_struct *ap_work_queue; |
72 | static struct timer_list ap_config_timer; | 72 | static struct timer_list ap_config_timer; |
73 | static int ap_config_time = AP_CONFIG_TIME; | 73 | static int ap_config_time = AP_CONFIG_TIME; |
74 | static DECLARE_WORK(ap_config_work, ap_scan_bus, NULL); | 74 | static DECLARE_WORK(ap_config_work, ap_scan_bus); |
75 | 75 | ||
76 | /** | 76 | /** |
77 | * Tasklet & timer for AP request polling. | 77 | * Tasklet & timer for AP request polling. |
@@ -732,7 +732,7 @@ static void ap_device_release(struct device *dev) | |||
732 | kfree(ap_dev); | 732 | kfree(ap_dev); |
733 | } | 733 | } |
734 | 734 | ||
735 | static void ap_scan_bus(void *data) | 735 | static void ap_scan_bus(struct work_struct *unused) |
736 | { | 736 | { |
737 | struct ap_device *ap_dev; | 737 | struct ap_device *ap_dev; |
738 | struct device *dev; | 738 | struct device *dev; |
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 08d4e47070bd..e5665b6743a1 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -67,7 +67,7 @@ static char debug_buffer[255]; | |||
67 | * Some prototypes. | 67 | * Some prototypes. |
68 | */ | 68 | */ |
69 | static void lcs_tasklet(unsigned long); | 69 | static void lcs_tasklet(unsigned long); |
70 | static void lcs_start_kernel_thread(struct lcs_card *card); | 70 | static void lcs_start_kernel_thread(struct work_struct *); |
71 | static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); | 71 | static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); |
72 | static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); | 72 | static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); |
73 | static int lcs_recovery(void *ptr); | 73 | static int lcs_recovery(void *ptr); |
@@ -1724,8 +1724,9 @@ lcs_stopcard(struct lcs_card *card) | |||
1724 | * Kernel Thread helper functions for LGW initiated commands | 1724 | * Kernel Thread helper functions for LGW initiated commands |
1725 | */ | 1725 | */ |
1726 | static void | 1726 | static void |
1727 | lcs_start_kernel_thread(struct lcs_card *card) | 1727 | lcs_start_kernel_thread(struct work_struct *work) |
1728 | { | 1728 | { |
1729 | struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); | ||
1729 | LCS_DBF_TEXT(5, trace, "krnthrd"); | 1730 | LCS_DBF_TEXT(5, trace, "krnthrd"); |
1730 | if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) | 1731 | if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) |
1731 | kernel_thread(lcs_recovery, (void *) card, SIGCHLD); | 1732 | kernel_thread(lcs_recovery, (void *) card, SIGCHLD); |
@@ -2053,8 +2054,7 @@ lcs_probe_device(struct ccwgroup_device *ccwgdev) | |||
2053 | ccwgdev->cdev[0]->handler = lcs_irq; | 2054 | ccwgdev->cdev[0]->handler = lcs_irq; |
2054 | ccwgdev->cdev[1]->handler = lcs_irq; | 2055 | ccwgdev->cdev[1]->handler = lcs_irq; |
2055 | card->gdev = ccwgdev; | 2056 | card->gdev = ccwgdev; |
2056 | INIT_WORK(&card->kernel_thread_starter, | 2057 | INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); |
2057 | (void *) lcs_start_kernel_thread, card); | ||
2058 | card->thread_start_mask = 0; | 2058 | card->thread_start_mask = 0; |
2059 | card->thread_allowed_mask = 0; | 2059 | card->thread_allowed_mask = 0; |
2060 | card->thread_running_mask = 0; | 2060 | card->thread_running_mask = 0; |
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 7fdc5272c446..2bde4f1fb9c2 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -1039,8 +1039,9 @@ qeth_do_start_thread(struct qeth_card *card, unsigned long thread) | |||
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | static void | 1041 | static void |
1042 | qeth_start_kernel_thread(struct qeth_card *card) | 1042 | qeth_start_kernel_thread(struct work_struct *work) |
1043 | { | 1043 | { |
1044 | struct qeth_card *card = container_of(work, struct qeth_card, kernel_thread_starter); | ||
1044 | QETH_DBF_TEXT(trace , 2, "strthrd"); | 1045 | QETH_DBF_TEXT(trace , 2, "strthrd"); |
1045 | 1046 | ||
1046 | if (card->read.state != CH_STATE_UP && | 1047 | if (card->read.state != CH_STATE_UP && |
@@ -1103,8 +1104,7 @@ qeth_setup_card(struct qeth_card *card) | |||
1103 | card->thread_start_mask = 0; | 1104 | card->thread_start_mask = 0; |
1104 | card->thread_allowed_mask = 0; | 1105 | card->thread_allowed_mask = 0; |
1105 | card->thread_running_mask = 0; | 1106 | card->thread_running_mask = 0; |
1106 | INIT_WORK(&card->kernel_thread_starter, | 1107 | INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); |
1107 | (void *)qeth_start_kernel_thread,card); | ||
1108 | INIT_LIST_HEAD(&card->ip_list); | 1108 | INIT_LIST_HEAD(&card->ip_list); |
1109 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); | 1109 | card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL); |
1110 | if (!card->ip_tbd_list) { | 1110 | if (!card->ip_tbd_list) { |
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index a6aa91072880..bb3cb3360541 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c | |||
@@ -849,7 +849,7 @@ static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags) | |||
849 | hostdata->issue_queue = NULL; | 849 | hostdata->issue_queue = NULL; |
850 | hostdata->disconnected_queue = NULL; | 850 | hostdata->disconnected_queue = NULL; |
851 | 851 | ||
852 | INIT_WORK(&hostdata->coroutine, NCR5380_main, hostdata); | 852 | INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main); |
853 | 853 | ||
854 | #ifdef NCR5380_STATS | 854 | #ifdef NCR5380_STATS |
855 | for (i = 0; i < 8; ++i) { | 855 | for (i = 0; i < 8; ++i) { |
@@ -1016,7 +1016,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1016 | 1016 | ||
1017 | /* Run the coroutine if it isn't already running. */ | 1017 | /* Run the coroutine if it isn't already running. */ |
1018 | /* Kick off command processing */ | 1018 | /* Kick off command processing */ |
1019 | schedule_work(&hostdata->coroutine); | 1019 | schedule_delayed_work(&hostdata->coroutine, 0); |
1020 | return 0; | 1020 | return 0; |
1021 | } | 1021 | } |
1022 | 1022 | ||
@@ -1033,9 +1033,10 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) | |||
1033 | * host lock and called routines may take the isa dma lock. | 1033 | * host lock and called routines may take the isa dma lock. |
1034 | */ | 1034 | */ |
1035 | 1035 | ||
1036 | static void NCR5380_main(void *p) | 1036 | static void NCR5380_main(struct work_struct *work) |
1037 | { | 1037 | { |
1038 | struct NCR5380_hostdata *hostdata = p; | 1038 | struct NCR5380_hostdata *hostdata = |
1039 | container_of(work, struct NCR5380_hostdata, coroutine.work); | ||
1039 | struct Scsi_Host *instance = hostdata->host; | 1040 | struct Scsi_Host *instance = hostdata->host; |
1040 | Scsi_Cmnd *tmp, *prev; | 1041 | Scsi_Cmnd *tmp, *prev; |
1041 | int done; | 1042 | int done; |
@@ -1221,7 +1222,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id) | |||
1221 | } /* if BASR_IRQ */ | 1222 | } /* if BASR_IRQ */ |
1222 | spin_unlock_irqrestore(instance->host_lock, flags); | 1223 | spin_unlock_irqrestore(instance->host_lock, flags); |
1223 | if(!done) | 1224 | if(!done) |
1224 | schedule_work(&hostdata->coroutine); | 1225 | schedule_delayed_work(&hostdata->coroutine, 0); |
1225 | } while (!done); | 1226 | } while (!done); |
1226 | return IRQ_HANDLED; | 1227 | return IRQ_HANDLED; |
1227 | } | 1228 | } |
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 1bc73de496b0..713a108c02ef 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h | |||
@@ -271,7 +271,7 @@ struct NCR5380_hostdata { | |||
271 | unsigned long time_expires; /* in jiffies, set prior to sleeping */ | 271 | unsigned long time_expires; /* in jiffies, set prior to sleeping */ |
272 | int select_time; /* timer in select for target response */ | 272 | int select_time; /* timer in select for target response */ |
273 | volatile Scsi_Cmnd *selecting; | 273 | volatile Scsi_Cmnd *selecting; |
274 | struct work_struct coroutine; /* our co-routine */ | 274 | struct delayed_work coroutine; /* our co-routine */ |
275 | #ifdef NCR5380_STATS | 275 | #ifdef NCR5380_STATS |
276 | unsigned timebase; /* Base for time calcs */ | 276 | unsigned timebase; /* Base for time calcs */ |
277 | long time_read[8]; /* time to do reads */ | 277 | long time_read[8]; /* time to do reads */ |
@@ -298,7 +298,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance); | |||
298 | #ifndef DONT_USE_INTR | 298 | #ifndef DONT_USE_INTR |
299 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); | 299 | static irqreturn_t NCR5380_intr(int irq, void *dev_id); |
300 | #endif | 300 | #endif |
301 | static void NCR5380_main(void *ptr); | 301 | static void NCR5380_main(struct work_struct *work); |
302 | static void NCR5380_print_options(struct Scsi_Host *instance); | 302 | static void NCR5380_print_options(struct Scsi_Host *instance); |
303 | #ifdef NDEBUG | 303 | #ifdef NDEBUG |
304 | static void NCR5380_print_phase(struct Scsi_Host *instance); | 304 | static void NCR5380_print_phase(struct Scsi_Host *instance); |
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 306f46b85a55..0cec742d12e9 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -1443,7 +1443,7 @@ static struct work_struct aha152x_tq; | |||
1443 | * Run service completions on the card with interrupts enabled. | 1443 | * Run service completions on the card with interrupts enabled. |
1444 | * | 1444 | * |
1445 | */ | 1445 | */ |
1446 | static void run(void) | 1446 | static void run(struct work_struct *work) |
1447 | { | 1447 | { |
1448 | struct aha152x_hostdata *hd; | 1448 | struct aha152x_hostdata *hd; |
1449 | 1449 | ||
@@ -1499,7 +1499,7 @@ static irqreturn_t intr(int irqno, void *dev_id) | |||
1499 | HOSTDATA(shpnt)->service=1; | 1499 | HOSTDATA(shpnt)->service=1; |
1500 | 1500 | ||
1501 | /* Poke the BH handler */ | 1501 | /* Poke the BH handler */ |
1502 | INIT_WORK(&aha152x_tq, (void *) run, NULL); | 1502 | INIT_WORK(&aha152x_tq, run); |
1503 | schedule_work(&aha152x_tq); | 1503 | schedule_work(&aha152x_tq); |
1504 | } | 1504 | } |
1505 | DO_UNLOCK(flags); | 1505 | DO_UNLOCK(flags); |
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index 14d5d8c2ee13..75ed6b0569d1 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c | |||
@@ -414,9 +414,10 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id) | |||
414 | } | 414 | } |
415 | 415 | ||
416 | /* hard reset a phy later */ | 416 | /* hard reset a phy later */ |
417 | static void do_phy_reset_later(void *data) | 417 | static void do_phy_reset_later(struct work_struct *work) |
418 | { | 418 | { |
419 | struct sas_phy *sas_phy = data; | 419 | struct sas_phy *sas_phy = |
420 | container_of(work, struct sas_phy, reset_work); | ||
420 | int error; | 421 | int error; |
421 | 422 | ||
422 | ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, | 423 | ASD_DPRINTK("%s: About to hard reset phy %d\n", __FUNCTION__, |
@@ -430,7 +431,7 @@ static void do_phy_reset_later(void *data) | |||
430 | 431 | ||
431 | static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) | 432 | static void phy_reset_later(struct sas_phy *sas_phy, struct Scsi_Host *shost) |
432 | { | 433 | { |
433 | INIT_WORK(&sas_phy->reset_work, do_phy_reset_later, sas_phy); | 434 | INIT_WORK(&sas_phy->reset_work, do_phy_reset_later); |
434 | queue_work(shost->work_q, &sas_phy->reset_work); | 435 | queue_work(shost->work_q, &sas_phy->reset_work); |
435 | } | 436 | } |
436 | 437 | ||
@@ -442,7 +443,7 @@ static void task_kill_later(struct asd_ascb *ascb) | |||
442 | struct Scsi_Host *shost = sas_ha->core.shost; | 443 | struct Scsi_Host *shost = sas_ha->core.shost; |
443 | struct sas_task *task = ascb->uldd_task; | 444 | struct sas_task *task = ascb->uldd_task; |
444 | 445 | ||
445 | INIT_WORK(&task->abort_work, (void (*)(void *))sas_task_abort, task); | 446 | INIT_WORK(&task->abort_work, sas_task_abort); |
446 | queue_work(shost->work_q, &task->abort_work); | 447 | queue_work(shost->work_q, &task->abort_work); |
447 | } | 448 | } |
448 | 449 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 0e74174a1b37..e28260f05d6b 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -67,6 +67,7 @@ struct vio_port { | |||
67 | 67 | ||
68 | unsigned long liobn; | 68 | unsigned long liobn; |
69 | unsigned long riobn; | 69 | unsigned long riobn; |
70 | struct srp_target *target; | ||
70 | }; | 71 | }; |
71 | 72 | ||
72 | static struct workqueue_struct *vtgtd; | 73 | static struct workqueue_struct *vtgtd; |
@@ -685,10 +686,10 @@ static inline struct viosrp_crq *next_crq(struct crq_queue *queue) | |||
685 | return crq; | 686 | return crq; |
686 | } | 687 | } |
687 | 688 | ||
688 | static void handle_crq(void *data) | 689 | static void handle_crq(struct work_struct *work) |
689 | { | 690 | { |
690 | struct srp_target *target = (struct srp_target *) data; | 691 | struct vio_port *vport = container_of(work, struct vio_port, crq_work); |
691 | struct vio_port *vport = target_to_port(target); | 692 | struct srp_target *target = vport->target; |
692 | struct viosrp_crq *crq; | 693 | struct viosrp_crq *crq; |
693 | int done = 0; | 694 | int done = 0; |
694 | 695 | ||
@@ -822,6 +823,7 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
822 | target->shost = shost; | 823 | target->shost = shost; |
823 | vport->dma_dev = dev; | 824 | vport->dma_dev = dev; |
824 | target->ldata = vport; | 825 | target->ldata = vport; |
826 | vport->target = target; | ||
825 | err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT, | 827 | err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT, |
826 | SRP_MAX_IU_LEN); | 828 | SRP_MAX_IU_LEN); |
827 | if (err) | 829 | if (err) |
@@ -837,7 +839,7 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
837 | vport->liobn = dma[0]; | 839 | vport->liobn = dma[0]; |
838 | vport->riobn = dma[5]; | 840 | vport->riobn = dma[5]; |
839 | 841 | ||
840 | INIT_WORK(&vport->crq_work, handle_crq, target); | 842 | INIT_WORK(&vport->crq_work, handle_crq); |
841 | 843 | ||
842 | err = crq_queue_create(&vport->crq_queue, target); | 844 | err = crq_queue_create(&vport->crq_queue, target); |
843 | if (err) | 845 | if (err) |
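
ibmvstgt shows what happens when container_of() cannot hand back the object the old void pointer used to carry: crq_work is embedded in struct vio_port, but the handler needs the srp_target, so the patch adds a target back-pointer to vio_port and fills it in at probe time before the work can run. The same idea in a minimal, hypothetical form (struct my_port, struct my_target):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_target;                       /* the object the handler really wants */

struct my_port {
        struct work_struct crq_work;
        struct my_target *target;       /* back-pointer added for the new API */
};

static void my_handle_crq(struct work_struct *work)
{
        struct my_port *port = container_of(work, struct my_port, crq_work);
        struct my_target *target = port->target;

        /* ... process queued CRQs for target ... */
        (void)target;
}

static void my_probe(struct my_port *port, struct my_target *target)
{
        port->target = target;          /* must be set before queueing crq_work */
        INIT_WORK(&port->crq_work, my_handle_crq);
}
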
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index e31f6122106f..0464c182c577 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c | |||
@@ -36,7 +36,7 @@ typedef struct { | |||
36 | int base_hi; /* Hi Base address for ECP-ISA chipset */ | 36 | int base_hi; /* Hi Base address for ECP-ISA chipset */ |
37 | int mode; /* Transfer mode */ | 37 | int mode; /* Transfer mode */ |
38 | struct scsi_cmnd *cur_cmd; /* Current queued command */ | 38 | struct scsi_cmnd *cur_cmd; /* Current queued command */ |
39 | struct work_struct imm_tq; /* Polling interrupt stuff */ | 39 | struct delayed_work imm_tq; /* Polling interrupt stuff */ |
40 | unsigned long jstart; /* Jiffies at start */ | 40 | unsigned long jstart; /* Jiffies at start */ |
41 | unsigned failed:1; /* Failure flag */ | 41 | unsigned failed:1; /* Failure flag */ |
42 | unsigned dp:1; /* Data phase present */ | 42 | unsigned dp:1; /* Data phase present */ |
@@ -733,9 +733,9 @@ static int imm_completion(struct scsi_cmnd *cmd) | |||
733 | * the scheduler's task queue to generate a stream of call-backs and | 733 | * the scheduler's task queue to generate a stream of call-backs and |
734 | * complete the request when the drive is ready. | 734 | * complete the request when the drive is ready. |
735 | */ | 735 | */ |
736 | static void imm_interrupt(void *data) | 736 | static void imm_interrupt(struct work_struct *work) |
737 | { | 737 | { |
738 | imm_struct *dev = (imm_struct *) data; | 738 | imm_struct *dev = container_of(work, imm_struct, imm_tq.work); |
739 | struct scsi_cmnd *cmd = dev->cur_cmd; | 739 | struct scsi_cmnd *cmd = dev->cur_cmd; |
740 | struct Scsi_Host *host = cmd->device->host; | 740 | struct Scsi_Host *host = cmd->device->host; |
741 | unsigned long flags; | 741 | unsigned long flags; |
@@ -745,7 +745,6 @@ static void imm_interrupt(void *data) | |||
745 | return; | 745 | return; |
746 | } | 746 | } |
747 | if (imm_engine(dev, cmd)) { | 747 | if (imm_engine(dev, cmd)) { |
748 | INIT_WORK(&dev->imm_tq, imm_interrupt, (void *) dev); | ||
749 | schedule_delayed_work(&dev->imm_tq, 1); | 748 | schedule_delayed_work(&dev->imm_tq, 1); |
750 | return; | 749 | return; |
751 | } | 750 | } |
@@ -953,8 +952,7 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, | |||
953 | cmd->result = DID_ERROR << 16; /* default return code */ | 952 | cmd->result = DID_ERROR << 16; /* default return code */ |
954 | cmd->SCp.phase = 0; /* bus free */ | 953 | cmd->SCp.phase = 0; /* bus free */ |
955 | 954 | ||
956 | INIT_WORK(&dev->imm_tq, imm_interrupt, dev); | 955 | schedule_delayed_work(&dev->imm_tq, 0); |
957 | schedule_work(&dev->imm_tq); | ||
958 | 956 | ||
959 | imm_pb_claim(dev); | 957 | imm_pb_claim(dev); |
960 | 958 | ||
@@ -1225,7 +1223,7 @@ static int __imm_attach(struct parport *pb) | |||
1225 | else | 1223 | else |
1226 | ports = 8; | 1224 | ports = 8; |
1227 | 1225 | ||
1228 | INIT_WORK(&dev->imm_tq, imm_interrupt, dev); | 1226 | INIT_DELAYED_WORK(&dev->imm_tq, imm_interrupt); |
1229 | 1227 | ||
1230 | err = -ENOMEM; | 1228 | err = -ENOMEM; |
1231 | host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); | 1229 | host = scsi_host_alloc(&imm_template, sizeof(imm_struct *)); |
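
A side effect visible in the imm.c hunks: because the handler is now bound once by INIT_DELAYED_WORK() at attach time, the per-call INIT_WORK() that used to precede every scheduling call can be dropped, and schedule_work() on a delayed item becomes schedule_delayed_work(..., 0). In sketch form, names invented (struct my_dev, my_poll):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_dev {
        struct delayed_work poll_tq;
        int busy;
};

static void my_poll(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, poll_tq.work);

        if (dev->busy)
                schedule_delayed_work(&dev->poll_tq, 1);   /* no re-INIT needed */
}

static void my_attach(struct my_dev *dev)
{
        INIT_DELAYED_WORK(&dev->poll_tq, my_poll);         /* bind handler once */
        schedule_delayed_work(&dev->poll_tq, 0);           /* was schedule_work() */
}
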
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2d83fbb806a5..ccd4dafce8e2 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -2307,7 +2307,7 @@ static void ipr_release_dump(struct kref *kref) | |||
2307 | 2307 | ||
2308 | /** | 2308 | /** |
2309 | * ipr_worker_thread - Worker thread | 2309 | * ipr_worker_thread - Worker thread |
2310 | * @data: ioa config struct | 2310 | * @work: ioa config struct |
2311 | * | 2311 | * |
2312 | * Called at task level from a work thread. This function takes care | 2312 | * Called at task level from a work thread. This function takes care |
2313 | * of adding and removing device from the mid-layer as configuration | 2313 | * of adding and removing device from the mid-layer as configuration |
@@ -2316,13 +2316,14 @@ static void ipr_release_dump(struct kref *kref) | |||
2316 | * Return value: | 2316 | * Return value: |
2317 | * nothing | 2317 | * nothing |
2318 | **/ | 2318 | **/ |
2319 | static void ipr_worker_thread(void *data) | 2319 | static void ipr_worker_thread(struct work_struct *work) |
2320 | { | 2320 | { |
2321 | unsigned long lock_flags; | 2321 | unsigned long lock_flags; |
2322 | struct ipr_resource_entry *res; | 2322 | struct ipr_resource_entry *res; |
2323 | struct scsi_device *sdev; | 2323 | struct scsi_device *sdev; |
2324 | struct ipr_dump *dump; | 2324 | struct ipr_dump *dump; |
2325 | struct ipr_ioa_cfg *ioa_cfg = data; | 2325 | struct ipr_ioa_cfg *ioa_cfg = |
2326 | container_of(work, struct ipr_ioa_cfg, work_q); | ||
2326 | u8 bus, target, lun; | 2327 | u8 bus, target, lun; |
2327 | int did_work; | 2328 | int did_work; |
2328 | 2329 | ||
@@ -7121,7 +7122,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, | |||
7121 | INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); | 7122 | INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); |
7122 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); | 7123 | INIT_LIST_HEAD(&ioa_cfg->free_res_q); |
7123 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | 7124 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); |
7124 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); | 7125 | INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); |
7125 | init_waitqueue_head(&ioa_cfg->reset_wait_q); | 7126 | init_waitqueue_head(&ioa_cfg->reset_wait_q); |
7126 | ioa_cfg->sdt_state = INACTIVE; | 7127 | ioa_cfg->sdt_state = INACTIVE; |
7127 | if (ipr_enable_cache) | 7128 | if (ipr_enable_cache) |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 5d8862189485..e11b23c641e2 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -719,9 +719,10 @@ again: | |||
719 | return rc; | 719 | return rc; |
720 | } | 720 | } |
721 | 721 | ||
722 | static void iscsi_xmitworker(void *data) | 722 | static void iscsi_xmitworker(struct work_struct *work) |
723 | { | 723 | { |
724 | struct iscsi_conn *conn = data; | 724 | struct iscsi_conn *conn = |
725 | container_of(work, struct iscsi_conn, xmitwork); | ||
725 | int rc; | 726 | int rc; |
726 | /* | 727 | /* |
727 | * serialize Xmit worker on a per-connection basis. | 728 | * serialize Xmit worker on a per-connection basis. |
@@ -1512,7 +1513,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx) | |||
1512 | if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) | 1513 | if (conn->mgmtqueue == ERR_PTR(-ENOMEM)) |
1513 | goto mgmtqueue_alloc_fail; | 1514 | goto mgmtqueue_alloc_fail; |
1514 | 1515 | ||
1515 | INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn); | 1516 | INIT_WORK(&conn->xmitwork, iscsi_xmitworker); |
1516 | 1517 | ||
1517 | /* allocate login_mtask used for the login/text sequences */ | 1518 | /* allocate login_mtask used for the login/text sequences */ |
1518 | spin_lock_bh(&session->lock); | 1519 | spin_lock_bh(&session->lock); |
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index d977bd492d8d..fb7df7b75811 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -647,10 +647,12 @@ void sas_unregister_domain_devices(struct asd_sas_port *port) | |||
647 | * Discover process only interrogates devices in order to discover the | 647 | * Discover process only interrogates devices in order to discover the |
648 | * domain. | 648 | * domain. |
649 | */ | 649 | */ |
650 | static void sas_discover_domain(void *data) | 650 | static void sas_discover_domain(struct work_struct *work) |
651 | { | 651 | { |
652 | int error = 0; | 652 | int error = 0; |
653 | struct asd_sas_port *port = data; | 653 | struct sas_discovery_event *ev = |
654 | container_of(work, struct sas_discovery_event, work); | ||
655 | struct asd_sas_port *port = ev->port; | ||
654 | 656 | ||
655 | sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, | 657 | sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, |
656 | &port->disc.pending); | 658 | &port->disc.pending); |
@@ -692,10 +694,12 @@ static void sas_discover_domain(void *data) | |||
692 | current->pid, error); | 694 | current->pid, error); |
693 | } | 695 | } |
694 | 696 | ||
695 | static void sas_revalidate_domain(void *data) | 697 | static void sas_revalidate_domain(struct work_struct *work) |
696 | { | 698 | { |
697 | int res = 0; | 699 | int res = 0; |
698 | struct asd_sas_port *port = data; | 700 | struct sas_discovery_event *ev = |
701 | container_of(work, struct sas_discovery_event, work); | ||
702 | struct asd_sas_port *port = ev->port; | ||
699 | 703 | ||
700 | sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, | 704 | sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, |
701 | &port->disc.pending); | 705 | &port->disc.pending); |
@@ -722,7 +726,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) | |||
722 | BUG_ON(ev >= DISC_NUM_EVENTS); | 726 | BUG_ON(ev >= DISC_NUM_EVENTS); |
723 | 727 | ||
724 | sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, | 728 | sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, |
725 | &disc->disc_work[ev], port->ha->core.shost); | 729 | &disc->disc_work[ev].work, port->ha->core.shost); |
726 | 730 | ||
727 | return 0; | 731 | return 0; |
728 | } | 732 | } |
@@ -737,13 +741,15 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) | |||
737 | { | 741 | { |
738 | int i; | 742 | int i; |
739 | 743 | ||
740 | static void (*sas_event_fns[DISC_NUM_EVENTS])(void *) = { | 744 | static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { |
741 | [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, | 745 | [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, |
742 | [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, | 746 | [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, |
743 | }; | 747 | }; |
744 | 748 | ||
745 | spin_lock_init(&disc->disc_event_lock); | 749 | spin_lock_init(&disc->disc_event_lock); |
746 | disc->pending = 0; | 750 | disc->pending = 0; |
747 | for (i = 0; i < DISC_NUM_EVENTS; i++) | 751 | for (i = 0; i < DISC_NUM_EVENTS; i++) { |
748 | INIT_WORK(&disc->disc_work[i], sas_event_fns[i], port); | 752 | INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); |
753 | disc->disc_work[i].port = port; | ||
754 | } | ||
749 | } | 755 | } |
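
The libsas discovery code combines two of the patterns above: each event gets a small wrapper struct (the work item plus the port pointer the handler needs), and the handler table changes from an array of void (*)(void *) to an array of work_func_t. A hedged sketch using invented names (struct my_event, struct my_port):

#include <linux/kernel.h>
#include <linux/workqueue.h>

enum { MY_EV_DISCOVER, MY_EV_REVALIDATE, MY_NUM_EVENTS };

struct my_port;

struct my_event {
        struct work_struct work;
        struct my_port *port;           /* what the old void *data carried */
};

static void my_discover(struct work_struct *work)
{
        struct my_event *ev = container_of(work, struct my_event, work);

        /* ... discover the domain behind ev->port ... */
        (void)ev;
}

static void my_revalidate(struct work_struct *work)
{
        struct my_event *ev = container_of(work, struct my_event, work);

        /* ... revalidate ev->port ... */
        (void)ev;
}

static void my_init_events(struct my_event events[MY_NUM_EVENTS],
                           struct my_port *port)
{
        static const work_func_t handlers[MY_NUM_EVENTS] = {
                [MY_EV_DISCOVER]   = my_discover,
                [MY_EV_REVALIDATE] = my_revalidate,
        };
        int i;

        for (i = 0; i < MY_NUM_EVENTS; i++) {
                INIT_WORK(&events[i].work, handlers[i]);
                events[i].port = port;
        }
}
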
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 19110ed1c89c..d83392ee6823 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c | |||
@@ -31,7 +31,7 @@ static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) | |||
31 | BUG_ON(event >= HA_NUM_EVENTS); | 31 | BUG_ON(event >= HA_NUM_EVENTS); |
32 | 32 | ||
33 | sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, | 33 | sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, |
34 | &sas_ha->ha_events[event], sas_ha->core.shost); | 34 | &sas_ha->ha_events[event].work, sas_ha->core.shost); |
35 | } | 35 | } |
36 | 36 | ||
37 | static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) | 37 | static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) |
@@ -41,7 +41,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event) | |||
41 | BUG_ON(event >= PORT_NUM_EVENTS); | 41 | BUG_ON(event >= PORT_NUM_EVENTS); |
42 | 42 | ||
43 | sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, | 43 | sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, |
44 | &phy->port_events[event], ha->core.shost); | 44 | &phy->port_events[event].work, ha->core.shost); |
45 | } | 45 | } |
46 | 46 | ||
47 | static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) | 47 | static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) |
@@ -51,12 +51,12 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event) | |||
51 | BUG_ON(event >= PHY_NUM_EVENTS); | 51 | BUG_ON(event >= PHY_NUM_EVENTS); |
52 | 52 | ||
53 | sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, | 53 | sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, |
54 | &phy->phy_events[event], ha->core.shost); | 54 | &phy->phy_events[event].work, ha->core.shost); |
55 | } | 55 | } |
56 | 56 | ||
57 | int sas_init_events(struct sas_ha_struct *sas_ha) | 57 | int sas_init_events(struct sas_ha_struct *sas_ha) |
58 | { | 58 | { |
59 | static void (*sas_ha_event_fns[HA_NUM_EVENTS])(void *) = { | 59 | static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = { |
60 | [HAE_RESET] = sas_hae_reset, | 60 | [HAE_RESET] = sas_hae_reset, |
61 | }; | 61 | }; |
62 | 62 | ||
@@ -64,8 +64,10 @@ int sas_init_events(struct sas_ha_struct *sas_ha) | |||
64 | 64 | ||
65 | spin_lock_init(&sas_ha->event_lock); | 65 | spin_lock_init(&sas_ha->event_lock); |
66 | 66 | ||
67 | for (i = 0; i < HA_NUM_EVENTS; i++) | 67 | for (i = 0; i < HA_NUM_EVENTS; i++) { |
68 | INIT_WORK(&sas_ha->ha_events[i], sas_ha_event_fns[i], sas_ha); | 68 | INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); |
69 | sas_ha->ha_events[i].ha = sas_ha; | ||
70 | } | ||
69 | 71 | ||
70 | sas_ha->notify_ha_event = notify_ha_event; | 72 | sas_ha->notify_ha_event = notify_ha_event; |
71 | sas_ha->notify_port_event = notify_port_event; | 73 | sas_ha->notify_port_event = notify_port_event; |
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 0fb347b4b1a2..d65bc4e0f214 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c | |||
@@ -65,9 +65,11 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) | |||
65 | 65 | ||
66 | /* ---------- HA events ---------- */ | 66 | /* ---------- HA events ---------- */ |
67 | 67 | ||
68 | void sas_hae_reset(void *data) | 68 | void sas_hae_reset(struct work_struct *work) |
69 | { | 69 | { |
70 | struct sas_ha_struct *ha = data; | 70 | struct sas_ha_event *ev = |
71 | container_of(work, struct sas_ha_event, work); | ||
72 | struct sas_ha_struct *ha = ev->ha; | ||
71 | 73 | ||
72 | sas_begin_event(HAE_RESET, &ha->event_lock, | 74 | sas_begin_event(HAE_RESET, &ha->event_lock, |
73 | &ha->pending); | 75 | &ha->pending); |
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index bffcee474921..137d7e496b6d 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h | |||
@@ -60,11 +60,11 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha); | |||
60 | 60 | ||
61 | void sas_deform_port(struct asd_sas_phy *phy); | 61 | void sas_deform_port(struct asd_sas_phy *phy); |
62 | 62 | ||
63 | void sas_porte_bytes_dmaed(void *); | 63 | void sas_porte_bytes_dmaed(struct work_struct *work); |
64 | void sas_porte_broadcast_rcvd(void *); | 64 | void sas_porte_broadcast_rcvd(struct work_struct *work); |
65 | void sas_porte_link_reset_err(void *); | 65 | void sas_porte_link_reset_err(struct work_struct *work); |
66 | void sas_porte_timer_event(void *); | 66 | void sas_porte_timer_event(struct work_struct *work); |
67 | void sas_porte_hard_reset(void *); | 67 | void sas_porte_hard_reset(struct work_struct *work); |
68 | 68 | ||
69 | int sas_notify_lldd_dev_found(struct domain_device *); | 69 | int sas_notify_lldd_dev_found(struct domain_device *); |
70 | void sas_notify_lldd_dev_gone(struct domain_device *); | 70 | void sas_notify_lldd_dev_gone(struct domain_device *); |
@@ -75,7 +75,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy); | |||
75 | 75 | ||
76 | struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); | 76 | struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); |
77 | 77 | ||
78 | void sas_hae_reset(void *); | 78 | void sas_hae_reset(struct work_struct *work); |
79 | 79 | ||
80 | static inline void sas_queue_event(int event, spinlock_t *lock, | 80 | static inline void sas_queue_event(int event, spinlock_t *lock, |
81 | unsigned long *pending, | 81 | unsigned long *pending, |
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 9340cdbae4a3..b459c4b635b1 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c | |||
@@ -30,9 +30,11 @@ | |||
30 | 30 | ||
31 | /* ---------- Phy events ---------- */ | 31 | /* ---------- Phy events ---------- */ |
32 | 32 | ||
33 | static void sas_phye_loss_of_signal(void *data) | 33 | static void sas_phye_loss_of_signal(struct work_struct *work) |
34 | { | 34 | { |
35 | struct asd_sas_phy *phy = data; | 35 | struct asd_sas_event *ev = |
36 | container_of(work, struct asd_sas_event, work); | ||
37 | struct asd_sas_phy *phy = ev->phy; | ||
36 | 38 | ||
37 | sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, | 39 | sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, |
38 | &phy->phy_events_pending); | 40 | &phy->phy_events_pending); |
@@ -40,18 +42,22 @@ static void sas_phye_loss_of_signal(void *data) | |||
40 | sas_deform_port(phy); | 42 | sas_deform_port(phy); |
41 | } | 43 | } |
42 | 44 | ||
43 | static void sas_phye_oob_done(void *data) | 45 | static void sas_phye_oob_done(struct work_struct *work) |
44 | { | 46 | { |
45 | struct asd_sas_phy *phy = data; | 47 | struct asd_sas_event *ev = |
48 | container_of(work, struct asd_sas_event, work); | ||
49 | struct asd_sas_phy *phy = ev->phy; | ||
46 | 50 | ||
47 | sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, | 51 | sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, |
48 | &phy->phy_events_pending); | 52 | &phy->phy_events_pending); |
49 | phy->error = 0; | 53 | phy->error = 0; |
50 | } | 54 | } |
51 | 55 | ||
52 | static void sas_phye_oob_error(void *data) | 56 | static void sas_phye_oob_error(struct work_struct *work) |
53 | { | 57 | { |
54 | struct asd_sas_phy *phy = data; | 58 | struct asd_sas_event *ev = |
59 | container_of(work, struct asd_sas_event, work); | ||
60 | struct asd_sas_phy *phy = ev->phy; | ||
55 | struct sas_ha_struct *sas_ha = phy->ha; | 61 | struct sas_ha_struct *sas_ha = phy->ha; |
56 | struct asd_sas_port *port = phy->port; | 62 | struct asd_sas_port *port = phy->port; |
57 | struct sas_internal *i = | 63 | struct sas_internal *i = |
@@ -80,9 +86,11 @@ static void sas_phye_oob_error(void *data) | |||
80 | } | 86 | } |
81 | } | 87 | } |
82 | 88 | ||
83 | static void sas_phye_spinup_hold(void *data) | 89 | static void sas_phye_spinup_hold(struct work_struct *work) |
84 | { | 90 | { |
85 | struct asd_sas_phy *phy = data; | 91 | struct asd_sas_event *ev = |
92 | container_of(work, struct asd_sas_event, work); | ||
93 | struct asd_sas_phy *phy = ev->phy; | ||
86 | struct sas_ha_struct *sas_ha = phy->ha; | 94 | struct sas_ha_struct *sas_ha = phy->ha; |
87 | struct sas_internal *i = | 95 | struct sas_internal *i = |
88 | to_sas_internal(sas_ha->core.shost->transportt); | 96 | to_sas_internal(sas_ha->core.shost->transportt); |
@@ -100,14 +108,14 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
100 | { | 108 | { |
101 | int i; | 109 | int i; |
102 | 110 | ||
103 | static void (*sas_phy_event_fns[PHY_NUM_EVENTS])(void *) = { | 111 | static const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { |
104 | [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, | 112 | [PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal, |
105 | [PHYE_OOB_DONE] = sas_phye_oob_done, | 113 | [PHYE_OOB_DONE] = sas_phye_oob_done, |
106 | [PHYE_OOB_ERROR] = sas_phye_oob_error, | 114 | [PHYE_OOB_ERROR] = sas_phye_oob_error, |
107 | [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, | 115 | [PHYE_SPINUP_HOLD] = sas_phye_spinup_hold, |
108 | }; | 116 | }; |
109 | 117 | ||
110 | static void (*sas_port_event_fns[PORT_NUM_EVENTS])(void *) = { | 118 | static const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = { |
111 | [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, | 119 | [PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed, |
112 | [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, | 120 | [PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd, |
113 | [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, | 121 | [PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err, |
@@ -122,13 +130,18 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) | |||
122 | 130 | ||
123 | phy->error = 0; | 131 | phy->error = 0; |
124 | INIT_LIST_HEAD(&phy->port_phy_el); | 132 | INIT_LIST_HEAD(&phy->port_phy_el); |
125 | for (k = 0; k < PORT_NUM_EVENTS; k++) | 133 | for (k = 0; k < PORT_NUM_EVENTS; k++) { |
126 | INIT_WORK(&phy->port_events[k], sas_port_event_fns[k], | 134 | INIT_WORK(&phy->port_events[k].work, |
127 | phy); | 135 | sas_port_event_fns[k]); |
136 | phy->port_events[k].phy = phy; | ||
137 | } | ||
138 | |||
139 | for (k = 0; k < PHY_NUM_EVENTS; k++) { | ||
140 | INIT_WORK(&phy->phy_events[k].work, | ||
141 | sas_phy_event_fns[k]); | ||
142 | phy->phy_events[k].phy = phy; | ||
143 | } | ||
128 | 144 | ||
129 | for (k = 0; k < PHY_NUM_EVENTS; k++) | ||
130 | INIT_WORK(&phy->phy_events[k], sas_phy_event_fns[k], | ||
131 | phy); | ||
132 | phy->port = NULL; | 145 | phy->port = NULL; |
133 | phy->ha = sas_ha; | 146 | phy->ha = sas_ha; |
134 | spin_lock_init(&phy->frame_rcvd_lock); | 147 | spin_lock_init(&phy->frame_rcvd_lock); |
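The sas_phy.c hunk also changes the per-event handler tables from void (*)(void *) to work_func_t and moves the backpointer assignment into the registration loop. A hedged sketch of that table-plus-loop shape, with invented EV_* and my_phy names:

        #include <linux/workqueue.h>

        enum my_ev { EV_FIRST, EV_SECOND, EV_NUM };

        struct my_phy;
        struct my_phy_event {
                struct work_struct work;
                struct my_phy *phy;
        };
        struct my_phy {
                struct my_phy_event events[EV_NUM];
        };

        static void ev_first_fn(struct work_struct *work)  { /* ... */ }
        static void ev_second_fn(struct work_struct *work) { /* ... */ }

        void my_phy_register(struct my_phy *phy)
        {
                static const work_func_t fns[EV_NUM] = {
                        [EV_FIRST]  = ev_first_fn,
                        [EV_SECOND] = ev_second_fn,
                };
                int k;

                for (k = 0; k < EV_NUM; k++) {
                        INIT_WORK(&phy->events[k].work, fns[k]);
                        phy->events[k].phy = phy;       /* backpointer replaces old data arg */
                }
        }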
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 253cdcf306a2..971c37ceecb4 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -181,9 +181,11 @@ void sas_deform_port(struct asd_sas_phy *phy) | |||
181 | 181 | ||
182 | /* ---------- SAS port events ---------- */ | 182 | /* ---------- SAS port events ---------- */ |
183 | 183 | ||
184 | void sas_porte_bytes_dmaed(void *data) | 184 | void sas_porte_bytes_dmaed(struct work_struct *work) |
185 | { | 185 | { |
186 | struct asd_sas_phy *phy = data; | 186 | struct asd_sas_event *ev = |
187 | container_of(work, struct asd_sas_event, work); | ||
188 | struct asd_sas_phy *phy = ev->phy; | ||
187 | 189 | ||
188 | sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, | 190 | sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, |
189 | &phy->port_events_pending); | 191 | &phy->port_events_pending); |
@@ -191,11 +193,13 @@ void sas_porte_bytes_dmaed(void *data) | |||
191 | sas_form_port(phy); | 193 | sas_form_port(phy); |
192 | } | 194 | } |
193 | 195 | ||
194 | void sas_porte_broadcast_rcvd(void *data) | 196 | void sas_porte_broadcast_rcvd(struct work_struct *work) |
195 | { | 197 | { |
198 | struct asd_sas_event *ev = | ||
199 | container_of(work, struct asd_sas_event, work); | ||
200 | struct asd_sas_phy *phy = ev->phy; | ||
196 | unsigned long flags; | 201 | unsigned long flags; |
197 | u32 prim; | 202 | u32 prim; |
198 | struct asd_sas_phy *phy = data; | ||
199 | 203 | ||
200 | sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, | 204 | sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, |
201 | &phy->port_events_pending); | 205 | &phy->port_events_pending); |
@@ -208,9 +212,11 @@ void sas_porte_broadcast_rcvd(void *data) | |||
208 | sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); | 212 | sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN); |
209 | } | 213 | } |
210 | 214 | ||
211 | void sas_porte_link_reset_err(void *data) | 215 | void sas_porte_link_reset_err(struct work_struct *work) |
212 | { | 216 | { |
213 | struct asd_sas_phy *phy = data; | 217 | struct asd_sas_event *ev = |
218 | container_of(work, struct asd_sas_event, work); | ||
219 | struct asd_sas_phy *phy = ev->phy; | ||
214 | 220 | ||
215 | sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, | 221 | sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, |
216 | &phy->port_events_pending); | 222 | &phy->port_events_pending); |
@@ -218,9 +224,11 @@ void sas_porte_link_reset_err(void *data) | |||
218 | sas_deform_port(phy); | 224 | sas_deform_port(phy); |
219 | } | 225 | } |
220 | 226 | ||
221 | void sas_porte_timer_event(void *data) | 227 | void sas_porte_timer_event(struct work_struct *work) |
222 | { | 228 | { |
223 | struct asd_sas_phy *phy = data; | 229 | struct asd_sas_event *ev = |
230 | container_of(work, struct asd_sas_event, work); | ||
231 | struct asd_sas_phy *phy = ev->phy; | ||
224 | 232 | ||
225 | sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, | 233 | sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, |
226 | &phy->port_events_pending); | 234 | &phy->port_events_pending); |
@@ -228,9 +236,11 @@ void sas_porte_timer_event(void *data) | |||
228 | sas_deform_port(phy); | 236 | sas_deform_port(phy); |
229 | } | 237 | } |
230 | 238 | ||
231 | void sas_porte_hard_reset(void *data) | 239 | void sas_porte_hard_reset(struct work_struct *work) |
232 | { | 240 | { |
233 | struct asd_sas_phy *phy = data; | 241 | struct asd_sas_event *ev = |
242 | container_of(work, struct asd_sas_event, work); | ||
243 | struct asd_sas_phy *phy = ev->phy; | ||
234 | 244 | ||
235 | sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, | 245 | sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, |
236 | &phy->port_events_pending); | 246 | &phy->port_events_pending); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index e064aac06b90..22672d54aa27 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -846,8 +846,10 @@ static int do_sas_task_abort(struct sas_task *task) | |||
846 | return -EAGAIN; | 846 | return -EAGAIN; |
847 | } | 847 | } |
848 | 848 | ||
849 | void sas_task_abort(struct sas_task *task) | 849 | void sas_task_abort(struct work_struct *work) |
850 | { | 850 | { |
851 | struct sas_task *task = | ||
852 | container_of(work, struct sas_task, abort_work); | ||
851 | int i; | 853 | int i; |
852 | 854 | ||
853 | for (i = 0; i < 5; i++) | 855 | for (i = 0; i < 5; i++) |
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c index dd67a68c5c23..c116a6ae3c54 100644 --- a/drivers/scsi/oktagon_esp.c +++ b/drivers/scsi/oktagon_esp.c | |||
@@ -72,12 +72,12 @@ static void dma_advance_sg(Scsi_Cmnd *); | |||
72 | static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x); | 72 | static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x); |
73 | 73 | ||
74 | #ifdef USE_BOTTOM_HALF | 74 | #ifdef USE_BOTTOM_HALF |
75 | static void dma_commit(void *opaque); | 75 | static void dma_commit(struct work_struct *unused); |
76 | 76 | ||
77 | long oktag_to_io(long *paddr, long *addr, long len); | 77 | long oktag_to_io(long *paddr, long *addr, long len); |
78 | long oktag_from_io(long *addr, long *paddr, long len); | 78 | long oktag_from_io(long *addr, long *paddr, long len); |
79 | 79 | ||
80 | static DECLARE_WORK(tq_fake_dma, dma_commit, NULL); | 80 | static DECLARE_WORK(tq_fake_dma, dma_commit); |
81 | 81 | ||
82 | #define DMA_MAXTRANSFER 0x8000 | 82 | #define DMA_MAXTRANSFER 0x8000 |
83 | 83 | ||
@@ -266,7 +266,7 @@ oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x) | |||
266 | */ | 266 | */ |
267 | 267 | ||
268 | 268 | ||
269 | static void dma_commit(void *opaque) | 269 | static void dma_commit(struct work_struct *unused) |
270 | { | 270 | { |
271 | long wait,len2,pos; | 271 | long wait,len2,pos; |
272 | struct NCR_ESP *esp; | 272 | struct NCR_ESP *esp; |
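Statically declared work items, as in oktagon_esp.c, change the same way: DECLARE_WORK() drops its data argument, and a handler that needs no per-invocation context simply ignores the work pointer. A minimal illustrative sketch under that assumption (fake_dma_commit and kick_fake_dma are invented names):

        #include <linux/workqueue.h>

        static void fake_dma_commit(struct work_struct *unused)
        {
                /* drain the software DMA queue; no per-call context is needed */
        }

        static DECLARE_WORK(fake_dma_work, fake_dma_commit);

        void kick_fake_dma(void)
        {
                schedule_work(&fake_dma_work);          /* callers are unchanged */
        }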
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 89a2a9f11e41..584ba4d6e038 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c | |||
@@ -31,7 +31,7 @@ typedef struct { | |||
31 | int base; /* Actual port address */ | 31 | int base; /* Actual port address */ |
32 | int mode; /* Transfer mode */ | 32 | int mode; /* Transfer mode */ |
33 | struct scsi_cmnd *cur_cmd; /* Current queued command */ | 33 | struct scsi_cmnd *cur_cmd; /* Current queued command */ |
34 | struct work_struct ppa_tq; /* Polling interrupt stuff */ | 34 | struct delayed_work ppa_tq; /* Polling interrupt stuff */ |
35 | unsigned long jstart; /* Jiffies at start */ | 35 | unsigned long jstart; /* Jiffies at start */ |
36 | unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ | 36 | unsigned long recon_tmo; /* How many usecs to wait for reconnection (6th bit) */ |
37 | unsigned int failed:1; /* Failure flag */ | 37 | unsigned int failed:1; /* Failure flag */ |
@@ -627,9 +627,9 @@ static int ppa_completion(struct scsi_cmnd *cmd) | |||
627 | * the scheduler's task queue to generate a stream of call-backs and | 627 | * the scheduler's task queue to generate a stream of call-backs and |
628 | * complete the request when the drive is ready. | 628 | * complete the request when the drive is ready. |
629 | */ | 629 | */ |
630 | static void ppa_interrupt(void *data) | 630 | static void ppa_interrupt(struct work_struct *work) |
631 | { | 631 | { |
632 | ppa_struct *dev = (ppa_struct *) data; | 632 | ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work); |
633 | struct scsi_cmnd *cmd = dev->cur_cmd; | 633 | struct scsi_cmnd *cmd = dev->cur_cmd; |
634 | 634 | ||
635 | if (!cmd) { | 635 | if (!cmd) { |
@@ -637,7 +637,6 @@ static void ppa_interrupt(void *data) | |||
637 | return; | 637 | return; |
638 | } | 638 | } |
639 | if (ppa_engine(dev, cmd)) { | 639 | if (ppa_engine(dev, cmd)) { |
640 | dev->ppa_tq.data = (void *) dev; | ||
641 | schedule_delayed_work(&dev->ppa_tq, 1); | 640 | schedule_delayed_work(&dev->ppa_tq, 1); |
642 | return; | 641 | return; |
643 | } | 642 | } |
@@ -822,8 +821,7 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, | |||
822 | cmd->result = DID_ERROR << 16; /* default return code */ | 821 | cmd->result = DID_ERROR << 16; /* default return code */ |
823 | cmd->SCp.phase = 0; /* bus free */ | 822 | cmd->SCp.phase = 0; /* bus free */ |
824 | 823 | ||
825 | dev->ppa_tq.data = dev; | 824 | schedule_delayed_work(&dev->ppa_tq, 0); |
826 | schedule_work(&dev->ppa_tq); | ||
827 | 825 | ||
828 | ppa_pb_claim(dev); | 826 | ppa_pb_claim(dev); |
829 | 827 | ||
@@ -1086,7 +1084,7 @@ static int __ppa_attach(struct parport *pb) | |||
1086 | else | 1084 | else |
1087 | ports = 8; | 1085 | ports = 8; |
1088 | 1086 | ||
1089 | INIT_WORK(&dev->ppa_tq, ppa_interrupt, dev); | 1087 | INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt); |
1090 | 1088 | ||
1091 | err = -ENOMEM; | 1089 | err = -ENOMEM; |
1092 | host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); | 1090 | host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *)); |
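The ppa.c hunks illustrate the delayed variant of the conversion: drivers that reschedule themselves switch from struct work_struct to struct delayed_work, INIT_WORK becomes INIT_DELAYED_WORK, schedule_work() becomes schedule_delayed_work(..., 0), and the callback reaches its container through the embedded .work member. A sketch of that shape with invented names (my_poll_dev, my_poll), not the ppa driver itself:

        #include <linux/workqueue.h>

        struct my_poll_dev {
                struct delayed_work poll;       /* was: struct work_struct */
                int busy;
        };

        static void my_poll(struct work_struct *work)
        {
                struct my_poll_dev *dev =
                        container_of(work, struct my_poll_dev, poll.work);

                if (dev->busy)                  /* not finished: poll again later */
                        schedule_delayed_work(&dev->poll, 1);
        }

        void my_poll_dev_init(struct my_poll_dev *dev)
        {
                INIT_DELAYED_WORK(&dev->poll, my_poll);
                schedule_delayed_work(&dev->poll, 0);   /* zero delay: run as soon as possible */
        }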
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index db9d88e7bee7..969c9e431028 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -961,9 +961,10 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
961 | * the mid-level tries to sleep when it reaches the driver threshold | 961 | * the mid-level tries to sleep when it reaches the driver threshold |
962 | * "host->can_queue". This can cause a panic if we were in our interrupt code. | 962 | * "host->can_queue". This can cause a panic if we were in our interrupt code. |
963 | **/ | 963 | **/ |
964 | static void qla4xxx_do_dpc(void *data) | 964 | static void qla4xxx_do_dpc(struct work_struct *work) |
965 | { | 965 | { |
966 | struct scsi_qla_host *ha = (struct scsi_qla_host *) data; | 966 | struct scsi_qla_host *ha = |
967 | container_of(work, struct scsi_qla_host, dpc_work); | ||
967 | struct ddb_entry *ddb_entry, *dtemp; | 968 | struct ddb_entry *ddb_entry, *dtemp; |
968 | 969 | ||
969 | DEBUG2(printk("scsi%ld: %s: DPC handler waking up." | 970 | DEBUG2(printk("scsi%ld: %s: DPC handler waking up." |
@@ -1253,7 +1254,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1253 | ret = -ENODEV; | 1254 | ret = -ENODEV; |
1254 | goto probe_failed; | 1255 | goto probe_failed; |
1255 | } | 1256 | } |
1256 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc, ha); | 1257 | INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); |
1257 | 1258 | ||
1258 | ret = request_irq(pdev->irq, qla4xxx_intr_handler, | 1259 | ret = request_irq(pdev->irq, qla4xxx_intr_handler, |
1259 | SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); | 1260 | SA_INTERRUPT|SA_SHIRQ, "qla4xxx", ha); |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 4d656148bd67..14e635aa44ce 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -437,9 +437,10 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, | |||
437 | goto retry; | 437 | goto retry; |
438 | } | 438 | } |
439 | 439 | ||
440 | static void scsi_target_reap_usercontext(void *data) | 440 | static void scsi_target_reap_usercontext(struct work_struct *work) |
441 | { | 441 | { |
442 | struct scsi_target *starget = data; | 442 | struct scsi_target *starget = |
443 | container_of(work, struct scsi_target, ew.work); | ||
443 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); | 444 | struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); |
444 | unsigned long flags; | 445 | unsigned long flags; |
445 | 446 | ||
@@ -475,7 +476,7 @@ void scsi_target_reap(struct scsi_target *starget) | |||
475 | starget->state = STARGET_DEL; | 476 | starget->state = STARGET_DEL; |
476 | spin_unlock_irqrestore(shost->host_lock, flags); | 477 | spin_unlock_irqrestore(shost->host_lock, flags); |
477 | execute_in_process_context(scsi_target_reap_usercontext, | 478 | execute_in_process_context(scsi_target_reap_usercontext, |
478 | starget, &starget->ew); | 479 | &starget->ew); |
479 | return; | 480 | return; |
480 | 481 | ||
481 | } | 482 | } |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index e1a91665d1c2..259c90cfa367 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -218,16 +218,16 @@ static void scsi_device_cls_release(struct class_device *class_dev) | |||
218 | put_device(&sdev->sdev_gendev); | 218 | put_device(&sdev->sdev_gendev); |
219 | } | 219 | } |
220 | 220 | ||
221 | static void scsi_device_dev_release_usercontext(void *data) | 221 | static void scsi_device_dev_release_usercontext(struct work_struct *work) |
222 | { | 222 | { |
223 | struct device *dev = data; | ||
224 | struct scsi_device *sdev; | 223 | struct scsi_device *sdev; |
225 | struct device *parent; | 224 | struct device *parent; |
226 | struct scsi_target *starget; | 225 | struct scsi_target *starget; |
227 | unsigned long flags; | 226 | unsigned long flags; |
228 | 227 | ||
229 | parent = dev->parent; | 228 | sdev = container_of(work, struct scsi_device, ew.work); |
230 | sdev = to_scsi_device(dev); | 229 | |
230 | parent = sdev->sdev_gendev.parent; | ||
231 | starget = to_scsi_target(parent); | 231 | starget = to_scsi_target(parent); |
232 | 232 | ||
233 | spin_lock_irqsave(sdev->host->host_lock, flags); | 233 | spin_lock_irqsave(sdev->host->host_lock, flags); |
@@ -258,7 +258,7 @@ static void scsi_device_dev_release_usercontext(void *data) | |||
258 | static void scsi_device_dev_release(struct device *dev) | 258 | static void scsi_device_dev_release(struct device *dev) |
259 | { | 259 | { |
260 | struct scsi_device *sdp = to_scsi_device(dev); | 260 | struct scsi_device *sdp = to_scsi_device(dev); |
261 | execute_in_process_context(scsi_device_dev_release_usercontext, dev, | 261 | execute_in_process_context(scsi_device_dev_release_usercontext, |
262 | &sdp->ew); | 262 | &sdp->ew); |
263 | } | 263 | } |
264 | 264 | ||
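The scsi_scan.c and scsi_sysfs.c hunks track the matching change to execute_in_process_context(): it no longer takes a data pointer, only the callback and the embedded struct execute_work, so the callback locates its object via container_of() on ew.work. Roughly, as a hedged sketch (my_obj is illustrative, not the SCSI structures):

        #include <linux/kernel.h>
        #include <linux/workqueue.h>

        struct my_obj {
                struct execute_work ew;
                /* ... other fields ... */
        };

        static void my_obj_release_usercontext(struct work_struct *work)
        {
                struct my_obj *obj = container_of(work, struct my_obj, ew.work);

                pr_debug("releasing %p in process context\n", obj);
        }

        void my_obj_release(struct my_obj *obj)
        {
                execute_in_process_context(my_obj_release_usercontext, &obj->ew);
        }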
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 39da5cd6fb6f..386dbae17b44 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -185,10 +185,11 @@ static void cmd_hashlist_del(struct scsi_cmnd *cmd) | |||
185 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); | 185 | spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags); |
186 | } | 186 | } |
187 | 187 | ||
188 | static void scsi_tgt_cmd_destroy(void *data) | 188 | static void scsi_tgt_cmd_destroy(struct work_struct *work) |
189 | { | 189 | { |
190 | struct scsi_cmnd *cmd = data; | 190 | struct scsi_tgt_cmd *tcmd = |
191 | struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data; | 191 | container_of(work, struct scsi_tgt_cmd, work); |
192 | struct scsi_cmnd *cmd = tcmd->rq->special; | ||
192 | 193 | ||
193 | dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, | 194 | dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction, |
194 | rq_data_dir(cmd->request)); | 195 | rq_data_dir(cmd->request)); |
@@ -214,6 +215,7 @@ static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd, | |||
214 | struct list_head *head; | 215 | struct list_head *head; |
215 | 216 | ||
216 | tcmd->tag = tag; | 217 | tcmd->tag = tag; |
218 | INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy); | ||
217 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); | 219 | spin_lock_irqsave(&qdata->cmd_hash_lock, flags); |
218 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; | 220 | head = &qdata->cmd_hash[cmd_hashfn(tag)]; |
219 | list_add(&tcmd->hash_list, head); | 221 | list_add(&tcmd->hash_list, head); |
@@ -303,7 +305,7 @@ void scsi_tgt_free_queue(struct Scsi_Host *shost) | |||
303 | cmd = tcmd->rq->special; | 305 | cmd = tcmd->rq->special; |
304 | 306 | ||
305 | shost->hostt->eh_abort_handler(cmd); | 307 | shost->hostt->eh_abort_handler(cmd); |
306 | scsi_tgt_cmd_destroy(cmd); | 308 | scsi_tgt_cmd_destroy(&tcmd->work); |
307 | } | 309 | } |
308 | } | 310 | } |
309 | EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); | 311 | EXPORT_SYMBOL_GPL(scsi_tgt_free_queue); |
@@ -347,7 +349,6 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd) | |||
347 | dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); | 349 | dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request)); |
348 | 350 | ||
349 | scsi_tgt_uspace_send_status(cmd, tcmd->tag); | 351 | scsi_tgt_uspace_send_status(cmd, tcmd->tag); |
350 | INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy, cmd); | ||
351 | queue_work(scsi_tgtd, &tcmd->work); | 352 | queue_work(scsi_tgtd, &tcmd->work); |
352 | } | 353 | } |
353 | 354 | ||
@@ -549,13 +550,15 @@ static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr, | |||
549 | 550 | ||
550 | static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) | 551 | static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) |
551 | { | 552 | { |
553 | struct scsi_tgt_cmd *tcmd; | ||
552 | int err; | 554 | int err; |
553 | 555 | ||
554 | err = shost->hostt->eh_abort_handler(cmd); | 556 | err = shost->hostt->eh_abort_handler(cmd); |
555 | if (err) | 557 | if (err) |
556 | eprintk("fail to abort %p\n", cmd); | 558 | eprintk("fail to abort %p\n", cmd); |
557 | 559 | ||
558 | scsi_tgt_cmd_destroy(cmd); | 560 | tcmd = cmd->request->end_io_data; |
561 | scsi_tgt_cmd_destroy(&tcmd->work); | ||
559 | return err; | 562 | return err; |
560 | } | 563 | } |
561 | 564 | ||
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 38c215a78f69..3571ce8934e7 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -241,9 +241,9 @@ fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) | |||
241 | #define FC_MGMTSRVR_PORTID 0x00000a | 241 | #define FC_MGMTSRVR_PORTID 0x00000a |
242 | 242 | ||
243 | 243 | ||
244 | static void fc_timeout_deleted_rport(void *data); | 244 | static void fc_timeout_deleted_rport(struct work_struct *work); |
245 | static void fc_timeout_fail_rport_io(void *data); | 245 | static void fc_timeout_fail_rport_io(struct work_struct *work); |
246 | static void fc_scsi_scan_rport(void *data); | 246 | static void fc_scsi_scan_rport(struct work_struct *work); |
247 | 247 | ||
248 | /* | 248 | /* |
249 | * Attribute counts pre object type... | 249 | * Attribute counts pre object type... |
@@ -1613,7 +1613,7 @@ fc_flush_work(struct Scsi_Host *shost) | |||
1613 | * 1 on success / 0 already queued / < 0 for error | 1613 | * 1 on success / 0 already queued / < 0 for error |
1614 | **/ | 1614 | **/ |
1615 | static int | 1615 | static int |
1616 | fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, | 1616 | fc_queue_devloss_work(struct Scsi_Host *shost, struct delayed_work *work, |
1617 | unsigned long delay) | 1617 | unsigned long delay) |
1618 | { | 1618 | { |
1619 | if (unlikely(!fc_host_devloss_work_q(shost))) { | 1619 | if (unlikely(!fc_host_devloss_work_q(shost))) { |
@@ -1625,9 +1625,6 @@ fc_queue_devloss_work(struct Scsi_Host *shost, struct work_struct *work, | |||
1625 | return -EINVAL; | 1625 | return -EINVAL; |
1626 | } | 1626 | } |
1627 | 1627 | ||
1628 | if (delay == 0) | ||
1629 | return queue_work(fc_host_devloss_work_q(shost), work); | ||
1630 | |||
1631 | return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); | 1628 | return queue_delayed_work(fc_host_devloss_work_q(shost), work, delay); |
1632 | } | 1629 | } |
1633 | 1630 | ||
@@ -1712,12 +1709,13 @@ EXPORT_SYMBOL(fc_remove_host); | |||
1712 | * fc_starget_delete - called to delete the scsi decendents of an rport | 1709 | * fc_starget_delete - called to delete the scsi decendents of an rport |
1713 | * (target and all sdevs) | 1710 | * (target and all sdevs) |
1714 | * | 1711 | * |
1715 | * @data: remote port to be operated on. | 1712 | * @work: remote port to be operated on. |
1716 | **/ | 1713 | **/ |
1717 | static void | 1714 | static void |
1718 | fc_starget_delete(void *data) | 1715 | fc_starget_delete(struct work_struct *work) |
1719 | { | 1716 | { |
1720 | struct fc_rport *rport = (struct fc_rport *)data; | 1717 | struct fc_rport *rport = |
1718 | container_of(work, struct fc_rport, stgt_delete_work); | ||
1721 | struct Scsi_Host *shost = rport_to_shost(rport); | 1719 | struct Scsi_Host *shost = rport_to_shost(rport); |
1722 | unsigned long flags; | 1720 | unsigned long flags; |
1723 | struct fc_internal *i = to_fc_internal(shost->transportt); | 1721 | struct fc_internal *i = to_fc_internal(shost->transportt); |
@@ -1751,12 +1749,13 @@ fc_starget_delete(void *data) | |||
1751 | /** | 1749 | /** |
1752 | * fc_rport_final_delete - finish rport termination and delete it. | 1750 | * fc_rport_final_delete - finish rport termination and delete it. |
1753 | * | 1751 | * |
1754 | * @data: remote port to be deleted. | 1752 | * @work: remote port to be deleted. |
1755 | **/ | 1753 | **/ |
1756 | static void | 1754 | static void |
1757 | fc_rport_final_delete(void *data) | 1755 | fc_rport_final_delete(struct work_struct *work) |
1758 | { | 1756 | { |
1759 | struct fc_rport *rport = (struct fc_rport *)data; | 1757 | struct fc_rport *rport = |
1758 | container_of(work, struct fc_rport, rport_delete_work); | ||
1760 | struct device *dev = &rport->dev; | 1759 | struct device *dev = &rport->dev; |
1761 | struct Scsi_Host *shost = rport_to_shost(rport); | 1760 | struct Scsi_Host *shost = rport_to_shost(rport); |
1762 | struct fc_internal *i = to_fc_internal(shost->transportt); | 1761 | struct fc_internal *i = to_fc_internal(shost->transportt); |
@@ -1770,7 +1769,7 @@ fc_rport_final_delete(void *data) | |||
1770 | 1769 | ||
1771 | /* Delete SCSI target and sdevs */ | 1770 | /* Delete SCSI target and sdevs */ |
1772 | if (rport->scsi_target_id != -1) | 1771 | if (rport->scsi_target_id != -1) |
1773 | fc_starget_delete(data); | 1772 | fc_starget_delete(&rport->stgt_delete_work); |
1774 | else if (i->f->dev_loss_tmo_callbk) | 1773 | else if (i->f->dev_loss_tmo_callbk) |
1775 | i->f->dev_loss_tmo_callbk(rport); | 1774 | i->f->dev_loss_tmo_callbk(rport); |
1776 | else if (i->f->terminate_rport_io) | 1775 | else if (i->f->terminate_rport_io) |
@@ -1829,11 +1828,11 @@ fc_rport_create(struct Scsi_Host *shost, int channel, | |||
1829 | rport->channel = channel; | 1828 | rport->channel = channel; |
1830 | rport->fast_io_fail_tmo = -1; | 1829 | rport->fast_io_fail_tmo = -1; |
1831 | 1830 | ||
1832 | INIT_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport, rport); | 1831 | INIT_DELAYED_WORK(&rport->dev_loss_work, fc_timeout_deleted_rport); |
1833 | INIT_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io, rport); | 1832 | INIT_DELAYED_WORK(&rport->fail_io_work, fc_timeout_fail_rport_io); |
1834 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport, rport); | 1833 | INIT_WORK(&rport->scan_work, fc_scsi_scan_rport); |
1835 | INIT_WORK(&rport->stgt_delete_work, fc_starget_delete, rport); | 1834 | INIT_WORK(&rport->stgt_delete_work, fc_starget_delete); |
1836 | INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete, rport); | 1835 | INIT_WORK(&rport->rport_delete_work, fc_rport_final_delete); |
1837 | 1836 | ||
1838 | spin_lock_irqsave(shost->host_lock, flags); | 1837 | spin_lock_irqsave(shost->host_lock, flags); |
1839 | 1838 | ||
@@ -1963,7 +1962,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel, | |||
1963 | } | 1962 | } |
1964 | 1963 | ||
1965 | if (match) { | 1964 | if (match) { |
1966 | struct work_struct *work = | 1965 | struct delayed_work *work = |
1967 | &rport->dev_loss_work; | 1966 | &rport->dev_loss_work; |
1968 | 1967 | ||
1969 | memcpy(&rport->node_name, &ids->node_name, | 1968 | memcpy(&rport->node_name, &ids->node_name, |
@@ -2267,12 +2266,13 @@ EXPORT_SYMBOL(fc_remote_port_rolechg); | |||
2267 | * was a SCSI target (thus was blocked), and failed | 2266 | * was a SCSI target (thus was blocked), and failed |
2268 | * to return in the alloted time. | 2267 | * to return in the alloted time. |
2269 | * | 2268 | * |
2270 | * @data: rport target that failed to reappear in the alloted time. | 2269 | * @work: rport target that failed to reappear in the alloted time. |
2271 | **/ | 2270 | **/ |
2272 | static void | 2271 | static void |
2273 | fc_timeout_deleted_rport(void *data) | 2272 | fc_timeout_deleted_rport(struct work_struct *work) |
2274 | { | 2273 | { |
2275 | struct fc_rport *rport = (struct fc_rport *)data; | 2274 | struct fc_rport *rport = |
2275 | container_of(work, struct fc_rport, dev_loss_work.work); | ||
2276 | struct Scsi_Host *shost = rport_to_shost(rport); | 2276 | struct Scsi_Host *shost = rport_to_shost(rport); |
2277 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); | 2277 | struct fc_host_attrs *fc_host = shost_to_fc_host(shost); |
2278 | unsigned long flags; | 2278 | unsigned long flags; |
@@ -2366,15 +2366,16 @@ fc_timeout_deleted_rport(void *data) | |||
2366 | * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a | 2366 | * fc_timeout_fail_rport_io - Timeout handler for a fast io failing on a |
2367 | * disconnected SCSI target. | 2367 | * disconnected SCSI target. |
2368 | * | 2368 | * |
2369 | * @data: rport to terminate io on. | 2369 | * @work: rport to terminate io on. |
2370 | * | 2370 | * |
2371 | * Notes: Only requests the failure of the io, not that all are flushed | 2371 | * Notes: Only requests the failure of the io, not that all are flushed |
2372 | * prior to returning. | 2372 | * prior to returning. |
2373 | **/ | 2373 | **/ |
2374 | static void | 2374 | static void |
2375 | fc_timeout_fail_rport_io(void *data) | 2375 | fc_timeout_fail_rport_io(struct work_struct *work) |
2376 | { | 2376 | { |
2377 | struct fc_rport *rport = (struct fc_rport *)data; | 2377 | struct fc_rport *rport = |
2378 | container_of(work, struct fc_rport, fail_io_work.work); | ||
2378 | struct Scsi_Host *shost = rport_to_shost(rport); | 2379 | struct Scsi_Host *shost = rport_to_shost(rport); |
2379 | struct fc_internal *i = to_fc_internal(shost->transportt); | 2380 | struct fc_internal *i = to_fc_internal(shost->transportt); |
2380 | 2381 | ||
@@ -2387,12 +2388,13 @@ fc_timeout_fail_rport_io(void *data) | |||
2387 | /** | 2388 | /** |
2388 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. | 2389 | * fc_scsi_scan_rport - called to perform a scsi scan on a remote port. |
2389 | * | 2390 | * |
2390 | * @data: remote port to be scanned. | 2391 | * @work: remote port to be scanned. |
2391 | **/ | 2392 | **/ |
2392 | static void | 2393 | static void |
2393 | fc_scsi_scan_rport(void *data) | 2394 | fc_scsi_scan_rport(struct work_struct *work) |
2394 | { | 2395 | { |
2395 | struct fc_rport *rport = (struct fc_rport *)data; | 2396 | struct fc_rport *rport = |
2397 | container_of(work, struct fc_rport, scan_work); | ||
2396 | struct Scsi_Host *shost = rport_to_shost(rport); | 2398 | struct Scsi_Host *shost = rport_to_shost(rport); |
2397 | unsigned long flags; | 2399 | unsigned long flags; |
2398 | 2400 | ||
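Earlier in the scsi_transport_fc.c hunks, fc_queue_devloss_work() loses its delay == 0 special case: once the item is a delayed_work, queue_delayed_work() with a zero delay queues it to run without additional delay, so a separate queue_work() path is unnecessary. A short hedged sketch of the simplified helper (the name and signature are illustrative):

        #include <linux/workqueue.h>

        int my_queue_devloss_work(struct workqueue_struct *wq,
                                  struct delayed_work *dw,
                                  unsigned long delay)
        {
                /* zero delay means "run as soon as possible"; no special case needed */
                return queue_delayed_work(wq, dw, delay);
        }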
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9b25124a989e..9c22f1342715 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -234,9 +234,11 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel, | |||
234 | return 0; | 234 | return 0; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void session_recovery_timedout(void *data) | 237 | static void session_recovery_timedout(struct work_struct *work) |
238 | { | 238 | { |
239 | struct iscsi_cls_session *session = data; | 239 | struct iscsi_cls_session *session = |
240 | container_of(work, struct iscsi_cls_session, | ||
241 | recovery_work.work); | ||
240 | 242 | ||
241 | dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " | 243 | dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed " |
242 | "out after %d secs\n", session->recovery_tmo); | 244 | "out after %d secs\n", session->recovery_tmo); |
@@ -276,7 +278,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, | |||
276 | 278 | ||
277 | session->transport = transport; | 279 | session->transport = transport; |
278 | session->recovery_tmo = 120; | 280 | session->recovery_tmo = 120; |
279 | INIT_WORK(&session->recovery_work, session_recovery_timedout, session); | 281 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); |
280 | INIT_LIST_HEAD(&session->host_list); | 282 | INIT_LIST_HEAD(&session->host_list); |
281 | INIT_LIST_HEAD(&session->sess_list); | 283 | INIT_LIST_HEAD(&session->sess_list); |
282 | 284 | ||
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 9f070f0d0f2b..3fded4831460 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -964,9 +964,10 @@ struct work_queue_wrapper { | |||
964 | }; | 964 | }; |
965 | 965 | ||
966 | static void | 966 | static void |
967 | spi_dv_device_work_wrapper(void *data) | 967 | spi_dv_device_work_wrapper(struct work_struct *work) |
968 | { | 968 | { |
969 | struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; | 969 | struct work_queue_wrapper *wqw = |
970 | container_of(work, struct work_queue_wrapper, work); | ||
970 | struct scsi_device *sdev = wqw->sdev; | 971 | struct scsi_device *sdev = wqw->sdev; |
971 | 972 | ||
972 | kfree(wqw); | 973 | kfree(wqw); |
@@ -1006,7 +1007,7 @@ spi_schedule_dv_device(struct scsi_device *sdev) | |||
1006 | return; | 1007 | return; |
1007 | } | 1008 | } |
1008 | 1009 | ||
1009 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw); | 1010 | INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); |
1010 | wqw->sdev = sdev; | 1011 | wqw->sdev = sdev; |
1011 | 1012 | ||
1012 | schedule_work(&wqw->work); | 1013 | schedule_work(&wqw->work); |
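The scsi_transport_spi.c hunk shows the one-shot wrapper pattern under the new API: a heap-allocated struct carrying a work_struct plus a payload pointer, freed by the handler once it has pulled out what it needs. A self-contained sketch under the same assumptions (my_wrapper, my_target, and my_schedule_dv are invented names):

        #include <linux/errno.h>
        #include <linux/kernel.h>
        #include <linux/slab.h>
        #include <linux/workqueue.h>

        struct my_target;

        struct my_wrapper {
                struct work_struct work;
                struct my_target *target;
        };

        static void my_dv_work(struct work_struct *work)
        {
                struct my_wrapper *wqw = container_of(work, struct my_wrapper, work);
                struct my_target *target = wqw->target;

                kfree(wqw);                     /* wrapper is single use */
                pr_debug("running domain validation for %p\n", target);
        }

        int my_schedule_dv(struct my_target *target)
        {
                struct my_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_KERNEL);

                if (!wqw)
                        return -ENOMEM;
                INIT_WORK(&wqw->work, my_dv_work);
                wqw->target = target;
                schedule_work(&wqw->work);
                return 0;
        }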
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c index aee1b31f1a1c..3db206d29b33 100644 --- a/drivers/serial/mcfserial.c +++ b/drivers/serial/mcfserial.c | |||
@@ -60,7 +60,8 @@ struct timer_list mcfrs_timer_struct; | |||
60 | #if defined(CONFIG_HW_FEITH) | 60 | #if defined(CONFIG_HW_FEITH) |
61 | #define CONSOLE_BAUD_RATE 38400 | 61 | #define CONSOLE_BAUD_RATE 38400 |
62 | #define DEFAULT_CBAUD B38400 | 62 | #define DEFAULT_CBAUD B38400 |
63 | #elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || defined(CONFIG_M5329EVB) | 63 | #elif defined(CONFIG_MOD5272) || defined(CONFIG_M5208EVB) || \ |
64 | defined(CONFIG_M5329EVB) || defined(CONFIG_GILBARCO) | ||
64 | #define CONSOLE_BAUD_RATE 115200 | 65 | #define CONSOLE_BAUD_RATE 115200 |
65 | #define DEFAULT_CBAUD B115200 | 66 | #define DEFAULT_CBAUD B115200 |
66 | #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \ | 67 | #elif defined(CONFIG_ARNEWSH) || defined(CONFIG_FREESCALE) || \ |
@@ -109,12 +110,30 @@ static struct mcf_serial mcfrs_table[] = { | |||
109 | .irq = IRQBASE, | 110 | .irq = IRQBASE, |
110 | .flags = ASYNC_BOOT_AUTOCONF, | 111 | .flags = ASYNC_BOOT_AUTOCONF, |
111 | }, | 112 | }, |
113 | #ifdef MCFUART_BASE2 | ||
112 | { /* ttyS1 */ | 114 | { /* ttyS1 */ |
113 | .magic = 0, | 115 | .magic = 0, |
114 | .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2), | 116 | .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE2), |
115 | .irq = IRQBASE+1, | 117 | .irq = IRQBASE+1, |
116 | .flags = ASYNC_BOOT_AUTOCONF, | 118 | .flags = ASYNC_BOOT_AUTOCONF, |
117 | }, | 119 | }, |
120 | #endif | ||
121 | #ifdef MCFUART_BASE3 | ||
122 | { /* ttyS2 */ | ||
123 | .magic = 0, | ||
124 | .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE3), | ||
125 | .irq = IRQBASE+2, | ||
126 | .flags = ASYNC_BOOT_AUTOCONF, | ||
127 | }, | ||
128 | #endif | ||
129 | #ifdef MCFUART_BASE4 | ||
130 | { /* ttyS3 */ | ||
131 | .magic = 0, | ||
132 | .addr = (volatile unsigned char *) (MCF_MBAR+MCFUART_BASE4), | ||
133 | .irq = IRQBASE+3, | ||
134 | .flags = ASYNC_BOOT_AUTOCONF, | ||
135 | }, | ||
136 | #endif | ||
118 | }; | 137 | }; |
119 | 138 | ||
120 | 139 | ||
@@ -1516,6 +1535,22 @@ static void mcfrs_irqinit(struct mcf_serial *info) | |||
1516 | imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 + | 1535 | imrp = (volatile unsigned long *) (MCF_MBAR + MCFICM_INTC0 + |
1517 | MCFINTC_IMRL); | 1536 | MCFINTC_IMRL); |
1518 | *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1); | 1537 | *imrp &= ~((1 << (info->irq - MCFINT_VECBASE)) | 1); |
1538 | #if defined(CONFIG_M527x) | ||
1539 | { | ||
1540 | /* | ||
1541 | * External Pin Mask Setting & Enable External Pin for Interface | ||
1542 | * mrcbis@aliceposta.it | ||
1543 | */ | ||
1544 | unsigned short *serpin_enable_mask; | ||
1545 | serpin_enable_mask = (MCF_IPSBAR + MCF_GPIO_PAR_UART); | ||
1546 | if (info->line == 0) | ||
1547 | *serpin_enable_mask |= UART0_ENABLE_MASK; | ||
1548 | else if (info->line == 1) | ||
1549 | *serpin_enable_mask |= UART1_ENABLE_MASK; | ||
1550 | else if (info->line == 2) | ||
1551 | *serpin_enable_mask |= UART2_ENABLE_MASK; | ||
1552 | } | ||
1553 | #endif | ||
1519 | #elif defined(CONFIG_M520x) | 1554 | #elif defined(CONFIG_M520x) |
1520 | volatile unsigned char *icrp, *uartp; | 1555 | volatile unsigned char *icrp, *uartp; |
1521 | volatile unsigned long *imrp; | 1556 | volatile unsigned long *imrp; |
@@ -1713,7 +1748,7 @@ mcfrs_init(void) | |||
1713 | /* Initialize the tty_driver structure */ | 1748 | /* Initialize the tty_driver structure */ |
1714 | mcfrs_serial_driver->owner = THIS_MODULE; | 1749 | mcfrs_serial_driver->owner = THIS_MODULE; |
1715 | mcfrs_serial_driver->name = "ttyS"; | 1750 | mcfrs_serial_driver->name = "ttyS"; |
1716 | mcfrs_serial_driver->driver_name = "serial"; | 1751 | mcfrs_serial_driver->driver_name = "mcfserial"; |
1717 | mcfrs_serial_driver->major = TTY_MAJOR; | 1752 | mcfrs_serial_driver->major = TTY_MAJOR; |
1718 | mcfrs_serial_driver->minor_start = 64; | 1753 | mcfrs_serial_driver->minor_start = 64; |
1719 | mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; | 1754 | mcfrs_serial_driver->type = TTY_DRIVER_TYPE_SERIAL; |
@@ -1797,10 +1832,23 @@ void mcfrs_init_console(void) | |||
1797 | uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8; | 1832 | uartp[MCFUART_UMR] = MCFUART_MR1_PARITYNONE | MCFUART_MR1_CS8; |
1798 | uartp[MCFUART_UMR] = MCFUART_MR2_STOP1; | 1833 | uartp[MCFUART_UMR] = MCFUART_MR2_STOP1; |
1799 | 1834 | ||
1835 | #ifdef CONFIG_M5272 | ||
1836 | { | ||
1837 | /* | ||
1838 | * For the MCF5272, also compute the baudrate fraction. | ||
1839 | */ | ||
1840 | int fraction = MCF_BUSCLK - (clk * 32 * mcfrs_console_baud); | ||
1841 | fraction *= 16; | ||
1842 | fraction /= (32 * mcfrs_console_baud); | ||
1843 | uartp[MCFUART_UFPD] = (fraction & 0xf); /* set fraction */ | ||
1844 | clk = (MCF_BUSCLK / mcfrs_console_baud) / 32; | ||
1845 | } | ||
1846 | #else | ||
1800 | clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */ | 1847 | clk = ((MCF_BUSCLK / mcfrs_console_baud) + 16) / 32; /* set baud */ |
1848 | #endif | ||
1849 | |||
1801 | uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8; /* set msb baud */ | 1850 | uartp[MCFUART_UBG1] = (clk & 0xff00) >> 8; /* set msb baud */ |
1802 | uartp[MCFUART_UBG2] = (clk & 0xff); /* set lsb baud */ | 1851 | uartp[MCFUART_UBG2] = (clk & 0xff); /* set lsb baud */ |
1803 | |||
1804 | uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER; | 1852 | uartp[MCFUART_UCSR] = MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER; |
1805 | uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE; | 1853 | uartp[MCFUART_UCR] = MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE; |
1806 | 1854 | ||
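The MCF5272 console-setup hunk in mcfserial.c adds a fractional baud divisor: the integer divisor is the bus clock divided by 32 times the baud rate, and the leftover clock cycles are expressed as sixteenths of a divisor step written to UFPD. The standalone program below works the arithmetic for 115200 baud; the 66 MHz bus clock is an assumption for illustration, since the real MCF_BUSCLK depends on the board.

        #include <stdio.h>

        int main(void)
        {
                const unsigned long busclk = 66000000;  /* assumed bus clock */
                const unsigned long baud   = 115200;
                unsigned long clk, rem, ufpd;

                clk  = (busclk / baud) / 32;            /* integer divisor: 17 */
                rem  = busclk - clk * 32 * baud;        /* leftover clock cycles */
                ufpd = (rem * 16) / (32 * baud);        /* sixteenths: 14 (0xE) */

                printf("divisor %lu + %lu/16 -> about %lu baud\n",
                       clk, ufpd, (busclk * 16) / (32 * (clk * 16 + ufpd)));
                return 0;
        }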
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index cfcc3caf49d8..3b5f19ec2126 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
@@ -775,7 +775,7 @@ static int sci_notifier(struct notifier_block *self, | |||
775 | * | 775 | * |
776 | * Clean this up later.. | 776 | * Clean this up later.. |
777 | */ | 777 | */ |
778 | clk = clk_get("module_clk"); | 778 | clk = clk_get(NULL, "module_clk"); |
779 | port->uartclk = clk_get_rate(clk) * 16; | 779 | port->uartclk = clk_get_rate(clk) * 16; |
780 | clk_put(clk); | 780 | clk_put(clk); |
781 | } | 781 | } |
@@ -960,7 +960,7 @@ static void sci_set_termios(struct uart_port *port, struct termios *termios, | |||
960 | default: | 960 | default: |
961 | { | 961 | { |
962 | #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) | 962 | #if defined(CONFIG_SUPERH) && !defined(CONFIG_SUPERH64) |
963 | struct clk *clk = clk_get("module_clk"); | 963 | struct clk *clk = clk_get(NULL, "module_clk"); |
964 | t = SCBRR_VALUE(baud, clk_get_rate(clk)); | 964 | t = SCBRR_VALUE(baud, clk_get_rate(clk)); |
965 | clk_put(clk); | 965 | clk_put(clk); |
966 | #else | 966 | #else |
@@ -1128,7 +1128,7 @@ static void __init sci_init_ports(void) | |||
1128 | * XXX: We should use a proper SCI/SCIF clock | 1128 | * XXX: We should use a proper SCI/SCIF clock |
1129 | */ | 1129 | */ |
1130 | { | 1130 | { |
1131 | struct clk *clk = clk_get("module_clk"); | 1131 | struct clk *clk = clk_get(NULL, "module_clk"); |
1132 | sci_ports[i].port.uartclk = clk_get_rate(clk) * 16; | 1132 | sci_ports[i].port.uartclk = clk_get_rate(clk) * 16; |
1133 | clk_put(clk); | 1133 | clk_put(clk); |
1134 | } | 1134 | } |
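The sh-sci.c hunks track a separate API change: clk_get() now takes a struct device * as its first argument, with NULL acceptable when a system clock is looked up purely by name. A minimal hedged sketch of the new call shape (the helper name is invented; the error check is an addition for completeness, the driver code above does not make it):

        #include <linux/clk.h>
        #include <linux/err.h>

        unsigned long module_clk_rate(void)
        {
                struct clk *clk = clk_get(NULL, "module_clk");
                unsigned long rate;

                if (IS_ERR(clk))
                        return 0;
                rate = clk_get_rate(clk);
                clk_put(clk);
                return rate;
        }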
diff --git a/drivers/serial/sh-sci.h b/drivers/serial/sh-sci.h index 7ee992146ae9..e4557cc4f74b 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h | |||
@@ -133,6 +133,20 @@ | |||
133 | # define SCIF_ORER 0x0001 /* Overrun error bit */ | 133 | # define SCIF_ORER 0x0001 /* Overrun error bit */ |
134 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | 134 | # define SCSCR_INIT(port) 0x3a /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ |
135 | # define SCIF_ONLY | 135 | # define SCIF_ONLY |
136 | #elif defined(CONFIG_CPU_SUBTYPE_SH7206) | ||
137 | # define SCSPTR0 0xfffe8020 /* 16 bit SCIF */ | ||
138 | # define SCSPTR1 0xfffe8820 /* 16 bit SCIF */ | ||
139 | # define SCSPTR2 0xfffe9020 /* 16 bit SCIF */ | ||
140 | # define SCSPTR3 0xfffe9820 /* 16 bit SCIF */ | ||
141 | # define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | ||
142 | # define SCIF_ONLY | ||
143 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
144 | # define SCSPTR0 0xf8400020 /* 16 bit SCIF */ | ||
145 | # define SCSPTR1 0xf8410020 /* 16 bit SCIF */ | ||
146 | # define SCSPTR2 0xf8420020 /* 16 bit SCIF */ | ||
147 | # define SCIF_ORER 0x0001 /* overrun error bit */ | ||
148 | # define SCSCR_INIT(port) 0x38 /* TIE=0,RIE=0,TE=1,RE=1,REIE=1 */ | ||
149 | # define SCIF_ONLY | ||
136 | #else | 150 | #else |
137 | # error CPU subtype not defined | 151 | # error CPU subtype not defined |
138 | #endif | 152 | #endif |
@@ -365,6 +379,7 @@ SCIx_FNS(SCxSR, 0x08, 8, 0x10, 8, 0x08, 16, 0x10, 16, 0x04, 8) | |||
365 | SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) | 379 | SCIx_FNS(SCxRDR, 0x0a, 8, 0x14, 8, 0x0A, 8, 0x14, 8, 0x05, 8) |
366 | SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) | 380 | SCIF_FNS(SCFCR, 0x0c, 8, 0x18, 16) |
367 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780) | 381 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || defined(CONFIG_CPU_SUBTYPE_SH7780) |
382 | SCIF_FNS(SCFDR, 0x0e, 16, 0x1C, 16) | ||
368 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) | 383 | SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16) |
369 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) | 384 | SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16) |
370 | SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) | 385 | SCIF_FNS(SCSPTR, 0, 0, 0x24, 16) |
@@ -544,6 +559,28 @@ static inline int sci_rxd_in(struct uart_port *port) | |||
544 | if (port->mapbase == 0xffe10000) | 559 | if (port->mapbase == 0xffe10000) |
545 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | 560 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ |
546 | } | 561 | } |
562 | #elif defined(CONFIG_CPU_SUBTYPE_SH7206) | ||
563 | static inline int sci_rxd_in(struct uart_port *port) | ||
564 | { | ||
565 | if (port->mapbase == 0xfffe8000) | ||
566 | return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
567 | if (port->mapbase == 0xfffe8800) | ||
568 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
569 | if (port->mapbase == 0xfffe9000) | ||
570 | return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
571 | if (port->mapbase == 0xfffe9800) | ||
572 | return ctrl_inw(SCSPTR3) & 0x0001 ? 1 : 0; /* SCIF */ | ||
573 | } | ||
574 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
575 | static inline int sci_rxd_in(struct uart_port *port) | ||
576 | { | ||
577 | if (port->mapbase == 0xf8400000) | ||
578 | return ctrl_inw(SCSPTR0) & 0x0001 ? 1 : 0; /* SCIF */ | ||
579 | if (port->mapbase == 0xf8410000) | ||
580 | return ctrl_inw(SCSPTR1) & 0x0001 ? 1 : 0; /* SCIF */ | ||
581 | if (port->mapbase == 0xf8420000) | ||
582 | return ctrl_inw(SCSPTR2) & 0x0001 ? 1 : 0; /* SCIF */ | ||
583 | } | ||
547 | #endif | 584 | #endif |
548 | 585 | ||
549 | /* | 586 | /* |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 72025df5561d..494d9b856488 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -148,7 +148,7 @@ struct chip_data { | |||
148 | void (*cs_control)(u32 command); | 148 | void (*cs_control)(u32 command); |
149 | }; | 149 | }; |
150 | 150 | ||
151 | static void pump_messages(void *data); | 151 | static void pump_messages(struct work_struct *work); |
152 | 152 | ||
153 | static int flush(struct driver_data *drv_data) | 153 | static int flush(struct driver_data *drv_data) |
154 | { | 154 | { |
@@ -884,9 +884,10 @@ static void pump_transfers(unsigned long data) | |||
884 | } | 884 | } |
885 | } | 885 | } |
886 | 886 | ||
887 | static void pump_messages(void *data) | 887 | static void pump_messages(struct work_struct *work) |
888 | { | 888 | { |
889 | struct driver_data *drv_data = data; | 889 | struct driver_data *drv_data = |
890 | container_of(work, struct driver_data, pump_messages); | ||
890 | unsigned long flags; | 891 | unsigned long flags; |
891 | 892 | ||
892 | /* Lock queue and check for queue work */ | 893 | /* Lock queue and check for queue work */ |
@@ -1098,7 +1099,7 @@ static int init_queue(struct driver_data *drv_data) | |||
1098 | tasklet_init(&drv_data->pump_transfers, | 1099 | tasklet_init(&drv_data->pump_transfers, |
1099 | pump_transfers, (unsigned long)drv_data); | 1100 | pump_transfers, (unsigned long)drv_data); |
1100 | 1101 | ||
1101 | INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); | 1102 | INIT_WORK(&drv_data->pump_messages, pump_messages); |
1102 | drv_data->workqueue = create_singlethread_workqueue( | 1103 | drv_data->workqueue = create_singlethread_workqueue( |
1103 | drv_data->master->cdev.dev->bus_id); | 1104 | drv_data->master->cdev.dev->bus_id); |
1104 | if (drv_data->workqueue == NULL) | 1105 | if (drv_data->workqueue == NULL) |
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index a23862ef72b2..08c1c57c6128 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -265,9 +265,10 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
265 | * Drivers can provide word-at-a-time i/o primitives, or provide | 265 | * Drivers can provide word-at-a-time i/o primitives, or provide |
266 | * transfer-at-a-time ones to leverage dma or fifo hardware. | 266 | * transfer-at-a-time ones to leverage dma or fifo hardware. |
267 | */ | 267 | */ |
268 | static void bitbang_work(void *_bitbang) | 268 | static void bitbang_work(struct work_struct *work) |
269 | { | 269 | { |
270 | struct spi_bitbang *bitbang = _bitbang; | 270 | struct spi_bitbang *bitbang = |
271 | container_of(work, struct spi_bitbang, work); | ||
271 | unsigned long flags; | 272 | unsigned long flags; |
272 | 273 | ||
273 | spin_lock_irqsave(&bitbang->lock, flags); | 274 | spin_lock_irqsave(&bitbang->lock, flags); |
@@ -456,7 +457,7 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
456 | if (!bitbang->master || !bitbang->chipselect) | 457 | if (!bitbang->master || !bitbang->chipselect) |
457 | return -EINVAL; | 458 | return -EINVAL; |
458 | 459 | ||
459 | INIT_WORK(&bitbang->work, bitbang_work, bitbang); | 460 | INIT_WORK(&bitbang->work, bitbang_work); |
460 | spin_lock_init(&bitbang->lock); | 461 | spin_lock_init(&bitbang->lock); |
461 | INIT_LIST_HEAD(&bitbang->queue); | 462 | INIT_LIST_HEAD(&bitbang->queue); |
462 | 463 | ||
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index e6565633ba0f..3dfa3e40e148 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c | |||
@@ -158,7 +158,7 @@ struct cxacru_data { | |||
158 | const struct cxacru_modem_type *modem_type; | 158 | const struct cxacru_modem_type *modem_type; |
159 | 159 | ||
160 | int line_status; | 160 | int line_status; |
161 | struct work_struct poll_work; | 161 | struct delayed_work poll_work; |
162 | 162 | ||
163 | /* contol handles */ | 163 | /* contol handles */ |
164 | struct mutex cm_serialize; | 164 | struct mutex cm_serialize; |
@@ -347,7 +347,7 @@ static int cxacru_card_status(struct cxacru_data *instance) | |||
347 | return 0; | 347 | return 0; |
348 | } | 348 | } |
349 | 349 | ||
350 | static void cxacru_poll_status(struct cxacru_data *instance); | 350 | static void cxacru_poll_status(struct work_struct *work); |
351 | 351 | ||
352 | static int cxacru_atm_start(struct usbatm_data *usbatm_instance, | 352 | static int cxacru_atm_start(struct usbatm_data *usbatm_instance, |
353 | struct atm_dev *atm_dev) | 353 | struct atm_dev *atm_dev) |
@@ -376,12 +376,14 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance, | |||
376 | } | 376 | } |
377 | 377 | ||
378 | /* Start status polling */ | 378 | /* Start status polling */ |
379 | cxacru_poll_status(instance); | 379 | cxacru_poll_status(&instance->poll_work.work); |
380 | return 0; | 380 | return 0; |
381 | } | 381 | } |
382 | 382 | ||
383 | static void cxacru_poll_status(struct cxacru_data *instance) | 383 | static void cxacru_poll_status(struct work_struct *work) |
384 | { | 384 | { |
385 | struct cxacru_data *instance = | ||
386 | container_of(work, struct cxacru_data, poll_work.work); | ||
385 | u32 buf[CXINF_MAX] = {}; | 387 | u32 buf[CXINF_MAX] = {}; |
386 | struct usbatm_data *usbatm = instance->usbatm; | 388 | struct usbatm_data *usbatm = instance->usbatm; |
387 | struct atm_dev *atm_dev = usbatm->atm_dev; | 389 | struct atm_dev *atm_dev = usbatm->atm_dev; |
@@ -720,7 +722,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, | |||
720 | 722 | ||
721 | mutex_init(&instance->cm_serialize); | 723 | mutex_init(&instance->cm_serialize); |
722 | 724 | ||
723 | INIT_WORK(&instance->poll_work, (void *)cxacru_poll_status, instance); | 725 | INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); |
724 | 726 | ||
725 | usbatm_instance->driver_data = instance; | 727 | usbatm_instance->driver_data = instance; |
726 | 728 | ||
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c index a823486495c3..8ed6c75adf0f 100644 --- a/drivers/usb/atm/speedtch.c +++ b/drivers/usb/atm/speedtch.c | |||
@@ -142,7 +142,7 @@ struct speedtch_instance_data { | |||
142 | 142 | ||
143 | struct speedtch_params params; /* set in probe, constant afterwards */ | 143 | struct speedtch_params params; /* set in probe, constant afterwards */ |
144 | 144 | ||
145 | struct work_struct status_checker; | 145 | struct delayed_work status_checker; |
146 | 146 | ||
147 | unsigned char last_status; | 147 | unsigned char last_status; |
148 | 148 | ||
@@ -498,8 +498,11 @@ static int speedtch_start_synchro(struct speedtch_instance_data *instance) | |||
498 | return ret; | 498 | return ret; |
499 | } | 499 | } |
500 | 500 | ||
501 | static void speedtch_check_status(struct speedtch_instance_data *instance) | 501 | static void speedtch_check_status(struct work_struct *work) |
502 | { | 502 | { |
503 | struct speedtch_instance_data *instance = | ||
504 | container_of(work, struct speedtch_instance_data, | ||
505 | status_checker.work); | ||
503 | struct usbatm_data *usbatm = instance->usbatm; | 506 | struct usbatm_data *usbatm = instance->usbatm; |
504 | struct atm_dev *atm_dev = usbatm->atm_dev; | 507 | struct atm_dev *atm_dev = usbatm->atm_dev; |
505 | unsigned char *buf = instance->scratch_buffer; | 508 | unsigned char *buf = instance->scratch_buffer; |
@@ -576,7 +579,7 @@ static void speedtch_status_poll(unsigned long data) | |||
576 | { | 579 | { |
577 | struct speedtch_instance_data *instance = (void *)data; | 580 | struct speedtch_instance_data *instance = (void *)data; |
578 | 581 | ||
579 | schedule_work(&instance->status_checker); | 582 | schedule_delayed_work(&instance->status_checker, 0); |
580 | 583 | ||
581 | /* The following check is racy, but the race is harmless */ | 584 | /* The following check is racy, but the race is harmless */ |
582 | if (instance->poll_delay < MAX_POLL_DELAY) | 585 | if (instance->poll_delay < MAX_POLL_DELAY) |
@@ -596,7 +599,7 @@ static void speedtch_resubmit_int(unsigned long data) | |||
596 | if (int_urb) { | 599 | if (int_urb) { |
597 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); | 600 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); |
598 | if (!ret) | 601 | if (!ret) |
599 | schedule_work(&instance->status_checker); | 602 | schedule_delayed_work(&instance->status_checker, 0); |
600 | else { | 603 | else { |
601 | atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); | 604 | atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); |
602 | mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); | 605 | mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY)); |
@@ -640,7 +643,7 @@ static void speedtch_handle_int(struct urb *int_urb) | |||
640 | 643 | ||
641 | if ((int_urb = instance->int_urb)) { | 644 | if ((int_urb = instance->int_urb)) { |
642 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); | 645 | ret = usb_submit_urb(int_urb, GFP_ATOMIC); |
643 | schedule_work(&instance->status_checker); | 646 | schedule_delayed_work(&instance->status_checker, 0); |
644 | if (ret < 0) { | 647 | if (ret < 0) { |
645 | atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); | 648 | atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret); |
646 | goto fail; | 649 | goto fail; |
@@ -855,7 +858,7 @@ static int speedtch_bind(struct usbatm_data *usbatm, | |||
855 | 858 | ||
856 | usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); | 859 | usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0); |
857 | 860 | ||
858 | INIT_WORK(&instance->status_checker, (void *)speedtch_check_status, instance); | 861 | INIT_DELAYED_WORK(&instance->status_checker, speedtch_check_status); |
859 | 862 | ||
860 | instance->status_checker.timer.function = speedtch_status_poll; | 863 | instance->status_checker.timer.function = speedtch_status_poll; |
861 | instance->status_checker.timer.data = (unsigned long)instance; | 864 | instance->status_checker.timer.data = (unsigned long)instance; |
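
The speedtch conversion above shows the recurring delayed_work pattern in this series: the timer-backed work item becomes a struct delayed_work, the handler now takes a struct work_struct * and recovers its owner through container_of() on the .work member, and a zero delay replaces the old schedule_work() call. A minimal sketch of that pattern, with invented names (my_dev, my_dev_poll are not part of this patch):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_dev {
            struct delayed_work poll_work;  /* was: struct work_struct */
            int state;
    };

    /* new-style handler: receives the work item, not a void * cookie */
    static void my_dev_poll(struct work_struct *work)
    {
            /* walk from the embedded work_struct (the .work member of
             * the delayed_work) back to the owning structure */
            struct my_dev *dev =
                    container_of(work, struct my_dev, poll_work.work);

            dev->state++;
            /* re-arm; a delay of 0 behaves like the old schedule_work() */
            schedule_delayed_work(&dev->poll_work, msecs_to_jiffies(100));
    }

    static void my_dev_init(struct my_dev *dev)
    {
            /* was: INIT_WORK(&dev->poll_work, my_dev_poll, dev); */
            INIT_DELAYED_WORK(&dev->poll_work, my_dev_poll);
            schedule_delayed_work(&dev->poll_work, 0);
    }
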
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index c137c041f7a4..f2d196fa1e8b 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -655,9 +655,9 @@ static int request_dsp(struct uea_softc *sc) | |||
655 | /* | 655 | /* |
656 | * The uea_load_page() function must be called within a process context | 656 | * The uea_load_page() function must be called within a process context |
657 | */ | 657 | */ |
658 | static void uea_load_page(void *xsc) | 658 | static void uea_load_page(struct work_struct *work) |
659 | { | 659 | { |
660 | struct uea_softc *sc = xsc; | 660 | struct uea_softc *sc = container_of(work, struct uea_softc, task); |
661 | u16 pageno = sc->pageno; | 661 | u16 pageno = sc->pageno; |
662 | u16 ovl = sc->ovl; | 662 | u16 ovl = sc->ovl; |
663 | struct block_info bi; | 663 | struct block_info bi; |
@@ -1348,7 +1348,7 @@ static int uea_boot(struct uea_softc *sc) | |||
1348 | 1348 | ||
1349 | uea_enters(INS_TO_USBDEV(sc)); | 1349 | uea_enters(INS_TO_USBDEV(sc)); |
1350 | 1350 | ||
1351 | INIT_WORK(&sc->task, uea_load_page, sc); | 1351 | INIT_WORK(&sc->task, uea_load_page); |
1352 | init_waitqueue_head(&sc->sync_q); | 1352 | init_waitqueue_head(&sc->sync_q); |
1353 | init_waitqueue_head(&sc->cmv_ack_wait); | 1353 | init_waitqueue_head(&sc->cmv_ack_wait); |
1354 | 1354 | ||
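
For plain (non-delayed) work items such as ueagle-atm's sc->task, the conversion is the same minus the delayed_work wrapper: INIT_WORK() loses its data argument and the handler uses container_of() directly on the embedded member. A short sketch under hypothetical names:

    #include <linux/workqueue.h>

    struct my_ctx {
            struct work_struct task;
            int pageno;
    };

    static void my_load_page(struct work_struct *work)
    {
            struct my_ctx *ctx = container_of(work, struct my_ctx, task);

            /* ... use ctx->pageno ... */
            ctx->pageno++;
    }

    static void my_ctx_init(struct my_ctx *ctx)
    {
            INIT_WORK(&ctx->task, my_load_page);    /* no third argument */
            schedule_work(&ctx->task);
    }
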
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index ec3438dc8ee5..7f1fa956dcdb 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -421,9 +421,9 @@ static void acm_write_bulk(struct urb *urb) | |||
421 | schedule_work(&acm->work); | 421 | schedule_work(&acm->work); |
422 | } | 422 | } |
423 | 423 | ||
424 | static void acm_softint(void *private) | 424 | static void acm_softint(struct work_struct *work) |
425 | { | 425 | { |
426 | struct acm *acm = private; | 426 | struct acm *acm = container_of(work, struct acm, work); |
427 | dbg("Entering acm_softint."); | 427 | dbg("Entering acm_softint."); |
428 | 428 | ||
429 | if (!ACM_READY(acm)) | 429 | if (!ACM_READY(acm)) |
@@ -927,7 +927,7 @@ skip_normal_probe: | |||
927 | acm->rx_buflimit = num_rx_buf; | 927 | acm->rx_buflimit = num_rx_buf; |
928 | acm->urb_task.func = acm_rx_tasklet; | 928 | acm->urb_task.func = acm_rx_tasklet; |
929 | acm->urb_task.data = (unsigned long) acm; | 929 | acm->urb_task.data = (unsigned long) acm; |
930 | INIT_WORK(&acm->work, acm_softint, acm); | 930 | INIT_WORK(&acm->work, acm_softint); |
931 | spin_lock_init(&acm->throttle_lock); | 931 | spin_lock_init(&acm->throttle_lock); |
932 | spin_lock_init(&acm->write_lock); | 932 | spin_lock_init(&acm->write_lock); |
933 | spin_lock_init(&acm->read_lock); | 933 | spin_lock_init(&acm->read_lock); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 0ce393eb3c4b..9be41ed1f9a6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -68,7 +68,7 @@ struct usb_hub { | |||
68 | 68 | ||
69 | unsigned has_indicators:1; | 69 | unsigned has_indicators:1; |
70 | u8 indicator[USB_MAXCHILDREN]; | 70 | u8 indicator[USB_MAXCHILDREN]; |
71 | struct work_struct leds; | 71 | struct delayed_work leds; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | 74 | ||
@@ -218,9 +218,10 @@ static void set_port_led( | |||
218 | 218 | ||
219 | #define LED_CYCLE_PERIOD ((2*HZ)/3) | 219 | #define LED_CYCLE_PERIOD ((2*HZ)/3) |
220 | 220 | ||
221 | static void led_work (void *__hub) | 221 | static void led_work (struct work_struct *work) |
222 | { | 222 | { |
223 | struct usb_hub *hub = __hub; | 223 | struct usb_hub *hub = |
224 | container_of(work, struct usb_hub, leds.work); | ||
224 | struct usb_device *hdev = hub->hdev; | 225 | struct usb_device *hdev = hub->hdev; |
225 | unsigned i; | 226 | unsigned i; |
226 | unsigned changed = 0; | 227 | unsigned changed = 0; |
@@ -405,9 +406,10 @@ hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) | |||
405 | * talking to TTs must queue control transfers (not just bulk and iso), so | 406 | * talking to TTs must queue control transfers (not just bulk and iso), so |
406 | * both can talk to the same hub concurrently. | 407 | * both can talk to the same hub concurrently. |
407 | */ | 408 | */ |
408 | static void hub_tt_kevent (void *arg) | 409 | static void hub_tt_kevent (struct work_struct *work) |
409 | { | 410 | { |
410 | struct usb_hub *hub = arg; | 411 | struct usb_hub *hub = |
412 | container_of(work, struct usb_hub, tt.kevent); | ||
411 | unsigned long flags; | 413 | unsigned long flags; |
412 | 414 | ||
413 | spin_lock_irqsave (&hub->tt.lock, flags); | 415 | spin_lock_irqsave (&hub->tt.lock, flags); |
@@ -694,7 +696,7 @@ static int hub_configure(struct usb_hub *hub, | |||
694 | 696 | ||
695 | spin_lock_init (&hub->tt.lock); | 697 | spin_lock_init (&hub->tt.lock); |
696 | INIT_LIST_HEAD (&hub->tt.clear_list); | 698 | INIT_LIST_HEAD (&hub->tt.clear_list); |
697 | INIT_WORK (&hub->tt.kevent, hub_tt_kevent, hub); | 699 | INIT_WORK (&hub->tt.kevent, hub_tt_kevent); |
698 | switch (hdev->descriptor.bDeviceProtocol) { | 700 | switch (hdev->descriptor.bDeviceProtocol) { |
699 | case 0: | 701 | case 0: |
700 | break; | 702 | break; |
@@ -938,7 +940,7 @@ descriptor_error: | |||
938 | INIT_LIST_HEAD(&hub->event_list); | 940 | INIT_LIST_HEAD(&hub->event_list); |
939 | hub->intfdev = &intf->dev; | 941 | hub->intfdev = &intf->dev; |
940 | hub->hdev = hdev; | 942 | hub->hdev = hdev; |
941 | INIT_WORK(&hub->leds, led_work, hub); | 943 | INIT_DELAYED_WORK(&hub->leds, led_work); |
942 | 944 | ||
943 | usb_set_intfdata (intf, hub); | 945 | usb_set_intfdata (intf, hub); |
944 | intf->needs_remote_wakeup = 1; | 946 | intf->needs_remote_wakeup = 1; |
@@ -2381,7 +2383,7 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) | |||
2381 | /* hub LEDs are probably harder to miss than syslog */ | 2383 | /* hub LEDs are probably harder to miss than syslog */ |
2382 | if (hub->has_indicators) { | 2384 | if (hub->has_indicators) { |
2383 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; | 2385 | hub->indicator[port1-1] = INDICATOR_GREEN_BLINK; |
2384 | schedule_work (&hub->leds); | 2386 | schedule_delayed_work (&hub->leds, 0); |
2385 | } | 2387 | } |
2386 | } | 2388 | } |
2387 | kfree(qual); | 2389 | kfree(qual); |
@@ -2555,7 +2557,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2555 | if (hub->has_indicators) { | 2557 | if (hub->has_indicators) { |
2556 | hub->indicator[port1-1] = | 2558 | hub->indicator[port1-1] = |
2557 | INDICATOR_AMBER_BLINK; | 2559 | INDICATOR_AMBER_BLINK; |
2558 | schedule_work (&hub->leds); | 2560 | schedule_delayed_work (&hub->leds, 0); |
2559 | } | 2561 | } |
2560 | status = -ENOTCONN; /* Don't retry */ | 2562 | status = -ENOTCONN; /* Don't retry */ |
2561 | goto loop_disable; | 2563 | goto loop_disable; |
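
Note that container_of() is plain offsetof() arithmetic, so it also resolves through nested members; that is why hub_tt_kevent() above can name hub->tt.kevent even though the work item sits one struct deep. Illustrative types only, not taken from hub.c:

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct tt_state {
            spinlock_t lock;
            struct work_struct kevent;
    };

    struct my_hub {
            int nports;
            struct tt_state tt;             /* work item nested inside */
    };

    static void my_tt_work(struct work_struct *work)
    {
            /* the member path names the embedded item directly */
            struct my_hub *hub = container_of(work, struct my_hub, tt.kevent);

            (void)hub->nports;              /* ... process the hub ... */
    }
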
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 29b0fa9ff9d0..7390b67c609d 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c | |||
@@ -1501,9 +1501,10 @@ struct set_config_request { | |||
1501 | }; | 1501 | }; |
1502 | 1502 | ||
1503 | /* Worker routine for usb_driver_set_configuration() */ | 1503 | /* Worker routine for usb_driver_set_configuration() */ |
1504 | static void driver_set_config_work(void *_req) | 1504 | static void driver_set_config_work(struct work_struct *work) |
1505 | { | 1505 | { |
1506 | struct set_config_request *req = _req; | 1506 | struct set_config_request *req = |
1507 | container_of(work, struct set_config_request, work); | ||
1507 | 1508 | ||
1508 | usb_lock_device(req->udev); | 1509 | usb_lock_device(req->udev); |
1509 | usb_set_configuration(req->udev, req->config); | 1510 | usb_set_configuration(req->udev, req->config); |
@@ -1541,7 +1542,7 @@ int usb_driver_set_configuration(struct usb_device *udev, int config) | |||
1541 | return -ENOMEM; | 1542 | return -ENOMEM; |
1542 | req->udev = udev; | 1543 | req->udev = udev; |
1543 | req->config = config; | 1544 | req->config = config; |
1544 | INIT_WORK(&req->work, driver_set_config_work, req); | 1545 | INIT_WORK(&req->work, driver_set_config_work); |
1545 | 1546 | ||
1546 | usb_get_dev(udev); | 1547 | usb_get_dev(udev); |
1547 | if (!schedule_work(&req->work)) { | 1548 | if (!schedule_work(&req->work)) { |
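
usb_driver_set_configuration() illustrates the one-shot variant: the work_struct is embedded in a kmalloc()'d request, and the handler recovers the request via container_of() and frees it when done. A condensed sketch assuming hypothetical names (my_request and friends are not in the patch):

    #include <linux/workqueue.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_request {
            struct work_struct work;
            int config;
    };

    static void my_request_fn(struct work_struct *work)
    {
            struct my_request *req =
                    container_of(work, struct my_request, work);

            /* ... act on req->config ... */
            kfree(req);                     /* one-shot: handler frees it */
    }

    static int my_request_submit(int config)
    {
            struct my_request *req = kmalloc(sizeof(*req), GFP_KERNEL);

            if (!req)
                    return -ENOMEM;
            req->config = config;
            INIT_WORK(&req->work, my_request_fn);
            schedule_work(&req->work);      /* freshly initialised, always queues */
            return 0;
    }
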
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 81cb52564e68..02426d0b9a34 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c | |||
@@ -203,9 +203,10 @@ static void ksuspend_usb_cleanup(void) | |||
203 | #ifdef CONFIG_USB_SUSPEND | 203 | #ifdef CONFIG_USB_SUSPEND |
204 | 204 | ||
205 | /* usb_autosuspend_work - callback routine to autosuspend a USB device */ | 205 | /* usb_autosuspend_work - callback routine to autosuspend a USB device */ |
206 | static void usb_autosuspend_work(void *_udev) | 206 | static void usb_autosuspend_work(struct work_struct *work) |
207 | { | 207 | { |
208 | struct usb_device *udev = _udev; | 208 | struct usb_device *udev = |
209 | container_of(work, struct usb_device, autosuspend.work); | ||
209 | 210 | ||
210 | usb_pm_lock(udev); | 211 | usb_pm_lock(udev); |
211 | udev->auto_pm = 1; | 212 | udev->auto_pm = 1; |
@@ -215,7 +216,7 @@ static void usb_autosuspend_work(void *_udev) | |||
215 | 216 | ||
216 | #else | 217 | #else |
217 | 218 | ||
218 | static void usb_autosuspend_work(void *_udev) | 219 | static void usb_autosuspend_work(struct work_struct *work) |
219 | {} | 220 | {} |
220 | 221 | ||
221 | #endif /* CONFIG_USB_SUSPEND */ | 222 | #endif /* CONFIG_USB_SUSPEND */ |
@@ -304,7 +305,7 @@ usb_alloc_dev(struct usb_device *parent, struct usb_bus *bus, unsigned port1) | |||
304 | 305 | ||
305 | #ifdef CONFIG_PM | 306 | #ifdef CONFIG_PM |
306 | mutex_init(&dev->pm_mutex); | 307 | mutex_init(&dev->pm_mutex); |
307 | INIT_WORK(&dev->autosuspend, usb_autosuspend_work, dev); | 308 | INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); |
308 | #endif | 309 | #endif |
309 | return dev; | 310 | return dev; |
310 | } | 311 | } |
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index 3bd1dfe565c1..d15bf22b9a03 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c | |||
@@ -1833,9 +1833,9 @@ static void rx_fill (struct eth_dev *dev, gfp_t gfp_flags) | |||
1833 | spin_unlock_irqrestore(&dev->req_lock, flags); | 1833 | spin_unlock_irqrestore(&dev->req_lock, flags); |
1834 | } | 1834 | } |
1835 | 1835 | ||
1836 | static void eth_work (void *_dev) | 1836 | static void eth_work (struct work_struct *work) |
1837 | { | 1837 | { |
1838 | struct eth_dev *dev = _dev; | 1838 | struct eth_dev *dev = container_of(work, struct eth_dev, work); |
1839 | 1839 | ||
1840 | if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { | 1840 | if (test_and_clear_bit (WORK_RX_MEMORY, &dev->todo)) { |
1841 | if (netif_running (dev->net)) | 1841 | if (netif_running (dev->net)) |
@@ -2398,7 +2398,7 @@ autoconf_fail: | |||
2398 | dev = netdev_priv(net); | 2398 | dev = netdev_priv(net); |
2399 | spin_lock_init (&dev->lock); | 2399 | spin_lock_init (&dev->lock); |
2400 | spin_lock_init (&dev->req_lock); | 2400 | spin_lock_init (&dev->req_lock); |
2401 | INIT_WORK (&dev->work, eth_work, dev); | 2401 | INIT_WORK (&dev->work, eth_work); |
2402 | INIT_LIST_HEAD (&dev->tx_reqs); | 2402 | INIT_LIST_HEAD (&dev->tx_reqs); |
2403 | INIT_LIST_HEAD (&dev->rx_reqs); | 2403 | INIT_LIST_HEAD (&dev->rx_reqs); |
2404 | 2404 | ||
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c index ef54e310bfc4..a9d7119e3176 100644 --- a/drivers/usb/host/u132-hcd.c +++ b/drivers/usb/host/u132-hcd.c | |||
@@ -163,7 +163,7 @@ struct u132_endp { | |||
163 | u16 queue_next; | 163 | u16 queue_next; |
164 | struct urb *urb_list[ENDP_QUEUE_SIZE]; | 164 | struct urb *urb_list[ENDP_QUEUE_SIZE]; |
165 | struct list_head urb_more; | 165 | struct list_head urb_more; |
166 | struct work_struct scheduler; | 166 | struct delayed_work scheduler; |
167 | }; | 167 | }; |
168 | struct u132_ring { | 168 | struct u132_ring { |
169 | unsigned in_use:1; | 169 | unsigned in_use:1; |
@@ -171,7 +171,7 @@ struct u132_ring { | |||
171 | u8 number; | 171 | u8 number; |
172 | struct u132 *u132; | 172 | struct u132 *u132; |
173 | struct u132_endp *curr_endp; | 173 | struct u132_endp *curr_endp; |
174 | struct work_struct scheduler; | 174 | struct delayed_work scheduler; |
175 | }; | 175 | }; |
176 | #define OHCI_QUIRK_AMD756 0x01 | 176 | #define OHCI_QUIRK_AMD756 0x01 |
177 | #define OHCI_QUIRK_SUPERIO 0x02 | 177 | #define OHCI_QUIRK_SUPERIO 0x02 |
@@ -198,7 +198,7 @@ struct u132 { | |||
198 | u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; | 198 | u32 hc_roothub_portstatus[MAX_ROOT_PORTS]; |
199 | int flags; | 199 | int flags; |
200 | unsigned long next_statechange; | 200 | unsigned long next_statechange; |
201 | struct work_struct monitor; | 201 | struct delayed_work monitor; |
202 | int num_endpoints; | 202 | int num_endpoints; |
203 | struct u132_addr addr[MAX_U132_ADDRS]; | 203 | struct u132_addr addr[MAX_U132_ADDRS]; |
204 | struct u132_udev udev[MAX_U132_UDEVS]; | 204 | struct u132_udev udev[MAX_U132_UDEVS]; |
@@ -310,7 +310,7 @@ static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring, | |||
310 | if (delta > 0) { | 310 | if (delta > 0) { |
311 | if (queue_delayed_work(workqueue, &ring->scheduler, delta)) | 311 | if (queue_delayed_work(workqueue, &ring->scheduler, delta)) |
312 | return; | 312 | return; |
313 | } else if (queue_work(workqueue, &ring->scheduler)) | 313 | } else if (queue_delayed_work(workqueue, &ring->scheduler, 0)) |
314 | return; | 314 | return; |
315 | kref_put(&u132->kref, u132_hcd_delete); | 315 | kref_put(&u132->kref, u132_hcd_delete); |
316 | return; | 316 | return; |
@@ -389,12 +389,8 @@ static inline void u132_endp_init_kref(struct u132 *u132, | |||
389 | static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, | 389 | static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp, |
390 | unsigned int delta) | 390 | unsigned int delta) |
391 | { | 391 | { |
392 | if (delta > 0) { | 392 | if (queue_delayed_work(workqueue, &endp->scheduler, delta)) |
393 | if (queue_delayed_work(workqueue, &endp->scheduler, delta)) | 393 | kref_get(&endp->kref); |
394 | kref_get(&endp->kref); | ||
395 | } else if (queue_work(workqueue, &endp->scheduler)) | ||
396 | kref_get(&endp->kref); | ||
397 | return; | ||
398 | } | 394 | } |
399 | 395 | ||
400 | static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) | 396 | static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp) |
@@ -410,24 +406,14 @@ static inline void u132_monitor_put_kref(struct u132 *u132) | |||
410 | 406 | ||
411 | static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) | 407 | static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta) |
412 | { | 408 | { |
413 | if (delta > 0) { | 409 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) |
414 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) { | 410 | kref_get(&u132->kref); |
415 | kref_get(&u132->kref); | ||
416 | } | ||
417 | } else if (queue_work(workqueue, &u132->monitor)) | ||
418 | kref_get(&u132->kref); | ||
419 | return; | ||
420 | } | 411 | } |
421 | 412 | ||
422 | static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) | 413 | static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta) |
423 | { | 414 | { |
424 | if (delta > 0) { | 415 | if (!queue_delayed_work(workqueue, &u132->monitor, delta)) |
425 | if (queue_delayed_work(workqueue, &u132->monitor, delta)) | 416 | kref_put(&u132->kref, u132_hcd_delete); |
426 | return; | ||
427 | } else if (queue_work(workqueue, &u132->monitor)) | ||
428 | return; | ||
429 | kref_put(&u132->kref, u132_hcd_delete); | ||
430 | return; | ||
431 | } | 417 | } |
432 | 418 | ||
433 | static void u132_monitor_cancel_work(struct u132 *u132) | 419 | static void u132_monitor_cancel_work(struct u132 *u132) |
@@ -489,9 +475,9 @@ static int read_roothub_info(struct u132 *u132) | |||
489 | return 0; | 475 | return 0; |
490 | } | 476 | } |
491 | 477 | ||
492 | static void u132_hcd_monitor_work(void *data) | 478 | static void u132_hcd_monitor_work(struct work_struct *work) |
493 | { | 479 | { |
494 | struct u132 *u132 = data; | 480 | struct u132 *u132 = container_of(work, struct u132, monitor.work); |
495 | if (u132->going > 1) { | 481 | if (u132->going > 1) { |
496 | dev_err(&u132->platform_dev->dev, "device has been removed %d\n" | 482 | dev_err(&u132->platform_dev->dev, "device has been removed %d\n" |
497 | , u132->going); | 483 | , u132->going); |
@@ -1315,15 +1301,14 @@ static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf, | |||
1315 | } | 1301 | } |
1316 | } | 1302 | } |
1317 | 1303 | ||
1318 | static void u132_hcd_ring_work_scheduler(void *data); | ||
1319 | static void u132_hcd_endp_work_scheduler(void *data); | ||
1320 | /* | 1304 | /* |
1321 | * this work function is only executed from the work queue | 1305 | * this work function is only executed from the work queue |
1322 | * | 1306 | * |
1323 | */ | 1307 | */ |
1324 | static void u132_hcd_ring_work_scheduler(void *data) | 1308 | static void u132_hcd_ring_work_scheduler(struct work_struct *work) |
1325 | { | 1309 | { |
1326 | struct u132_ring *ring = data; | 1310 | struct u132_ring *ring = |
1311 | container_of(work, struct u132_ring, scheduler.work); | ||
1327 | struct u132 *u132 = ring->u132; | 1312 | struct u132 *u132 = ring->u132; |
1328 | down(&u132->scheduler_lock); | 1313 | down(&u132->scheduler_lock); |
1329 | if (ring->in_use) { | 1314 | if (ring->in_use) { |
@@ -1382,10 +1367,11 @@ static void u132_hcd_ring_work_scheduler(void *data) | |||
1382 | } | 1367 | } |
1383 | } | 1368 | } |
1384 | 1369 | ||
1385 | static void u132_hcd_endp_work_scheduler(void *data) | 1370 | static void u132_hcd_endp_work_scheduler(struct work_struct *work) |
1386 | { | 1371 | { |
1387 | struct u132_ring *ring; | 1372 | struct u132_ring *ring; |
1388 | struct u132_endp *endp = data; | 1373 | struct u132_endp *endp = |
1374 | container_of(work, struct u132_endp, scheduler.work); | ||
1389 | struct u132 *u132 = endp->u132; | 1375 | struct u132 *u132 = endp->u132; |
1390 | down(&u132->scheduler_lock); | 1376 | down(&u132->scheduler_lock); |
1391 | ring = endp->ring; | 1377 | ring = endp->ring; |
@@ -1943,7 +1929,7 @@ static int create_endpoint_and_queue_int(struct u132 *u132, | |||
1943 | if (!endp) { | 1929 | if (!endp) { |
1944 | return -ENOMEM; | 1930 | return -ENOMEM; |
1945 | } | 1931 | } |
1946 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 1932 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
1947 | spin_lock_init(&endp->queue_lock.slock); | 1933 | spin_lock_init(&endp->queue_lock.slock); |
1948 | INIT_LIST_HEAD(&endp->urb_more); | 1934 | INIT_LIST_HEAD(&endp->urb_more); |
1949 | ring = endp->ring = &u132->ring[0]; | 1935 | ring = endp->ring = &u132->ring[0]; |
@@ -2032,7 +2018,7 @@ static int create_endpoint_and_queue_bulk(struct u132 *u132, | |||
2032 | if (!endp) { | 2018 | if (!endp) { |
2033 | return -ENOMEM; | 2019 | return -ENOMEM; |
2034 | } | 2020 | } |
2035 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 2021 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
2036 | spin_lock_init(&endp->queue_lock.slock); | 2022 | spin_lock_init(&endp->queue_lock.slock); |
2037 | INIT_LIST_HEAD(&endp->urb_more); | 2023 | INIT_LIST_HEAD(&endp->urb_more); |
2038 | endp->dequeueing = 0; | 2024 | endp->dequeueing = 0; |
@@ -2117,7 +2103,7 @@ static int create_endpoint_and_queue_control(struct u132 *u132, | |||
2117 | if (!endp) { | 2103 | if (!endp) { |
2118 | return -ENOMEM; | 2104 | return -ENOMEM; |
2119 | } | 2105 | } |
2120 | INIT_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler, (void *)endp); | 2106 | INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler); |
2121 | spin_lock_init(&endp->queue_lock.slock); | 2107 | spin_lock_init(&endp->queue_lock.slock); |
2122 | INIT_LIST_HEAD(&endp->urb_more); | 2108 | INIT_LIST_HEAD(&endp->urb_more); |
2123 | ring = endp->ring = &u132->ring[0]; | 2109 | ring = endp->ring = &u132->ring[0]; |
@@ -3096,10 +3082,10 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev) | |||
3096 | ring->number = rings + 1; | 3082 | ring->number = rings + 1; |
3097 | ring->length = 0; | 3083 | ring->length = 0; |
3098 | ring->curr_endp = NULL; | 3084 | ring->curr_endp = NULL; |
3099 | INIT_WORK(&ring->scheduler, u132_hcd_ring_work_scheduler, | 3085 | INIT_DELAYED_WORK(&ring->scheduler, |
3100 | (void *)ring); | 3086 | u132_hcd_ring_work_scheduler); |
3101 | } down(&u132->sw_lock); | 3087 | } down(&u132->sw_lock); |
3102 | INIT_WORK(&u132->monitor, u132_hcd_monitor_work, (void *)u132); | 3088 | INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work); |
3103 | while (ports-- > 0) { | 3089 | while (ports-- > 0) { |
3104 | struct u132_port *port = &u132->port[ports]; | 3090 | struct u132_port *port = &u132->port[ports]; |
3105 | port->u132 = u132; | 3091 | port->u132 = u132; |
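
The u132 queue helpers collapse because queue_delayed_work() accepts a zero delay (then behaving like queue_work()) and returns nonzero only when the item was actually queued; that return value is what drives the kref_get()/kref_put() bookkeeping. A reduced sketch, with invented names standing in for the u132 objects:

    #include <linux/workqueue.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    static struct workqueue_struct *my_wq;  /* created with create_workqueue() */

    struct my_obj {
            struct kref kref;
            struct delayed_work scheduler;
    };

    static void my_obj_delete(struct kref *kref)
    {
            kfree(container_of(kref, struct my_obj, kref));
    }

    static void my_queue_work(struct my_obj *obj, unsigned int delta)
    {
            /* take a reference only if a new instance was really queued */
            if (queue_delayed_work(my_wq, &obj->scheduler, delta))
                    kref_get(&obj->kref);
    }

    static void my_requeue_work(struct my_obj *obj, unsigned int delta)
    {
            /* caller already holds a reference; drop it if nothing queued */
            if (!queue_delayed_work(my_wq, &obj->scheduler, delta))
                    kref_put(&obj->kref, my_obj_delete);
    }
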
diff --git a/drivers/usb/input/hid-core.c b/drivers/usb/input/hid-core.c index a49644b7c58e..4295bab4f1e2 100644 --- a/drivers/usb/input/hid-core.c +++ b/drivers/usb/input/hid-core.c | |||
@@ -969,9 +969,10 @@ static void hid_retry_timeout(unsigned long _hid) | |||
969 | } | 969 | } |
970 | 970 | ||
971 | /* Workqueue routine to reset the device or clear a halt */ | 971 | /* Workqueue routine to reset the device or clear a halt */ |
972 | static void hid_reset(void *_hid) | 972 | static void hid_reset(struct work_struct *work) |
973 | { | 973 | { |
974 | struct hid_device *hid = (struct hid_device *) _hid; | 974 | struct hid_device *hid = |
975 | container_of(work, struct hid_device, reset_work); | ||
975 | int rc_lock, rc = 0; | 976 | int rc_lock, rc = 0; |
976 | 977 | ||
977 | if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { | 978 | if (test_bit(HID_CLEAR_HALT, &hid->iofl)) { |
@@ -2043,7 +2044,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf) | |||
2043 | 2044 | ||
2044 | init_waitqueue_head(&hid->wait); | 2045 | init_waitqueue_head(&hid->wait); |
2045 | 2046 | ||
2046 | INIT_WORK(&hid->reset_work, hid_reset, hid); | 2047 | INIT_WORK(&hid->reset_work, hid_reset); |
2047 | setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); | 2048 | setup_timer(&hid->io_retry, hid_retry_timeout, (unsigned long) hid); |
2048 | 2049 | ||
2049 | spin_lock_init(&hid->inlock); | 2050 | spin_lock_init(&hid->inlock); |
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index ba30ca6a14aa..02cbb7fff24f 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c | |||
@@ -76,7 +76,7 @@ struct appledisplay { | |||
76 | char *urbdata; /* interrupt URB data buffer */ | 76 | char *urbdata; /* interrupt URB data buffer */ |
77 | char *msgdata; /* control message data buffer */ | 77 | char *msgdata; /* control message data buffer */ |
78 | 78 | ||
79 | struct work_struct work; | 79 | struct delayed_work work; |
80 | int button_pressed; | 80 | int button_pressed; |
81 | spinlock_t lock; | 81 | spinlock_t lock; |
82 | }; | 82 | }; |
@@ -117,7 +117,7 @@ static void appledisplay_complete(struct urb *urb) | |||
117 | case ACD_BTN_BRIGHT_UP: | 117 | case ACD_BTN_BRIGHT_UP: |
118 | case ACD_BTN_BRIGHT_DOWN: | 118 | case ACD_BTN_BRIGHT_DOWN: |
119 | pdata->button_pressed = 1; | 119 | pdata->button_pressed = 1; |
120 | queue_work(wq, &pdata->work); | 120 | queue_delayed_work(wq, &pdata->work, 0); |
121 | break; | 121 | break; |
122 | case ACD_BTN_NONE: | 122 | case ACD_BTN_NONE: |
123 | default: | 123 | default: |
@@ -184,9 +184,10 @@ static struct backlight_properties appledisplay_bl_data = { | |||
184 | .max_brightness = 0xFF | 184 | .max_brightness = 0xFF |
185 | }; | 185 | }; |
186 | 186 | ||
187 | static void appledisplay_work(void *private) | 187 | static void appledisplay_work(struct work_struct *work) |
188 | { | 188 | { |
189 | struct appledisplay *pdata = private; | 189 | struct appledisplay *pdata = |
190 | container_of(work, struct appledisplay, work.work); | ||
190 | int retval; | 191 | int retval; |
191 | 192 | ||
192 | up(&pdata->bd->sem); | 193 | up(&pdata->bd->sem); |
@@ -238,7 +239,7 @@ static int appledisplay_probe(struct usb_interface *iface, | |||
238 | pdata->udev = udev; | 239 | pdata->udev = udev; |
239 | 240 | ||
240 | spin_lock_init(&pdata->lock); | 241 | spin_lock_init(&pdata->lock); |
241 | INIT_WORK(&pdata->work, appledisplay_work, pdata); | 242 | INIT_DELAYED_WORK(&pdata->work, appledisplay_work); |
242 | 243 | ||
243 | /* Allocate buffer for control messages */ | 244 | /* Allocate buffer for control messages */ |
244 | pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); | 245 | pdata->msgdata = kmalloc(ACD_MSG_BUFFER_LEN, GFP_KERNEL); |
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index cb0ba3107d7f..18b1925032a8 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -156,9 +156,9 @@ struct usb_ftdi { | |||
156 | struct usb_device *udev; | 156 | struct usb_device *udev; |
157 | struct usb_interface *interface; | 157 | struct usb_interface *interface; |
158 | struct usb_class_driver *class; | 158 | struct usb_class_driver *class; |
159 | struct work_struct status_work; | 159 | struct delayed_work status_work; |
160 | struct work_struct command_work; | 160 | struct delayed_work command_work; |
161 | struct work_struct respond_work; | 161 | struct delayed_work respond_work; |
162 | struct u132_platform_data platform_data; | 162 | struct u132_platform_data platform_data; |
163 | struct resource resources[0]; | 163 | struct resource resources[0]; |
164 | struct platform_device platform_dev; | 164 | struct platform_device platform_dev; |
@@ -210,23 +210,14 @@ static void ftdi_elan_init_kref(struct usb_ftdi *ftdi) | |||
210 | 210 | ||
211 | static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) | 211 | static void ftdi_status_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) |
212 | { | 212 | { |
213 | if (delta > 0) { | 213 | if (!queue_delayed_work(status_queue, &ftdi->status_work, delta)) |
214 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) | 214 | kref_put(&ftdi->kref, ftdi_elan_delete); |
215 | return; | ||
216 | } else if (queue_work(status_queue, &ftdi->status_work)) | ||
217 | return; | ||
218 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
219 | return; | ||
220 | } | 215 | } |
221 | 216 | ||
222 | static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 217 | static void ftdi_status_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
223 | { | 218 | { |
224 | if (delta > 0) { | 219 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) |
225 | if (queue_delayed_work(status_queue, &ftdi->status_work, delta)) | 220 | kref_get(&ftdi->kref); |
226 | kref_get(&ftdi->kref); | ||
227 | } else if (queue_work(status_queue, &ftdi->status_work)) | ||
228 | kref_get(&ftdi->kref); | ||
229 | return; | ||
230 | } | 221 | } |
231 | 222 | ||
232 | static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) | 223 | static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) |
@@ -237,25 +228,14 @@ static void ftdi_status_cancel_work(struct usb_ftdi *ftdi) | |||
237 | 228 | ||
238 | static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) | 229 | static void ftdi_command_requeue_work(struct usb_ftdi *ftdi, unsigned int delta) |
239 | { | 230 | { |
240 | if (delta > 0) { | 231 | if (!queue_delayed_work(command_queue, &ftdi->command_work, delta)) |
241 | if (queue_delayed_work(command_queue, &ftdi->command_work, | 232 | kref_put(&ftdi->kref, ftdi_elan_delete); |
242 | delta)) | ||
243 | return; | ||
244 | } else if (queue_work(command_queue, &ftdi->command_work)) | ||
245 | return; | ||
246 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
247 | return; | ||
248 | } | 233 | } |
249 | 234 | ||
250 | static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 235 | static void ftdi_command_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
251 | { | 236 | { |
252 | if (delta > 0) { | 237 | if (queue_delayed_work(command_queue, &ftdi->command_work, delta)) |
253 | if (queue_delayed_work(command_queue, &ftdi->command_work, | 238 | kref_get(&ftdi->kref); |
254 | delta)) | ||
255 | kref_get(&ftdi->kref); | ||
256 | } else if (queue_work(command_queue, &ftdi->command_work)) | ||
257 | kref_get(&ftdi->kref); | ||
258 | return; | ||
259 | } | 239 | } |
260 | 240 | ||
261 | static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) | 241 | static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) |
@@ -267,25 +247,14 @@ static void ftdi_command_cancel_work(struct usb_ftdi *ftdi) | |||
267 | static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, | 247 | static void ftdi_response_requeue_work(struct usb_ftdi *ftdi, |
268 | unsigned int delta) | 248 | unsigned int delta) |
269 | { | 249 | { |
270 | if (delta > 0) { | 250 | if (!queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) |
271 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, | 251 | kref_put(&ftdi->kref, ftdi_elan_delete); |
272 | delta)) | ||
273 | return; | ||
274 | } else if (queue_work(respond_queue, &ftdi->respond_work)) | ||
275 | return; | ||
276 | kref_put(&ftdi->kref, ftdi_elan_delete); | ||
277 | return; | ||
278 | } | 252 | } |
279 | 253 | ||
280 | static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) | 254 | static void ftdi_respond_queue_work(struct usb_ftdi *ftdi, unsigned int delta) |
281 | { | 255 | { |
282 | if (delta > 0) { | 256 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, delta)) |
283 | if (queue_delayed_work(respond_queue, &ftdi->respond_work, | 257 | kref_get(&ftdi->kref); |
284 | delta)) | ||
285 | kref_get(&ftdi->kref); | ||
286 | } else if (queue_work(respond_queue, &ftdi->respond_work)) | ||
287 | kref_get(&ftdi->kref); | ||
288 | return; | ||
289 | } | 258 | } |
290 | 259 | ||
291 | static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) | 260 | static void ftdi_response_cancel_work(struct usb_ftdi *ftdi) |
@@ -475,9 +444,11 @@ static void ftdi_elan_kick_command_queue(struct usb_ftdi *ftdi) | |||
475 | return; | 444 | return; |
476 | } | 445 | } |
477 | 446 | ||
478 | static void ftdi_elan_command_work(void *data) | 447 | static void ftdi_elan_command_work(struct work_struct *work) |
479 | { | 448 | { |
480 | struct usb_ftdi *ftdi = data; | 449 | struct usb_ftdi *ftdi = |
450 | container_of(work, struct usb_ftdi, command_work.work); | ||
451 | |||
481 | if (ftdi->disconnected > 0) { | 452 | if (ftdi->disconnected > 0) { |
482 | ftdi_elan_put_kref(ftdi); | 453 | ftdi_elan_put_kref(ftdi); |
483 | return; | 454 | return; |
@@ -500,9 +471,10 @@ static void ftdi_elan_kick_respond_queue(struct usb_ftdi *ftdi) | |||
500 | return; | 471 | return; |
501 | } | 472 | } |
502 | 473 | ||
503 | static void ftdi_elan_respond_work(void *data) | 474 | static void ftdi_elan_respond_work(struct work_struct *work) |
504 | { | 475 | { |
505 | struct usb_ftdi *ftdi = data; | 476 | struct usb_ftdi *ftdi = |
477 | container_of(work, struct usb_ftdi, respond_work.work); | ||
506 | if (ftdi->disconnected > 0) { | 478 | if (ftdi->disconnected > 0) { |
507 | ftdi_elan_put_kref(ftdi); | 479 | ftdi_elan_put_kref(ftdi); |
508 | return; | 480 | return; |
@@ -534,9 +506,10 @@ static void ftdi_elan_respond_work(void *data) | |||
534 | * after the FTDI has been synchronized | 506 | * after the FTDI has been synchronized |
535 | * | 507 | * |
536 | */ | 508 | */ |
537 | static void ftdi_elan_status_work(void *data) | 509 | static void ftdi_elan_status_work(struct work_struct *work) |
538 | { | 510 | { |
539 | struct usb_ftdi *ftdi = data; | 511 | struct usb_ftdi *ftdi = |
512 | container_of(work, struct usb_ftdi, status_work.work); | ||
540 | int work_delay_in_msec = 0; | 513 | int work_delay_in_msec = 0; |
541 | if (ftdi->disconnected > 0) { | 514 | if (ftdi->disconnected > 0) { |
542 | ftdi_elan_put_kref(ftdi); | 515 | ftdi_elan_put_kref(ftdi); |
@@ -2677,12 +2650,9 @@ static int ftdi_elan_probe(struct usb_interface *interface, | |||
2677 | ftdi->class = NULL; | 2650 | ftdi->class = NULL; |
2678 | dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" | 2651 | dev_info(&ftdi->udev->dev, "USB FDTI=%p ELAN interface %d now a" |
2679 | "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); | 2652 | "ctivated\n", ftdi, iface_desc->desc.bInterfaceNumber); |
2680 | INIT_WORK(&ftdi->status_work, ftdi_elan_status_work, | 2653 | INIT_DELAYED_WORK(&ftdi->status_work, ftdi_elan_status_work); |
2681 | (void *)ftdi); | 2654 | INIT_DELAYED_WORK(&ftdi->command_work, ftdi_elan_command_work); |
2682 | INIT_WORK(&ftdi->command_work, ftdi_elan_command_work, | 2655 | INIT_DELAYED_WORK(&ftdi->respond_work, ftdi_elan_respond_work); |
2683 | (void *)ftdi); | ||
2684 | INIT_WORK(&ftdi->respond_work, ftdi_elan_respond_work, | ||
2685 | (void *)ftdi); | ||
2686 | ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); | 2656 | ftdi_status_queue_work(ftdi, msecs_to_jiffies(3 *1000)); |
2687 | return 0; | 2657 | return 0; |
2688 | } else { | 2658 | } else { |
diff --git a/drivers/usb/misc/phidgetkit.c b/drivers/usb/misc/phidgetkit.c index 9110793f81d3..9659c79e187e 100644 --- a/drivers/usb/misc/phidgetkit.c +++ b/drivers/usb/misc/phidgetkit.c | |||
@@ -81,8 +81,8 @@ struct interfacekit { | |||
81 | unsigned char *data; | 81 | unsigned char *data; |
82 | dma_addr_t data_dma; | 82 | dma_addr_t data_dma; |
83 | 83 | ||
84 | struct work_struct do_notify; | 84 | struct delayed_work do_notify; |
85 | struct work_struct do_resubmit; | 85 | struct delayed_work do_resubmit; |
86 | unsigned long input_events; | 86 | unsigned long input_events; |
87 | unsigned long sensor_events; | 87 | unsigned long sensor_events; |
88 | }; | 88 | }; |
@@ -374,7 +374,7 @@ static void interfacekit_irq(struct urb *urb) | |||
374 | } | 374 | } |
375 | 375 | ||
376 | if (kit->input_events || kit->sensor_events) | 376 | if (kit->input_events || kit->sensor_events) |
377 | schedule_work(&kit->do_notify); | 377 | schedule_delayed_work(&kit->do_notify, 0); |
378 | 378 | ||
379 | resubmit: | 379 | resubmit: |
380 | status = usb_submit_urb(urb, SLAB_ATOMIC); | 380 | status = usb_submit_urb(urb, SLAB_ATOMIC); |
@@ -384,9 +384,10 @@ resubmit: | |||
384 | kit->udev->devpath, status); | 384 | kit->udev->devpath, status); |
385 | } | 385 | } |
386 | 386 | ||
387 | static void do_notify(void *data) | 387 | static void do_notify(struct work_struct *work) |
388 | { | 388 | { |
389 | struct interfacekit *kit = data; | 389 | struct interfacekit *kit = |
390 | container_of(work, struct interfacekit, do_notify.work); | ||
390 | int i; | 391 | int i; |
391 | char sysfs_file[8]; | 392 | char sysfs_file[8]; |
392 | 393 | ||
@@ -405,9 +406,11 @@ static void do_notify(void *data) | |||
405 | } | 406 | } |
406 | } | 407 | } |
407 | 408 | ||
408 | static void do_resubmit(void *data) | 409 | static void do_resubmit(struct work_struct *work) |
409 | { | 410 | { |
410 | set_outputs(data); | 411 | struct interfacekit *kit = |
412 | container_of(work, struct interfacekit, do_resubmit.work); | ||
413 | set_outputs(kit); | ||
411 | } | 414 | } |
412 | 415 | ||
413 | #define show_set_output(value) \ | 416 | #define show_set_output(value) \ |
@@ -575,8 +578,8 @@ static int interfacekit_probe(struct usb_interface *intf, const struct usb_devic | |||
575 | 578 | ||
576 | kit->udev = usb_get_dev(dev); | 579 | kit->udev = usb_get_dev(dev); |
577 | kit->intf = intf; | 580 | kit->intf = intf; |
578 | INIT_WORK(&kit->do_notify, do_notify, kit); | 581 | INIT_DELAYED_WORK(&kit->do_notify, do_notify); |
579 | INIT_WORK(&kit->do_resubmit, do_resubmit, kit); | 582 | INIT_DELAYED_WORK(&kit->do_resubmit, do_resubmit); |
580 | usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, | 583 | usb_fill_int_urb(kit->irq, kit->udev, pipe, kit->data, |
581 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, | 584 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, |
582 | interfacekit_irq, kit, endpoint->bInterval); | 585 | interfacekit_irq, kit, endpoint->bInterval); |
diff --git a/drivers/usb/misc/phidgetmotorcontrol.c b/drivers/usb/misc/phidgetmotorcontrol.c index c3469b0a67c2..2bb4fa572bb7 100644 --- a/drivers/usb/misc/phidgetmotorcontrol.c +++ b/drivers/usb/misc/phidgetmotorcontrol.c | |||
@@ -41,7 +41,7 @@ struct motorcontrol { | |||
41 | unsigned char *data; | 41 | unsigned char *data; |
42 | dma_addr_t data_dma; | 42 | dma_addr_t data_dma; |
43 | 43 | ||
44 | struct work_struct do_notify; | 44 | struct delayed_work do_notify; |
45 | unsigned long input_events; | 45 | unsigned long input_events; |
46 | unsigned long speed_events; | 46 | unsigned long speed_events; |
47 | unsigned long exceed_events; | 47 | unsigned long exceed_events; |
@@ -148,7 +148,7 @@ static void motorcontrol_irq(struct urb *urb) | |||
148 | set_bit(1, &mc->exceed_events); | 148 | set_bit(1, &mc->exceed_events); |
149 | 149 | ||
150 | if (mc->input_events || mc->exceed_events || mc->speed_events) | 150 | if (mc->input_events || mc->exceed_events || mc->speed_events) |
151 | schedule_work(&mc->do_notify); | 151 | schedule_delayed_work(&mc->do_notify, 0); |
152 | 152 | ||
153 | resubmit: | 153 | resubmit: |
154 | status = usb_submit_urb(urb, SLAB_ATOMIC); | 154 | status = usb_submit_urb(urb, SLAB_ATOMIC); |
@@ -159,9 +159,10 @@ resubmit: | |||
159 | mc->udev->devpath, status); | 159 | mc->udev->devpath, status); |
160 | } | 160 | } |
161 | 161 | ||
162 | static void do_notify(void *data) | 162 | static void do_notify(struct work_struct *work) |
163 | { | 163 | { |
164 | struct motorcontrol *mc = data; | 164 | struct motorcontrol *mc = |
165 | container_of(work, struct motorcontrol, do_notify.work); | ||
165 | int i; | 166 | int i; |
166 | char sysfs_file[8]; | 167 | char sysfs_file[8]; |
167 | 168 | ||
@@ -348,7 +349,7 @@ static int motorcontrol_probe(struct usb_interface *intf, const struct usb_devic | |||
348 | mc->udev = usb_get_dev(dev); | 349 | mc->udev = usb_get_dev(dev); |
349 | mc->intf = intf; | 350 | mc->intf = intf; |
350 | mc->acceleration[0] = mc->acceleration[1] = 10; | 351 | mc->acceleration[0] = mc->acceleration[1] = 10; |
351 | INIT_WORK(&mc->do_notify, do_notify, mc); | 352 | INIT_DELAYED_WORK(&mc->do_notify, do_notify); |
352 | usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, | 353 | usb_fill_int_urb(mc->irq, mc->udev, pipe, mc->data, |
353 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, | 354 | maxp > URB_INT_SIZE ? URB_INT_SIZE : maxp, |
354 | motorcontrol_irq, mc, endpoint->bInterval); | 355 | motorcontrol_irq, mc, endpoint->bInterval); |
diff --git a/drivers/usb/net/kaweth.c b/drivers/usb/net/kaweth.c index 7c906a43e497..fa78326d0bf0 100644 --- a/drivers/usb/net/kaweth.c +++ b/drivers/usb/net/kaweth.c | |||
@@ -222,7 +222,7 @@ struct kaweth_device | |||
222 | int suspend_lowmem_ctrl; | 222 | int suspend_lowmem_ctrl; |
223 | int linkstate; | 223 | int linkstate; |
224 | int opened; | 224 | int opened; |
225 | struct work_struct lowmem_work; | 225 | struct delayed_work lowmem_work; |
226 | 226 | ||
227 | struct usb_device *dev; | 227 | struct usb_device *dev; |
228 | struct net_device *net; | 228 | struct net_device *net; |
@@ -530,9 +530,10 @@ resubmit: | |||
530 | kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); | 530 | kaweth_resubmit_int_urb(kaweth, GFP_ATOMIC); |
531 | } | 531 | } |
532 | 532 | ||
533 | static void kaweth_resubmit_tl(void *d) | 533 | static void kaweth_resubmit_tl(struct work_struct *work) |
534 | { | 534 | { |
535 | struct kaweth_device *kaweth = (struct kaweth_device *)d; | 535 | struct kaweth_device *kaweth = |
536 | container_of(work, struct kaweth_device, lowmem_work.work); | ||
536 | 537 | ||
537 | if (IS_BLOCKED(kaweth->status)) | 538 | if (IS_BLOCKED(kaweth->status)) |
538 | return; | 539 | return; |
@@ -1126,7 +1127,7 @@ err_fw: | |||
1126 | 1127 | ||
1127 | /* kaweth is zeroed as part of alloc_netdev */ | 1128 | /* kaweth is zeroed as part of alloc_netdev */ |
1128 | 1129 | ||
1129 | INIT_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl, (void *)kaweth); | 1130 | INIT_DELAYED_WORK(&kaweth->lowmem_work, kaweth_resubmit_tl); |
1130 | 1131 | ||
1131 | SET_MODULE_OWNER(netdev); | 1132 | SET_MODULE_OWNER(netdev); |
1132 | 1133 | ||
diff --git a/drivers/usb/net/pegasus.c b/drivers/usb/net/pegasus.c index 69eb0db399df..b5690b3834e3 100644 --- a/drivers/usb/net/pegasus.c +++ b/drivers/usb/net/pegasus.c | |||
@@ -1281,9 +1281,9 @@ static inline void setup_pegasus_II(pegasus_t * pegasus) | |||
1281 | static struct workqueue_struct *pegasus_workqueue = NULL; | 1281 | static struct workqueue_struct *pegasus_workqueue = NULL; |
1282 | #define CARRIER_CHECK_DELAY (2 * HZ) | 1282 | #define CARRIER_CHECK_DELAY (2 * HZ) |
1283 | 1283 | ||
1284 | static void check_carrier(void *data) | 1284 | static void check_carrier(struct work_struct *work) |
1285 | { | 1285 | { |
1286 | pegasus_t *pegasus = data; | 1286 | pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); |
1287 | set_carrier(pegasus->net); | 1287 | set_carrier(pegasus->net); |
1288 | if (!(pegasus->flags & PEGASUS_UNPLUG)) { | 1288 | if (!(pegasus->flags & PEGASUS_UNPLUG)) { |
1289 | queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, | 1289 | queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, |
@@ -1319,7 +1319,7 @@ static int pegasus_probe(struct usb_interface *intf, | |||
1319 | 1319 | ||
1320 | tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); | 1320 | tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); |
1321 | 1321 | ||
1322 | INIT_WORK(&pegasus->carrier_check, check_carrier, pegasus); | 1322 | INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); |
1323 | 1323 | ||
1324 | pegasus->intf = intf; | 1324 | pegasus->intf = intf; |
1325 | pegasus->usb = dev; | 1325 | pegasus->usb = dev; |
diff --git a/drivers/usb/net/pegasus.h b/drivers/usb/net/pegasus.h index 006438069b66..98f6898cae1f 100644 --- a/drivers/usb/net/pegasus.h +++ b/drivers/usb/net/pegasus.h | |||
@@ -95,7 +95,7 @@ typedef struct pegasus { | |||
95 | int dev_index; | 95 | int dev_index; |
96 | int intr_interval; | 96 | int intr_interval; |
97 | struct tasklet_struct rx_tl; | 97 | struct tasklet_struct rx_tl; |
98 | struct work_struct carrier_check; | 98 | struct delayed_work carrier_check; |
99 | struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; | 99 | struct urb *ctrl_urb, *rx_urb, *tx_urb, *intr_urb; |
100 | struct sk_buff *rx_pool[RX_SKBS]; | 100 | struct sk_buff *rx_pool[RX_SKBS]; |
101 | struct sk_buff *rx_skb; | 101 | struct sk_buff *rx_skb; |
diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index 7672e11c94c4..327f97555679 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c | |||
@@ -782,9 +782,10 @@ static struct ethtool_ops usbnet_ethtool_ops = { | |||
782 | * especially now that control transfers can be queued. | 782 | * especially now that control transfers can be queued. |
783 | */ | 783 | */ |
784 | static void | 784 | static void |
785 | kevent (void *data) | 785 | kevent (struct work_struct *work) |
786 | { | 786 | { |
787 | struct usbnet *dev = data; | 787 | struct usbnet *dev = |
788 | container_of(work, struct usbnet, kevent); | ||
788 | int status; | 789 | int status; |
789 | 790 | ||
790 | /* usb_clear_halt() needs a thread context */ | 791 | /* usb_clear_halt() needs a thread context */ |
@@ -1146,7 +1147,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1146 | skb_queue_head_init (&dev->done); | 1147 | skb_queue_head_init (&dev->done); |
1147 | dev->bh.func = usbnet_bh; | 1148 | dev->bh.func = usbnet_bh; |
1148 | dev->bh.data = (unsigned long) dev; | 1149 | dev->bh.data = (unsigned long) dev; |
1149 | INIT_WORK (&dev->kevent, kevent, dev); | 1150 | INIT_WORK (&dev->kevent, kevent); |
1150 | dev->delay.function = usbnet_bh; | 1151 | dev->delay.function = usbnet_bh; |
1151 | dev->delay.data = (unsigned long) dev; | 1152 | dev->delay.data = (unsigned long) dev; |
1152 | init_timer (&dev->delay); | 1153 | init_timer (&dev->delay); |
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c index b1b5707bc99a..86bcf63b6ba5 100644 --- a/drivers/usb/serial/aircable.c +++ b/drivers/usb/serial/aircable.c | |||
@@ -92,6 +92,7 @@ struct aircable_private { | |||
92 | struct circ_buf *rx_buf; /* read buffer */ | 92 | struct circ_buf *rx_buf; /* read buffer */ |
93 | int rx_flags; /* for throttilng */ | 93 | int rx_flags; /* for throttilng */ |
94 | struct work_struct rx_work; /* work cue for the receiving line */ | 94 | struct work_struct rx_work; /* work cue for the receiving line */ |
95 | struct usb_serial_port *port; /* USB port with which associated */ | ||
95 | }; | 96 | }; |
96 | 97 | ||
97 | /* Private methods */ | 98 | /* Private methods */ |
@@ -251,10 +252,11 @@ static void aircable_send(struct usb_serial_port *port) | |||
251 | schedule_work(&port->work); | 252 | schedule_work(&port->work); |
252 | } | 253 | } |
253 | 254 | ||
254 | static void aircable_read(void *params) | 255 | static void aircable_read(struct work_struct *work) |
255 | { | 256 | { |
256 | struct usb_serial_port *port = params; | 257 | struct aircable_private *priv = |
257 | struct aircable_private *priv = usb_get_serial_port_data(port); | 258 | container_of(work, struct aircable_private, rx_work); |
259 | struct usb_serial_port *port = priv->port; | ||
258 | struct tty_struct *tty; | 260 | struct tty_struct *tty; |
259 | unsigned char *data; | 261 | unsigned char *data; |
260 | int count; | 262 | int count; |
@@ -349,7 +351,8 @@ static int aircable_attach (struct usb_serial *serial) | |||
349 | } | 351 | } |
350 | 352 | ||
351 | priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); | 353 | priv->rx_flags &= ~(THROTTLED | ACTUALLY_THROTTLED); |
352 | INIT_WORK(&priv->rx_work, aircable_read, port); | 354 | priv->port = port; |
355 | INIT_WORK(&priv->rx_work, aircable_read); | ||
353 | 356 | ||
354 | usb_set_serial_port_data(serial->port[0], priv); | 357 | usb_set_serial_port_data(serial->port[0], priv); |
355 | 358 | ||
@@ -516,7 +519,7 @@ static void aircable_read_bulk_callback(struct urb *urb) | |||
516 | package_length - shift); | 519 | package_length - shift); |
517 | } | 520 | } |
518 | } | 521 | } |
519 | aircable_read(port); | 522 | aircable_read(&priv->rx_work); |
520 | } | 523 | } |
521 | 524 | ||
522 | /* Schedule the next read _if_ we are still open */ | 525 | /* Schedule the next read _if_ we are still open */ |
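
Where container_of() lands on the driver's private data but the handler also needs the owning port (or serial device), the conversion adds a back-pointer filled in at attach time, as aircable does here and digi_acceleport, keyspan_pda and ftdi_sio do below. Sketch with hypothetical names:

    #include <linux/workqueue.h>

    struct my_port;                         /* opaque here */

    struct my_priv {
            struct work_struct rx_work;
            struct my_port *port;           /* back-pointer set at attach */
    };

    static void my_rx_work(struct work_struct *work)
    {
            struct my_priv *priv =
                    container_of(work, struct my_priv, rx_work);
            struct my_port *port = priv->port;

            /* ... push received data to port ... */
            (void)port;
    }

    static void my_attach(struct my_port *port, struct my_priv *priv)
    {
            priv->port = port;              /* before the work can first run */
            INIT_WORK(&priv->rx_work, my_rx_work);
    }
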
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 5e3ac281a2f8..83d0e21145b0 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
@@ -430,13 +430,14 @@ struct digi_port { | |||
430 | int dp_in_close; /* close in progress */ | 430 | int dp_in_close; /* close in progress */ |
431 | wait_queue_head_t dp_close_wait; /* wait queue for close */ | 431 | wait_queue_head_t dp_close_wait; /* wait queue for close */ |
432 | struct work_struct dp_wakeup_work; | 432 | struct work_struct dp_wakeup_work; |
433 | struct usb_serial_port *dp_port; | ||
433 | }; | 434 | }; |
434 | 435 | ||
435 | 436 | ||
436 | /* Local Function Declarations */ | 437 | /* Local Function Declarations */ |
437 | 438 | ||
438 | static void digi_wakeup_write( struct usb_serial_port *port ); | 439 | static void digi_wakeup_write( struct usb_serial_port *port ); |
439 | static void digi_wakeup_write_lock(void *); | 440 | static void digi_wakeup_write_lock(struct work_struct *work); |
440 | static int digi_write_oob_command( struct usb_serial_port *port, | 441 | static int digi_write_oob_command( struct usb_serial_port *port, |
441 | unsigned char *buf, int count, int interruptible ); | 442 | unsigned char *buf, int count, int interruptible ); |
442 | static int digi_write_inb_command( struct usb_serial_port *port, | 443 | static int digi_write_inb_command( struct usb_serial_port *port, |
@@ -598,11 +599,12 @@ static inline long cond_wait_interruptible_timeout_irqrestore( | |||
598 | * on writes. | 599 | * on writes. |
599 | */ | 600 | */ |
600 | 601 | ||
601 | static void digi_wakeup_write_lock(void *arg) | 602 | static void digi_wakeup_write_lock(struct work_struct *work) |
602 | { | 603 | { |
603 | struct usb_serial_port *port = arg; | 604 | struct digi_port *priv = |
605 | container_of(work, struct digi_port, dp_wakeup_work); | ||
606 | struct usb_serial_port *port = priv->dp_port; | ||
604 | unsigned long flags; | 607 | unsigned long flags; |
605 | struct digi_port *priv = usb_get_serial_port_data(port); | ||
606 | 608 | ||
607 | 609 | ||
608 | spin_lock_irqsave( &priv->dp_port_lock, flags ); | 610 | spin_lock_irqsave( &priv->dp_port_lock, flags ); |
@@ -1702,8 +1704,8 @@ dbg( "digi_startup: TOP" ); | |||
1702 | init_waitqueue_head( &priv->dp_flush_wait ); | 1704 | init_waitqueue_head( &priv->dp_flush_wait ); |
1703 | priv->dp_in_close = 0; | 1705 | priv->dp_in_close = 0; |
1704 | init_waitqueue_head( &priv->dp_close_wait ); | 1706 | init_waitqueue_head( &priv->dp_close_wait ); |
1705 | INIT_WORK(&priv->dp_wakeup_work, | 1707 | INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock); |
1706 | digi_wakeup_write_lock, serial->port[i]); | 1708 | priv->dp_port = serial->port[i]; |
1707 | 1709 | ||
1708 | /* initialize write wait queue for this port */ | 1710 | /* initialize write wait queue for this port */ |
1709 | init_waitqueue_head( &serial->port[i]->write_wait ); | 1711 | init_waitqueue_head( &serial->port[i]->write_wait ); |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 89ce2775be15..72e4d48f51e9 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -559,7 +559,8 @@ struct ftdi_private { | |||
559 | char prev_status, diff_status; /* Used for TIOCMIWAIT */ | 559 | char prev_status, diff_status; /* Used for TIOCMIWAIT */ |
560 | __u8 rx_flags; /* receive state flags (throttling) */ | 560 | __u8 rx_flags; /* receive state flags (throttling) */ |
561 | spinlock_t rx_lock; /* spinlock for receive state */ | 561 | spinlock_t rx_lock; /* spinlock for receive state */ |
562 | struct work_struct rx_work; | 562 | struct delayed_work rx_work; |
563 | struct usb_serial_port *port; | ||
563 | int rx_processed; | 564 | int rx_processed; |
564 | unsigned long rx_bytes; | 565 | unsigned long rx_bytes; |
565 | 566 | ||
@@ -593,7 +594,7 @@ static int ftdi_write_room (struct usb_serial_port *port); | |||
593 | static int ftdi_chars_in_buffer (struct usb_serial_port *port); | 594 | static int ftdi_chars_in_buffer (struct usb_serial_port *port); |
594 | static void ftdi_write_bulk_callback (struct urb *urb); | 595 | static void ftdi_write_bulk_callback (struct urb *urb); |
595 | static void ftdi_read_bulk_callback (struct urb *urb); | 596 | static void ftdi_read_bulk_callback (struct urb *urb); |
596 | static void ftdi_process_read (void *param); | 597 | static void ftdi_process_read (struct work_struct *work); |
597 | static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); | 598 | static void ftdi_set_termios (struct usb_serial_port *port, struct termios * old); |
598 | static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); | 599 | static int ftdi_tiocmget (struct usb_serial_port *port, struct file *file); |
599 | static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); | 600 | static int ftdi_tiocmset (struct usb_serial_port *port, struct file * file, unsigned int set, unsigned int clear); |
@@ -1201,7 +1202,8 @@ static int ftdi_sio_attach (struct usb_serial *serial) | |||
1201 | port->read_urb->transfer_buffer_length = BUFSZ; | 1202 | port->read_urb->transfer_buffer_length = BUFSZ; |
1202 | } | 1203 | } |
1203 | 1204 | ||
1204 | INIT_WORK(&priv->rx_work, ftdi_process_read, port); | 1205 | INIT_DELAYED_WORK(&priv->rx_work, ftdi_process_read); |
1206 | priv->port = port; | ||
1205 | 1207 | ||
1206 | /* Free port's existing write urb and transfer buffer. */ | 1208 | /* Free port's existing write urb and transfer buffer. */ |
1207 | if (port->write_urb) { | 1209 | if (port->write_urb) { |
@@ -1640,17 +1642,18 @@ static void ftdi_read_bulk_callback (struct urb *urb) | |||
1640 | priv->rx_bytes += countread; | 1642 | priv->rx_bytes += countread; |
1641 | spin_unlock_irqrestore(&priv->rx_lock, flags); | 1643 | spin_unlock_irqrestore(&priv->rx_lock, flags); |
1642 | 1644 | ||
1643 | ftdi_process_read(port); | 1645 | ftdi_process_read(&priv->rx_work.work); |
1644 | 1646 | ||
1645 | } /* ftdi_read_bulk_callback */ | 1647 | } /* ftdi_read_bulk_callback */ |
1646 | 1648 | ||
1647 | 1649 | ||
1648 | static void ftdi_process_read (void *param) | 1650 | static void ftdi_process_read (struct work_struct *work) |
1649 | { /* ftdi_process_read */ | 1651 | { /* ftdi_process_read */ |
1650 | struct usb_serial_port *port = (struct usb_serial_port*)param; | 1652 | struct ftdi_private *priv = |
1653 | container_of(work, struct ftdi_private, rx_work.work); | ||
1654 | struct usb_serial_port *port = priv->port; | ||
1651 | struct urb *urb; | 1655 | struct urb *urb; |
1652 | struct tty_struct *tty; | 1656 | struct tty_struct *tty; |
1653 | struct ftdi_private *priv; | ||
1654 | char error_flag; | 1657 | char error_flag; |
1655 | unsigned char *data; | 1658 | unsigned char *data; |
1656 | 1659 | ||
@@ -2179,7 +2182,7 @@ static void ftdi_unthrottle (struct usb_serial_port *port) | |||
2179 | spin_unlock_irqrestore(&priv->rx_lock, flags); | 2182 | spin_unlock_irqrestore(&priv->rx_lock, flags); |
2180 | 2183 | ||
2181 | if (actually_throttled) | 2184 | if (actually_throttled) |
2182 | schedule_work(&priv->rx_work); | 2185 | schedule_delayed_work(&priv->rx_work, 0); |
2183 | } | 2186 | } |
2184 | 2187 | ||
2185 | static int __init ftdi_init (void) | 2188 | static int __init ftdi_init (void) |
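
A side effect visible in ftdi_sio (and in aircable above): code paths that used to call the handler synchronously now pass the address of the embedded work item, e.g. ftdi_process_read(&priv->rx_work.work), so the container_of() inside the handler still resolves. In sketch form, with made-up names:

    #include <linux/workqueue.h>

    struct my_priv {
            struct delayed_work rx_work;
    };

    static void my_process_read(struct work_struct *work)
    {
            struct my_priv *priv =
                    container_of(work, struct my_priv, rx_work.work);

            /* ... drain the receive buffer for priv ... */
            (void)priv;
    }

    static void my_bulk_callback(struct my_priv *priv)
    {
            /* was: my_process_read(port); hand over the work member
             * instead so container_of() in the handler still works */
            my_process_read(&priv->rx_work.work);
    }
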
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 909005107ea2..e09a0bfe6231 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c | |||
@@ -120,6 +120,8 @@ struct keyspan_pda_private { | |||
120 | int tx_throttled; | 120 | int tx_throttled; |
121 | struct work_struct wakeup_work; | 121 | struct work_struct wakeup_work; |
122 | struct work_struct unthrottle_work; | 122 | struct work_struct unthrottle_work; |
123 | struct usb_serial *serial; | ||
124 | struct usb_serial_port *port; | ||
123 | }; | 125 | }; |
124 | 126 | ||
125 | 127 | ||
@@ -175,9 +177,11 @@ static struct usb_device_id id_table_fake_xircom [] = { | |||
175 | }; | 177 | }; |
176 | #endif | 178 | #endif |
177 | 179 | ||
178 | static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) | 180 | static void keyspan_pda_wakeup_write(struct work_struct *work) |
179 | { | 181 | { |
180 | 182 | struct keyspan_pda_private *priv = | |
183 | container_of(work, struct keyspan_pda_private, wakeup_work); | ||
184 | struct usb_serial_port *port = priv->port; | ||
181 | struct tty_struct *tty = port->tty; | 185 | struct tty_struct *tty = port->tty; |
182 | 186 | ||
183 | /* wake up port processes */ | 187 | /* wake up port processes */ |
@@ -187,8 +191,11 @@ static void keyspan_pda_wakeup_write( struct usb_serial_port *port ) | |||
187 | tty_wakeup(tty); | 191 | tty_wakeup(tty); |
188 | } | 192 | } |
189 | 193 | ||
190 | static void keyspan_pda_request_unthrottle( struct usb_serial *serial ) | 194 | static void keyspan_pda_request_unthrottle(struct work_struct *work) |
191 | { | 195 | { |
196 | struct keyspan_pda_private *priv = | ||
197 | container_of(work, struct keyspan_pda_private, unthrottle_work); | ||
198 | struct usb_serial *serial = priv->serial; | ||
192 | int result; | 199 | int result; |
193 | 200 | ||
194 | dbg(" request_unthrottle"); | 201 | dbg(" request_unthrottle"); |
@@ -765,11 +772,10 @@ static int keyspan_pda_startup (struct usb_serial *serial) | |||
765 | return (1); /* error */ | 772 | return (1); /* error */ |
766 | usb_set_serial_port_data(serial->port[0], priv); | 773 | usb_set_serial_port_data(serial->port[0], priv); |
767 | init_waitqueue_head(&serial->port[0]->write_wait); | 774 | init_waitqueue_head(&serial->port[0]->write_wait); |
768 | INIT_WORK(&priv->wakeup_work, (void *)keyspan_pda_wakeup_write, | 775 | INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write); |
769 | (void *)(serial->port[0])); | 776 | INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle); |
770 | INIT_WORK(&priv->unthrottle_work, | 777 | priv->serial = serial; |
771 | (void *)keyspan_pda_request_unthrottle, | 778 | priv->port = serial->port[0]; |
772 | (void *)(serial)); | ||
773 | return (0); | 779 | return (0); |
774 | } | 780 | } |
775 | 781 | ||
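
With the data argument gone from INIT_WORK(), anything the handler still needs has to be reachable from the structure that embeds the work item; keyspan_pda therefore grows serial and port back-pointers in its private struct and fills them in at startup. A rough sketch of the same idea, with made-up my_* names standing in for the driver types:

#include <linux/workqueue.h>

struct my_dev;				/* whatever the old 'data' pointer referred to */

struct my_priv {
	struct work_struct	wakeup_work;
	struct my_dev		*dev;	/* back-pointer, filled in at attach time */
};

static void my_wakeup(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, wakeup_work);
	struct my_dev *dev = priv->dev;	/* what used to arrive as the handler argument */

	/* ... wake up whoever is waiting on dev ... */
	(void)dev;
}

static void my_attach(struct my_priv *priv, struct my_dev *dev)
{
	INIT_WORK(&priv->wakeup_work, my_wakeup);
	priv->dev = dev;		/* stash what INIT_WORK's old third argument carried */
}
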
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index c1257d5292f5..3d5072f14b8d 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -533,9 +533,10 @@ void usb_serial_port_softint(struct usb_serial_port *port) | |||
533 | schedule_work(&port->work); | 533 | schedule_work(&port->work); |
534 | } | 534 | } |
535 | 535 | ||
536 | static void usb_serial_port_work(void *private) | 536 | static void usb_serial_port_work(struct work_struct *work) |
537 | { | 537 | { |
538 | struct usb_serial_port *port = private; | 538 | struct usb_serial_port *port = |
539 | container_of(work, struct usb_serial_port, work); | ||
539 | struct tty_struct *tty; | 540 | struct tty_struct *tty; |
540 | 541 | ||
541 | dbg("%s - port %d", __FUNCTION__, port->number); | 542 | dbg("%s - port %d", __FUNCTION__, port->number); |
@@ -799,7 +800,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
799 | port->serial = serial; | 800 | port->serial = serial; |
800 | spin_lock_init(&port->lock); | 801 | spin_lock_init(&port->lock); |
801 | mutex_init(&port->mutex); | 802 | mutex_init(&port->mutex); |
802 | INIT_WORK(&port->work, usb_serial_port_work, port); | 803 | INIT_WORK(&port->work, usb_serial_port_work); |
803 | serial->port[i] = port; | 804 | serial->port[i] = port; |
804 | } | 805 | } |
805 | 806 | ||
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 4d1cd7aeccd3..154c7d290597 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c | |||
@@ -227,6 +227,7 @@ struct whiteheat_private { | |||
227 | struct list_head rx_urbs_submitted; | 227 | struct list_head rx_urbs_submitted; |
228 | struct list_head rx_urb_q; | 228 | struct list_head rx_urb_q; |
229 | struct work_struct rx_work; | 229 | struct work_struct rx_work; |
230 | struct usb_serial_port *port; | ||
230 | struct list_head tx_urbs_free; | 231 | struct list_head tx_urbs_free; |
231 | struct list_head tx_urbs_submitted; | 232 | struct list_head tx_urbs_submitted; |
232 | }; | 233 | }; |
@@ -241,7 +242,7 @@ static void command_port_read_callback(struct urb *urb); | |||
241 | static int start_port_read(struct usb_serial_port *port); | 242 | static int start_port_read(struct usb_serial_port *port); |
242 | static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); | 243 | static struct whiteheat_urb_wrap *urb_to_wrap(struct urb *urb, struct list_head *head); |
243 | static struct list_head *list_first(struct list_head *head); | 244 | static struct list_head *list_first(struct list_head *head); |
244 | static void rx_data_softint(void *private); | 245 | static void rx_data_softint(struct work_struct *work); |
245 | 246 | ||
246 | static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); | 247 | static int firm_send_command(struct usb_serial_port *port, __u8 command, __u8 *data, __u8 datasize); |
247 | static int firm_open(struct usb_serial_port *port); | 248 | static int firm_open(struct usb_serial_port *port); |
@@ -424,7 +425,8 @@ static int whiteheat_attach (struct usb_serial *serial) | |||
424 | spin_lock_init(&info->lock); | 425 | spin_lock_init(&info->lock); |
425 | info->flags = 0; | 426 | info->flags = 0; |
426 | info->mcr = 0; | 427 | info->mcr = 0; |
427 | INIT_WORK(&info->rx_work, rx_data_softint, port); | 428 | INIT_WORK(&info->rx_work, rx_data_softint); |
429 | info->port = port; | ||
428 | 430 | ||
429 | INIT_LIST_HEAD(&info->rx_urbs_free); | 431 | INIT_LIST_HEAD(&info->rx_urbs_free); |
430 | INIT_LIST_HEAD(&info->rx_urbs_submitted); | 432 | INIT_LIST_HEAD(&info->rx_urbs_submitted); |
@@ -949,7 +951,7 @@ static void whiteheat_unthrottle (struct usb_serial_port *port) | |||
949 | spin_unlock_irqrestore(&info->lock, flags); | 951 | spin_unlock_irqrestore(&info->lock, flags); |
950 | 952 | ||
951 | if (actually_throttled) | 953 | if (actually_throttled) |
952 | rx_data_softint(port); | 954 | rx_data_softint(&info->rx_work); |
953 | 955 | ||
954 | return; | 956 | return; |
955 | } | 957 | } |
@@ -1400,10 +1402,11 @@ static struct list_head *list_first(struct list_head *head) | |||
1400 | } | 1402 | } |
1401 | 1403 | ||
1402 | 1404 | ||
1403 | static void rx_data_softint(void *private) | 1405 | static void rx_data_softint(struct work_struct *work) |
1404 | { | 1406 | { |
1405 | struct usb_serial_port *port = (struct usb_serial_port *)private; | 1407 | struct whiteheat_private *info = |
1406 | struct whiteheat_private *info = usb_get_serial_port_data(port); | 1408 | container_of(work, struct whiteheat_private, rx_work); |
1409 | struct usb_serial_port *port = info->port; | ||
1407 | struct tty_struct *tty = port->tty; | 1410 | struct tty_struct *tty = port->tty; |
1408 | struct whiteheat_urb_wrap *wrap; | 1411 | struct whiteheat_urb_wrap *wrap; |
1409 | struct urb *urb; | 1412 | struct urb *urb; |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 302174b8e477..31f476a64790 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -383,9 +383,9 @@ static void fbcon_update_softback(struct vc_data *vc) | |||
383 | softback_top = 0; | 383 | softback_top = 0; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void fb_flashcursor(void *private) | 386 | static void fb_flashcursor(struct work_struct *work) |
387 | { | 387 | { |
388 | struct fb_info *info = private; | 388 | struct fb_info *info = container_of(work, struct fb_info, queue); |
389 | struct fbcon_ops *ops = info->fbcon_par; | 389 | struct fbcon_ops *ops = info->fbcon_par; |
390 | struct display *p; | 390 | struct display *p; |
391 | struct vc_data *vc = NULL; | 391 | struct vc_data *vc = NULL; |
@@ -442,7 +442,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info) | |||
442 | if ((!info->queue.func || info->queue.func == fb_flashcursor) && | 442 | if ((!info->queue.func || info->queue.func == fb_flashcursor) && |
443 | !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { | 443 | !(ops->flags & FBCON_FLAGS_CURSOR_TIMER)) { |
444 | if (!info->queue.func) | 444 | if (!info->queue.func) |
445 | INIT_WORK(&info->queue, fb_flashcursor, info); | 445 | INIT_WORK(&info->queue, fb_flashcursor); |
446 | 446 | ||
447 | init_timer(&ops->cursor_timer); | 447 | init_timer(&ops->cursor_timer); |
448 | ops->cursor_timer.function = cursor_timer_handler; | 448 | ops->cursor_timer.function = cursor_timer_handler; |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 8a8ae55a7403..38eb0b69c2d7 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
@@ -964,9 +964,10 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) | |||
964 | * Our LCD controller task (which is called when we blank or unblank) | 964 | * Our LCD controller task (which is called when we blank or unblank) |
965 | * via keventd. | 965 | * via keventd. |
966 | */ | 966 | */ |
967 | static void pxafb_task(void *dummy) | 967 | static void pxafb_task(struct work_struct *work) |
968 | { | 968 | { |
969 | struct pxafb_info *fbi = dummy; | 969 | struct pxafb_info *fbi = |
970 | container_of(work, struct pxafb_info, task); | ||
970 | u_int state = xchg(&fbi->task_state, -1); | 971 | u_int state = xchg(&fbi->task_state, -1); |
971 | 972 | ||
972 | set_ctrlr_state(fbi, state); | 973 | set_ctrlr_state(fbi, state); |
@@ -1159,7 +1160,7 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) | |||
1159 | } | 1160 | } |
1160 | 1161 | ||
1161 | init_waitqueue_head(&fbi->ctrlr_wait); | 1162 | init_waitqueue_head(&fbi->ctrlr_wait); |
1162 | INIT_WORK(&fbi->task, pxafb_task, fbi); | 1163 | INIT_WORK(&fbi->task, pxafb_task); |
1163 | init_MUTEX(&fbi->ctrlr_sem); | 1164 | init_MUTEX(&fbi->ctrlr_sem); |
1164 | 1165 | ||
1165 | return fbi; | 1166 | return fbi; |
diff --git a/fs/9p/mux.c b/fs/9p/mux.c index 90a79c784549..944273c3dbff 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c | |||
@@ -110,8 +110,8 @@ struct v9fs_mux_rpc { | |||
110 | }; | 110 | }; |
111 | 111 | ||
112 | static int v9fs_poll_proc(void *); | 112 | static int v9fs_poll_proc(void *); |
113 | static void v9fs_read_work(void *); | 113 | static void v9fs_read_work(struct work_struct *work); |
114 | static void v9fs_write_work(void *); | 114 | static void v9fs_write_work(struct work_struct *work); |
115 | static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, | 115 | static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address, |
116 | poll_table * p); | 116 | poll_table * p); |
117 | static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); | 117 | static u16 v9fs_mux_get_tag(struct v9fs_mux_data *); |
@@ -297,8 +297,8 @@ struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize, | |||
297 | m->rbuf = NULL; | 297 | m->rbuf = NULL; |
298 | m->wpos = m->wsize = 0; | 298 | m->wpos = m->wsize = 0; |
299 | m->wbuf = NULL; | 299 | m->wbuf = NULL; |
300 | INIT_WORK(&m->rq, v9fs_read_work, m); | 300 | INIT_WORK(&m->rq, v9fs_read_work); |
301 | INIT_WORK(&m->wq, v9fs_write_work, m); | 301 | INIT_WORK(&m->wq, v9fs_write_work); |
302 | m->wsched = 0; | 302 | m->wsched = 0; |
303 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | 303 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); |
304 | m->poll_task = NULL; | 304 | m->poll_task = NULL; |
@@ -458,13 +458,13 @@ static int v9fs_poll_proc(void *a) | |||
458 | /** | 458 | /** |
459 | * v9fs_write_work - called when a transport can send some data | 459 | * v9fs_write_work - called when a transport can send some data |
460 | */ | 460 | */ |
461 | static void v9fs_write_work(void *a) | 461 | static void v9fs_write_work(struct work_struct *work) |
462 | { | 462 | { |
463 | int n, err; | 463 | int n, err; |
464 | struct v9fs_mux_data *m; | 464 | struct v9fs_mux_data *m; |
465 | struct v9fs_req *req; | 465 | struct v9fs_req *req; |
466 | 466 | ||
467 | m = a; | 467 | m = container_of(work, struct v9fs_mux_data, wq); |
468 | 468 | ||
469 | if (m->err < 0) { | 469 | if (m->err < 0) { |
470 | clear_bit(Wworksched, &m->wsched); | 470 | clear_bit(Wworksched, &m->wsched); |
@@ -564,7 +564,7 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) | |||
564 | /** | 564 | /** |
565 | * v9fs_read_work - called when there is some data to be read from a transport | 565 | * v9fs_read_work - called when there is some data to be read from a transport |
566 | */ | 566 | */ |
567 | static void v9fs_read_work(void *a) | 567 | static void v9fs_read_work(struct work_struct *work) |
568 | { | 568 | { |
569 | int n, err; | 569 | int n, err; |
570 | struct v9fs_mux_data *m; | 570 | struct v9fs_mux_data *m; |
@@ -572,7 +572,7 @@ static void v9fs_read_work(void *a) | |||
572 | struct v9fs_fcall *rcall; | 572 | struct v9fs_fcall *rcall; |
573 | char *rbuf; | 573 | char *rbuf; |
574 | 574 | ||
575 | m = a; | 575 | m = container_of(work, struct v9fs_mux_data, rq); |
576 | 576 | ||
577 | if (m->err < 0) | 577 | if (m->err < 0) |
578 | return; | 578 | return; |
@@ -53,13 +53,13 @@ static kmem_cache_t *kioctx_cachep; | |||
53 | static struct workqueue_struct *aio_wq; | 53 | static struct workqueue_struct *aio_wq; |
54 | 54 | ||
55 | /* Used for rare fput completion. */ | 55 | /* Used for rare fput completion. */ |
56 | static void aio_fput_routine(void *); | 56 | static void aio_fput_routine(struct work_struct *); |
57 | static DECLARE_WORK(fput_work, aio_fput_routine, NULL); | 57 | static DECLARE_WORK(fput_work, aio_fput_routine); |
58 | 58 | ||
59 | static DEFINE_SPINLOCK(fput_lock); | 59 | static DEFINE_SPINLOCK(fput_lock); |
60 | static LIST_HEAD(fput_head); | 60 | static LIST_HEAD(fput_head); |
61 | 61 | ||
62 | static void aio_kick_handler(void *); | 62 | static void aio_kick_handler(struct work_struct *); |
63 | static void aio_queue_work(struct kioctx *); | 63 | static void aio_queue_work(struct kioctx *); |
64 | 64 | ||
65 | /* aio_setup | 65 | /* aio_setup |
@@ -227,7 +227,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
227 | 227 | ||
228 | INIT_LIST_HEAD(&ctx->active_reqs); | 228 | INIT_LIST_HEAD(&ctx->active_reqs); |
229 | INIT_LIST_HEAD(&ctx->run_list); | 229 | INIT_LIST_HEAD(&ctx->run_list); |
230 | INIT_WORK(&ctx->wq, aio_kick_handler, ctx); | 230 | INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler); |
231 | 231 | ||
232 | if (aio_setup_ring(ctx) < 0) | 232 | if (aio_setup_ring(ctx) < 0) |
233 | goto out_freectx; | 233 | goto out_freectx; |
@@ -469,7 +469,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) | |||
469 | wake_up(&ctx->wait); | 469 | wake_up(&ctx->wait); |
470 | } | 470 | } |
471 | 471 | ||
472 | static void aio_fput_routine(void *data) | 472 | static void aio_fput_routine(struct work_struct *data) |
473 | { | 473 | { |
474 | spin_lock_irq(&fput_lock); | 474 | spin_lock_irq(&fput_lock); |
475 | while (likely(!list_empty(&fput_head))) { | 475 | while (likely(!list_empty(&fput_head))) { |
@@ -857,9 +857,9 @@ static inline void aio_run_all_iocbs(struct kioctx *ctx) | |||
857 | * space. | 857 | * space. |
858 | * Run on aiod's context. | 858 | * Run on aiod's context. |
859 | */ | 859 | */ |
860 | static void aio_kick_handler(void *data) | 860 | static void aio_kick_handler(struct work_struct *work) |
861 | { | 861 | { |
862 | struct kioctx *ctx = data; | 862 | struct kioctx *ctx = container_of(work, struct kioctx, wq.work); |
863 | mm_segment_t oldfs = get_fs(); | 863 | mm_segment_t oldfs = get_fs(); |
864 | int requeue; | 864 | int requeue; |
865 | 865 | ||
@@ -874,7 +874,7 @@ static void aio_kick_handler(void *data) | |||
874 | * we're in a worker thread already, don't use queue_delayed_work, | 874 | * we're in a worker thread already, don't use queue_delayed_work, |
875 | */ | 875 | */ |
876 | if (requeue) | 876 | if (requeue) |
877 | queue_work(aio_wq, &ctx->wq); | 877 | queue_delayed_work(aio_wq, &ctx->wq, 0); |
878 | } | 878 | } |
879 | 879 | ||
880 | 880 | ||
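
The aio hunks apply the same conversion against a private workqueue: the per-context work item becomes a delayed_work, and requeueing from inside the handler switches to queue_delayed_work() with a delay of 0, since plain queue_work() no longer accepts a delayed_work. A sketch under those assumptions, with my_wq standing in for a queue made elsewhere with create_workqueue():

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* assumed: my_wq = create_workqueue("my_wq"); */

struct my_ctx {
	struct delayed_work	wq;	/* was: struct work_struct wq; plus a data pointer */
};

static void my_kick_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, wq.work);
	int requeue = 0;

	/* ... process ctx, set requeue if more work remains ... */

	/* a delayed_work must be requeued with the delayed variant;
	 * delay 0 preserves the old queue_work() behaviour */
	if (requeue)
		queue_delayed_work(my_wq, &ctx->wq, 0);
}

static void my_ctx_init(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->wq, my_kick_handler);
}
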
@@ -940,16 +940,16 @@ static void bio_release_pages(struct bio *bio) | |||
940 | * run one bio_put() against the BIO. | 940 | * run one bio_put() against the BIO. |
941 | */ | 941 | */ |
942 | 942 | ||
943 | static void bio_dirty_fn(void *data); | 943 | static void bio_dirty_fn(struct work_struct *work); |
944 | 944 | ||
945 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn, NULL); | 945 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
946 | static DEFINE_SPINLOCK(bio_dirty_lock); | 946 | static DEFINE_SPINLOCK(bio_dirty_lock); |
947 | static struct bio *bio_dirty_list; | 947 | static struct bio *bio_dirty_list; |
948 | 948 | ||
949 | /* | 949 | /* |
950 | * This runs in process context | 950 | * This runs in process context |
951 | */ | 951 | */ |
952 | static void bio_dirty_fn(void *data) | 952 | static void bio_dirty_fn(struct work_struct *work) |
953 | { | 953 | { |
954 | unsigned long flags; | 954 | unsigned long flags; |
955 | struct bio *bio; | 955 | struct bio *bio; |
@@ -91,8 +91,10 @@ out: | |||
91 | spin_unlock(&fddef->lock); | 91 | spin_unlock(&fddef->lock); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void free_fdtable_work(struct fdtable_defer *f) | 94 | static void free_fdtable_work(struct work_struct *work) |
95 | { | 95 | { |
96 | struct fdtable_defer *f = | ||
97 | container_of(work, struct fdtable_defer, wq); | ||
96 | struct fdtable *fdt; | 98 | struct fdtable *fdt; |
97 | 99 | ||
98 | spin_lock_bh(&f->lock); | 100 | spin_lock_bh(&f->lock); |
@@ -351,7 +353,7 @@ static void __devinit fdtable_defer_list_init(int cpu) | |||
351 | { | 353 | { |
352 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); | 354 | struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu); |
353 | spin_lock_init(&fddef->lock); | 355 | spin_lock_init(&fddef->lock); |
354 | INIT_WORK(&fddef->wq, (void (*)(void *))free_fdtable_work, fddef); | 356 | INIT_WORK(&fddef->wq, free_fdtable_work); |
355 | init_timer(&fddef->timer); | 357 | init_timer(&fddef->timer); |
356 | fddef->timer.data = (unsigned long)fddef; | 358 | fddef->timer.data = (unsigned long)fddef; |
357 | fddef->timer.function = fdtable_timer; | 359 | fddef->timer.function = fdtable_timer; |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 78fe0fae23ff..55f5333dae99 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | struct greedy { | 36 | struct greedy { |
37 | struct gfs2_holder gr_gh; | 37 | struct gfs2_holder gr_gh; |
38 | struct work_struct gr_work; | 38 | struct delayed_work gr_work; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | struct gfs2_gl_hash_bucket { | 41 | struct gfs2_gl_hash_bucket { |
@@ -1368,9 +1368,9 @@ static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, | |||
1368 | glops->go_xmote_th(gl, state, flags); | 1368 | glops->go_xmote_th(gl, state, flags); |
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | static void greedy_work(void *data) | 1371 | static void greedy_work(struct work_struct *work) |
1372 | { | 1372 | { |
1373 | struct greedy *gr = data; | 1373 | struct greedy *gr = container_of(work, struct greedy, gr_work.work); |
1374 | struct gfs2_holder *gh = &gr->gr_gh; | 1374 | struct gfs2_holder *gh = &gr->gr_gh; |
1375 | struct gfs2_glock *gl = gh->gh_gl; | 1375 | struct gfs2_glock *gl = gh->gh_gl; |
1376 | const struct gfs2_glock_operations *glops = gl->gl_ops; | 1376 | const struct gfs2_glock_operations *glops = gl->gl_ops; |
@@ -1422,7 +1422,7 @@ int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time) | |||
1422 | 1422 | ||
1423 | gfs2_holder_init(gl, 0, 0, gh); | 1423 | gfs2_holder_init(gl, 0, 0, gh); |
1424 | set_bit(HIF_GREEDY, &gh->gh_iflags); | 1424 | set_bit(HIF_GREEDY, &gh->gh_iflags); |
1425 | INIT_WORK(&gr->gr_work, greedy_work, gr); | 1425 | INIT_DELAYED_WORK(&gr->gr_work, greedy_work); |
1426 | 1426 | ||
1427 | set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); | 1427 | set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags); |
1428 | schedule_delayed_work(&gr->gr_work, time); | 1428 | schedule_delayed_work(&gr->gr_work, time); |
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 42e3bef270c9..72dad552aa00 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c | |||
@@ -577,12 +577,12 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) | |||
577 | server->rcv.ptr = (unsigned char*)&server->rcv.buf; | 577 | server->rcv.ptr = (unsigned char*)&server->rcv.buf; |
578 | server->rcv.len = 10; | 578 | server->rcv.len = 10; |
579 | server->rcv.state = 0; | 579 | server->rcv.state = 0; |
580 | INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc, server); | 580 | INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); |
581 | INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc, server); | 581 | INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); |
582 | sock->sk->sk_write_space = ncp_tcp_write_space; | 582 | sock->sk->sk_write_space = ncp_tcp_write_space; |
583 | } else { | 583 | } else { |
584 | INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc, server); | 584 | INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); |
585 | INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc, server); | 585 | INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); |
586 | server->timeout_tm.data = (unsigned long)server; | 586 | server->timeout_tm.data = (unsigned long)server; |
587 | server->timeout_tm.function = ncpdgram_timeout_call; | 587 | server->timeout_tm.function = ncpdgram_timeout_call; |
588 | } | 588 | } |
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 11c2b252ebed..e496d8b65e92 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c | |||
@@ -350,9 +350,10 @@ static void info_server(struct ncp_server *server, unsigned int id, const void * | |||
350 | } | 350 | } |
351 | } | 351 | } |
352 | 352 | ||
353 | void ncpdgram_rcv_proc(void *s) | 353 | void ncpdgram_rcv_proc(struct work_struct *work) |
354 | { | 354 | { |
355 | struct ncp_server *server = s; | 355 | struct ncp_server *server = |
356 | container_of(work, struct ncp_server, rcv.tq); | ||
356 | struct socket* sock; | 357 | struct socket* sock; |
357 | 358 | ||
358 | sock = server->ncp_sock; | 359 | sock = server->ncp_sock; |
@@ -468,9 +469,10 @@ static void __ncpdgram_timeout_proc(struct ncp_server *server) | |||
468 | } | 469 | } |
469 | } | 470 | } |
470 | 471 | ||
471 | void ncpdgram_timeout_proc(void *s) | 472 | void ncpdgram_timeout_proc(struct work_struct *work) |
472 | { | 473 | { |
473 | struct ncp_server *server = s; | 474 | struct ncp_server *server = |
475 | container_of(work, struct ncp_server, timeout_tq); | ||
474 | mutex_lock(&server->rcv.creq_mutex); | 476 | mutex_lock(&server->rcv.creq_mutex); |
475 | __ncpdgram_timeout_proc(server); | 477 | __ncpdgram_timeout_proc(server); |
476 | mutex_unlock(&server->rcv.creq_mutex); | 478 | mutex_unlock(&server->rcv.creq_mutex); |
@@ -652,18 +654,20 @@ skipdata:; | |||
652 | } | 654 | } |
653 | } | 655 | } |
654 | 656 | ||
655 | void ncp_tcp_rcv_proc(void *s) | 657 | void ncp_tcp_rcv_proc(struct work_struct *work) |
656 | { | 658 | { |
657 | struct ncp_server *server = s; | 659 | struct ncp_server *server = |
660 | container_of(work, struct ncp_server, rcv.tq); | ||
658 | 661 | ||
659 | mutex_lock(&server->rcv.creq_mutex); | 662 | mutex_lock(&server->rcv.creq_mutex); |
660 | __ncptcp_rcv_proc(server); | 663 | __ncptcp_rcv_proc(server); |
661 | mutex_unlock(&server->rcv.creq_mutex); | 664 | mutex_unlock(&server->rcv.creq_mutex); |
662 | } | 665 | } |
663 | 666 | ||
664 | void ncp_tcp_tx_proc(void *s) | 667 | void ncp_tcp_tx_proc(struct work_struct *work) |
665 | { | 668 | { |
666 | struct ncp_server *server = s; | 669 | struct ncp_server *server = |
670 | container_of(work, struct ncp_server, tx.tq); | ||
667 | 671 | ||
668 | mutex_lock(&server->rcv.creq_mutex); | 672 | mutex_lock(&server->rcv.creq_mutex); |
669 | __ncptcp_try_send(server); | 673 | __ncptcp_try_send(server); |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 5fea638743e4..23ab145daa2d 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -143,7 +143,7 @@ static struct nfs_client *nfs_alloc_client(const char *hostname, | |||
143 | INIT_LIST_HEAD(&clp->cl_state_owners); | 143 | INIT_LIST_HEAD(&clp->cl_state_owners); |
144 | INIT_LIST_HEAD(&clp->cl_unused); | 144 | INIT_LIST_HEAD(&clp->cl_unused); |
145 | spin_lock_init(&clp->cl_lock); | 145 | spin_lock_init(&clp->cl_lock); |
146 | INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp); | 146 | INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); |
147 | rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); | 147 | rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); |
148 | clp->cl_boot_time = CURRENT_TIME; | 148 | clp->cl_boot_time = CURRENT_TIME; |
149 | clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; | 149 | clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index ec1114b33d89..371b804e7cc8 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -18,10 +18,10 @@ | |||
18 | 18 | ||
19 | #define NFSDBG_FACILITY NFSDBG_VFS | 19 | #define NFSDBG_FACILITY NFSDBG_VFS |
20 | 20 | ||
21 | static void nfs_expire_automounts(void *list); | 21 | static void nfs_expire_automounts(struct work_struct *work); |
22 | 22 | ||
23 | LIST_HEAD(nfs_automount_list); | 23 | LIST_HEAD(nfs_automount_list); |
24 | static DECLARE_WORK(nfs_automount_task, nfs_expire_automounts, &nfs_automount_list); | 24 | static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); |
25 | int nfs_mountpoint_expiry_timeout = 500 * HZ; | 25 | int nfs_mountpoint_expiry_timeout = 500 * HZ; |
26 | 26 | ||
27 | static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, | 27 | static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent, |
@@ -164,9 +164,9 @@ struct inode_operations nfs_referral_inode_operations = { | |||
164 | .follow_link = nfs_follow_mountpoint, | 164 | .follow_link = nfs_follow_mountpoint, |
165 | }; | 165 | }; |
166 | 166 | ||
167 | static void nfs_expire_automounts(void *data) | 167 | static void nfs_expire_automounts(struct work_struct *work) |
168 | { | 168 | { |
169 | struct list_head *list = (struct list_head *)data; | 169 | struct list_head *list = &nfs_automount_list; |
170 | 170 | ||
171 | mark_mounts_for_expiry(list); | 171 | mark_mounts_for_expiry(list); |
172 | if (!list_empty(list)) | 172 | if (!list_empty(list)) |
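
File-scope work items lose their data argument the same way: DECLARE_WORK() and DECLARE_DELAYED_WORK() now take only the handler, and the handler either ignores its work pointer or, as nfs_expire_automounts does above, reaches for the file-scope object directly. An illustrative sketch with invented names and timeout:

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/jiffies.h>

#define MY_EXPIRY_TIMEOUT	(500 * HZ)

static void my_expire(struct work_struct *work);

static LIST_HEAD(my_expire_list);
static DECLARE_DELAYED_WORK(my_expire_task, my_expire);	/* no data argument any more */

static void my_expire(struct work_struct *work)
{
	/* the old 'void *data' pointed at the file-scope list anyway */
	struct list_head *list = &my_expire_list;

	/* ... expire entries on 'list' ... */
	if (!list_empty(list))
		schedule_delayed_work(&my_expire_task, MY_EXPIRY_TIMEOUT);
}
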
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 6f346677332d..c26cd978c7cc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -185,7 +185,7 @@ extern const u32 nfs4_fs_locations_bitmap[2]; | |||
185 | extern void nfs4_schedule_state_renewal(struct nfs_client *); | 185 | extern void nfs4_schedule_state_renewal(struct nfs_client *); |
186 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); | 186 | extern void nfs4_renewd_prepare_shutdown(struct nfs_server *); |
187 | extern void nfs4_kill_renewd(struct nfs_client *); | 187 | extern void nfs4_kill_renewd(struct nfs_client *); |
188 | extern void nfs4_renew_state(void *); | 188 | extern void nfs4_renew_state(struct work_struct *); |
189 | 189 | ||
190 | /* nfs4state.c */ | 190 | /* nfs4state.c */ |
191 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); | 191 | struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp); |
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 7b6df1852e75..823298561c0a 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c | |||
@@ -59,9 +59,10 @@ | |||
59 | #define NFSDBG_FACILITY NFSDBG_PROC | 59 | #define NFSDBG_FACILITY NFSDBG_PROC |
60 | 60 | ||
61 | void | 61 | void |
62 | nfs4_renew_state(void *data) | 62 | nfs4_renew_state(struct work_struct *work) |
63 | { | 63 | { |
64 | struct nfs_client *clp = (struct nfs_client *)data; | 64 | struct nfs_client *clp = |
65 | container_of(work, struct nfs_client, cl_renewd.work); | ||
65 | struct rpc_cred *cred; | 66 | struct rpc_cred *cred; |
66 | long lease, timeout; | 67 | long lease, timeout; |
67 | unsigned long last, now; | 68 | unsigned long last, now; |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 293b6495829f..e431e93ab503 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -1829,9 +1829,8 @@ out: | |||
1829 | } | 1829 | } |
1830 | 1830 | ||
1831 | static struct workqueue_struct *laundry_wq; | 1831 | static struct workqueue_struct *laundry_wq; |
1832 | static struct work_struct laundromat_work; | 1832 | static void laundromat_main(struct work_struct *); |
1833 | static void laundromat_main(void *); | 1833 | static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main); |
1834 | static DECLARE_WORK(laundromat_work, laundromat_main, NULL); | ||
1835 | 1834 | ||
1836 | __be32 | 1835 | __be32 |
1837 | nfsd4_renew(clientid_t *clid) | 1836 | nfsd4_renew(clientid_t *clid) |
@@ -1940,7 +1939,7 @@ nfs4_laundromat(void) | |||
1940 | } | 1939 | } |
1941 | 1940 | ||
1942 | void | 1941 | void |
1943 | laundromat_main(void *not_used) | 1942 | laundromat_main(struct work_struct *not_used) |
1944 | { | 1943 | { |
1945 | time_t t; | 1944 | time_t t; |
1946 | 1945 | ||
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 85a048b7a67b..edc91ca3792a 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -1197,10 +1197,12 @@ int ocfs2_flush_truncate_log(struct ocfs2_super *osb) | |||
1197 | return status; | 1197 | return status; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | static void ocfs2_truncate_log_worker(void *data) | 1200 | static void ocfs2_truncate_log_worker(struct work_struct *work) |
1201 | { | 1201 | { |
1202 | int status; | 1202 | int status; |
1203 | struct ocfs2_super *osb = data; | 1203 | struct ocfs2_super *osb = |
1204 | container_of(work, struct ocfs2_super, | ||
1205 | osb_truncate_log_wq.work); | ||
1204 | 1206 | ||
1205 | mlog_entry_void(); | 1207 | mlog_entry_void(); |
1206 | 1208 | ||
@@ -1432,7 +1434,8 @@ int ocfs2_truncate_log_init(struct ocfs2_super *osb) | |||
1432 | /* ocfs2_truncate_log_shutdown keys on the existence of | 1434 | /* ocfs2_truncate_log_shutdown keys on the existence of |
1433 | * osb->osb_tl_inode so we don't set any of the osb variables | 1435 | * osb->osb_tl_inode so we don't set any of the osb variables |
1434 | * until we're sure all is well. */ | 1436 | * until we're sure all is well. */ |
1435 | INIT_WORK(&osb->osb_truncate_log_wq, ocfs2_truncate_log_worker, osb); | 1437 | INIT_DELAYED_WORK(&osb->osb_truncate_log_wq, |
1438 | ocfs2_truncate_log_worker); | ||
1436 | osb->osb_tl_bh = tl_bh; | 1439 | osb->osb_tl_bh = tl_bh; |
1437 | osb->osb_tl_inode = tl_inode; | 1440 | osb->osb_tl_inode = tl_inode; |
1438 | 1441 | ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 305cba3681fe..4cd9a9580456 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c | |||
@@ -141,7 +141,7 @@ struct o2hb_region { | |||
141 | * recognizes a node going up and down in one iteration */ | 141 | * recognizes a node going up and down in one iteration */ |
142 | u64 hr_generation; | 142 | u64 hr_generation; |
143 | 143 | ||
144 | struct work_struct hr_write_timeout_work; | 144 | struct delayed_work hr_write_timeout_work; |
145 | unsigned long hr_last_timeout_start; | 145 | unsigned long hr_last_timeout_start; |
146 | 146 | ||
147 | /* Used during o2hb_check_slot to hold a copy of the block | 147 | /* Used during o2hb_check_slot to hold a copy of the block |
@@ -156,9 +156,11 @@ struct o2hb_bio_wait_ctxt { | |||
156 | int wc_error; | 156 | int wc_error; |
157 | }; | 157 | }; |
158 | 158 | ||
159 | static void o2hb_write_timeout(void *arg) | 159 | static void o2hb_write_timeout(struct work_struct *work) |
160 | { | 160 | { |
161 | struct o2hb_region *reg = arg; | 161 | struct o2hb_region *reg = |
162 | container_of(work, struct o2hb_region, | ||
163 | hr_write_timeout_work.work); | ||
162 | 164 | ||
163 | mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " | 165 | mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u " |
164 | "milliseconds\n", reg->hr_dev_name, | 166 | "milliseconds\n", reg->hr_dev_name, |
@@ -1404,7 +1406,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1404 | goto out; | 1406 | goto out; |
1405 | } | 1407 | } |
1406 | 1408 | ||
1407 | INIT_WORK(®->hr_write_timeout_work, o2hb_write_timeout, reg); | 1409 | INIT_DELAYED_WORK(®->hr_write_timeout_work, o2hb_write_timeout); |
1408 | 1410 | ||
1409 | /* | 1411 | /* |
1410 | * A node is considered live after it has beat LIVE_THRESHOLD | 1412 | * A node is considered live after it has beat LIVE_THRESHOLD |
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 7bba98fbfc15..4705d659fe57 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c | |||
@@ -88,7 +88,7 @@ void o2quo_disk_timeout(void) | |||
88 | o2quo_fence_self(); | 88 | o2quo_fence_self(); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void o2quo_make_decision(void *arg) | 91 | static void o2quo_make_decision(struct work_struct *work) |
92 | { | 92 | { |
93 | int quorum; | 93 | int quorum; |
94 | int lowest_hb, lowest_reachable = 0, fence = 0; | 94 | int lowest_hb, lowest_reachable = 0, fence = 0; |
@@ -306,7 +306,7 @@ void o2quo_init(void) | |||
306 | struct o2quo_state *qs = &o2quo_state; | 306 | struct o2quo_state *qs = &o2quo_state; |
307 | 307 | ||
308 | spin_lock_init(&qs->qs_lock); | 308 | spin_lock_init(&qs->qs_lock); |
309 | INIT_WORK(&qs->qs_work, o2quo_make_decision, NULL); | 309 | INIT_WORK(&qs->qs_work, o2quo_make_decision); |
310 | } | 310 | } |
311 | 311 | ||
312 | void o2quo_exit(void) | 312 | void o2quo_exit(void) |
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index b650efa8c8be..9b3209dc0b16 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c | |||
@@ -140,11 +140,11 @@ static int o2net_sys_err_translations[O2NET_ERR_MAX] = | |||
140 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; | 140 | [O2NET_ERR_DIED] = -EHOSTDOWN,}; |
141 | 141 | ||
142 | /* can't quite avoid *all* internal declarations :/ */ | 142 | /* can't quite avoid *all* internal declarations :/ */ |
143 | static void o2net_sc_connect_completed(void *arg); | 143 | static void o2net_sc_connect_completed(struct work_struct *work); |
144 | static void o2net_rx_until_empty(void *arg); | 144 | static void o2net_rx_until_empty(struct work_struct *work); |
145 | static void o2net_shutdown_sc(void *arg); | 145 | static void o2net_shutdown_sc(struct work_struct *work); |
146 | static void o2net_listen_data_ready(struct sock *sk, int bytes); | 146 | static void o2net_listen_data_ready(struct sock *sk, int bytes); |
147 | static void o2net_sc_send_keep_req(void *arg); | 147 | static void o2net_sc_send_keep_req(struct work_struct *work); |
148 | static void o2net_idle_timer(unsigned long data); | 148 | static void o2net_idle_timer(unsigned long data); |
149 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); | 149 | static void o2net_sc_postpone_idle(struct o2net_sock_container *sc); |
150 | 150 | ||
@@ -308,10 +308,10 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) | |||
308 | o2nm_node_get(node); | 308 | o2nm_node_get(node); |
309 | sc->sc_node = node; | 309 | sc->sc_node = node; |
310 | 310 | ||
311 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed, sc); | 311 | INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); |
312 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty, sc); | 312 | INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); |
313 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc, sc); | 313 | INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); |
314 | INIT_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req, sc); | 314 | INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req); |
315 | 315 | ||
316 | init_timer(&sc->sc_idle_timeout); | 316 | init_timer(&sc->sc_idle_timeout); |
317 | sc->sc_idle_timeout.function = o2net_idle_timer; | 317 | sc->sc_idle_timeout.function = o2net_idle_timer; |
@@ -342,7 +342,7 @@ static void o2net_sc_queue_work(struct o2net_sock_container *sc, | |||
342 | sc_put(sc); | 342 | sc_put(sc); |
343 | } | 343 | } |
344 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, | 344 | static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, |
345 | struct work_struct *work, | 345 | struct delayed_work *work, |
346 | int delay) | 346 | int delay) |
347 | { | 347 | { |
348 | sc_get(sc); | 348 | sc_get(sc); |
@@ -350,7 +350,7 @@ static void o2net_sc_queue_delayed_work(struct o2net_sock_container *sc, | |||
350 | sc_put(sc); | 350 | sc_put(sc); |
351 | } | 351 | } |
352 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, | 352 | static void o2net_sc_cancel_delayed_work(struct o2net_sock_container *sc, |
353 | struct work_struct *work) | 353 | struct delayed_work *work) |
354 | { | 354 | { |
355 | if (cancel_delayed_work(work)) | 355 | if (cancel_delayed_work(work)) |
356 | sc_put(sc); | 356 | sc_put(sc); |
@@ -564,9 +564,11 @@ static void o2net_ensure_shutdown(struct o2net_node *nn, | |||
564 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state | 564 | * ourselves as state_change couldn't get the nn_lock and call set_nn_state |
565 | * itself. | 565 | * itself. |
566 | */ | 566 | */ |
567 | static void o2net_shutdown_sc(void *arg) | 567 | static void o2net_shutdown_sc(struct work_struct *work) |
568 | { | 568 | { |
569 | struct o2net_sock_container *sc = arg; | 569 | struct o2net_sock_container *sc = |
570 | container_of(work, struct o2net_sock_container, | ||
571 | sc_shutdown_work); | ||
570 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); | 572 | struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); |
571 | 573 | ||
572 | sclog(sc, "shutting down\n"); | 574 | sclog(sc, "shutting down\n"); |
@@ -1201,9 +1203,10 @@ out: | |||
1201 | /* this work func is triggerd by data ready. it reads until it can read no | 1203 | /* this work func is triggerd by data ready. it reads until it can read no |
1202 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing | 1204 | * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing |
1203 | * our work the work struct will be marked and we'll be called again. */ | 1205 | * our work the work struct will be marked and we'll be called again. */ |
1204 | static void o2net_rx_until_empty(void *arg) | 1206 | static void o2net_rx_until_empty(struct work_struct *work) |
1205 | { | 1207 | { |
1206 | struct o2net_sock_container *sc = arg; | 1208 | struct o2net_sock_container *sc = |
1209 | container_of(work, struct o2net_sock_container, sc_rx_work); | ||
1207 | int ret; | 1210 | int ret; |
1208 | 1211 | ||
1209 | do { | 1212 | do { |
@@ -1249,9 +1252,11 @@ static int o2net_set_nodelay(struct socket *sock) | |||
1249 | 1252 | ||
1250 | /* called when a connect completes and after a sock is accepted. the | 1253 | /* called when a connect completes and after a sock is accepted. the |
1251 | * rx path will see the response and mark the sc valid */ | 1254 | * rx path will see the response and mark the sc valid */ |
1252 | static void o2net_sc_connect_completed(void *arg) | 1255 | static void o2net_sc_connect_completed(struct work_struct *work) |
1253 | { | 1256 | { |
1254 | struct o2net_sock_container *sc = arg; | 1257 | struct o2net_sock_container *sc = |
1258 | container_of(work, struct o2net_sock_container, | ||
1259 | sc_connect_work); | ||
1255 | 1260 | ||
1256 | mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", | 1261 | mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n", |
1257 | (unsigned long long)O2NET_PROTOCOL_VERSION, | 1262 | (unsigned long long)O2NET_PROTOCOL_VERSION, |
@@ -1262,9 +1267,11 @@ static void o2net_sc_connect_completed(void *arg) | |||
1262 | } | 1267 | } |
1263 | 1268 | ||
1264 | /* this is called as a work_struct func. */ | 1269 | /* this is called as a work_struct func. */ |
1265 | static void o2net_sc_send_keep_req(void *arg) | 1270 | static void o2net_sc_send_keep_req(struct work_struct *work) |
1266 | { | 1271 | { |
1267 | struct o2net_sock_container *sc = arg; | 1272 | struct o2net_sock_container *sc = |
1273 | container_of(work, struct o2net_sock_container, | ||
1274 | sc_keepalive_work.work); | ||
1268 | 1275 | ||
1269 | o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); | 1276 | o2net_sendpage(sc, o2net_keep_req, sizeof(*o2net_keep_req)); |
1270 | sc_put(sc); | 1277 | sc_put(sc); |
@@ -1314,14 +1321,15 @@ static void o2net_sc_postpone_idle(struct o2net_sock_container *sc) | |||
1314 | * having a connect attempt fail, etc. This centralizes the logic which decides | 1321 | * having a connect attempt fail, etc. This centralizes the logic which decides |
1315 | * if a connect attempt should be made or if we should give up and all future | 1322 | * if a connect attempt should be made or if we should give up and all future |
1316 | * transmit attempts should fail */ | 1323 | * transmit attempts should fail */ |
1317 | static void o2net_start_connect(void *arg) | 1324 | static void o2net_start_connect(struct work_struct *work) |
1318 | { | 1325 | { |
1319 | struct o2net_node *nn = arg; | 1326 | struct o2net_node *nn = |
1327 | container_of(work, struct o2net_node, nn_connect_work.work); | ||
1320 | struct o2net_sock_container *sc = NULL; | 1328 | struct o2net_sock_container *sc = NULL; |
1321 | struct o2nm_node *node = NULL, *mynode = NULL; | 1329 | struct o2nm_node *node = NULL, *mynode = NULL; |
1322 | struct socket *sock = NULL; | 1330 | struct socket *sock = NULL; |
1323 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; | 1331 | struct sockaddr_in myaddr = {0, }, remoteaddr = {0, }; |
1324 | int ret = 0; | 1332 | int ret = 0, stop; |
1325 | 1333 | ||
1326 | /* if we're greater we initiate tx, otherwise we accept */ | 1334 | /* if we're greater we initiate tx, otherwise we accept */ |
1327 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) | 1335 | if (o2nm_this_node() <= o2net_num_from_nn(nn)) |
@@ -1342,10 +1350,9 @@ static void o2net_start_connect(void *arg) | |||
1342 | 1350 | ||
1343 | spin_lock(&nn->nn_lock); | 1351 | spin_lock(&nn->nn_lock); |
1344 | /* see if we already have one pending or have given up */ | 1352 | /* see if we already have one pending or have given up */ |
1345 | if (nn->nn_sc || nn->nn_persistent_error) | 1353 | stop = (nn->nn_sc || nn->nn_persistent_error); |
1346 | arg = NULL; | ||
1347 | spin_unlock(&nn->nn_lock); | 1354 | spin_unlock(&nn->nn_lock); |
1348 | if (arg == NULL) /* *shrug*, needed some indicator */ | 1355 | if (stop) |
1349 | goto out; | 1356 | goto out; |
1350 | 1357 | ||
1351 | nn->nn_last_connect_attempt = jiffies; | 1358 | nn->nn_last_connect_attempt = jiffies; |
@@ -1421,9 +1428,10 @@ out: | |||
1421 | return; | 1428 | return; |
1422 | } | 1429 | } |
1423 | 1430 | ||
1424 | static void o2net_connect_expired(void *arg) | 1431 | static void o2net_connect_expired(struct work_struct *work) |
1425 | { | 1432 | { |
1426 | struct o2net_node *nn = arg; | 1433 | struct o2net_node *nn = |
1434 | container_of(work, struct o2net_node, nn_connect_expired.work); | ||
1427 | 1435 | ||
1428 | spin_lock(&nn->nn_lock); | 1436 | spin_lock(&nn->nn_lock); |
1429 | if (!nn->nn_sc_valid) { | 1437 | if (!nn->nn_sc_valid) { |
@@ -1436,9 +1444,10 @@ static void o2net_connect_expired(void *arg) | |||
1436 | spin_unlock(&nn->nn_lock); | 1444 | spin_unlock(&nn->nn_lock); |
1437 | } | 1445 | } |
1438 | 1446 | ||
1439 | static void o2net_still_up(void *arg) | 1447 | static void o2net_still_up(struct work_struct *work) |
1440 | { | 1448 | { |
1441 | struct o2net_node *nn = arg; | 1449 | struct o2net_node *nn = |
1450 | container_of(work, struct o2net_node, nn_still_up.work); | ||
1442 | 1451 | ||
1443 | o2quo_hb_still_up(o2net_num_from_nn(nn)); | 1452 | o2quo_hb_still_up(o2net_num_from_nn(nn)); |
1444 | } | 1453 | } |
@@ -1644,9 +1653,9 @@ out: | |||
1644 | return ret; | 1653 | return ret; |
1645 | } | 1654 | } |
1646 | 1655 | ||
1647 | static void o2net_accept_many(void *arg) | 1656 | static void o2net_accept_many(struct work_struct *work) |
1648 | { | 1657 | { |
1649 | struct socket *sock = arg; | 1658 | struct socket *sock = o2net_listen_sock; |
1650 | while (o2net_accept_one(sock) == 0) | 1659 | while (o2net_accept_one(sock) == 0) |
1651 | cond_resched(); | 1660 | cond_resched(); |
1652 | } | 1661 | } |
@@ -1700,7 +1709,7 @@ static int o2net_open_listening_sock(__be16 port) | |||
1700 | write_unlock_bh(&sock->sk->sk_callback_lock); | 1709 | write_unlock_bh(&sock->sk->sk_callback_lock); |
1701 | 1710 | ||
1702 | o2net_listen_sock = sock; | 1711 | o2net_listen_sock = sock; |
1703 | INIT_WORK(&o2net_listen_work, o2net_accept_many, sock); | 1712 | INIT_WORK(&o2net_listen_work, o2net_accept_many); |
1704 | 1713 | ||
1705 | sock->sk->sk_reuse = 1; | 1714 | sock->sk->sk_reuse = 1; |
1706 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); | 1715 | ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); |
@@ -1819,9 +1828,10 @@ int o2net_init(void) | |||
1819 | struct o2net_node *nn = o2net_nn_from_num(i); | 1828 | struct o2net_node *nn = o2net_nn_from_num(i); |
1820 | 1829 | ||
1821 | spin_lock_init(&nn->nn_lock); | 1830 | spin_lock_init(&nn->nn_lock); |
1822 | INIT_WORK(&nn->nn_connect_work, o2net_start_connect, nn); | 1831 | INIT_DELAYED_WORK(&nn->nn_connect_work, o2net_start_connect); |
1823 | INIT_WORK(&nn->nn_connect_expired, o2net_connect_expired, nn); | 1832 | INIT_DELAYED_WORK(&nn->nn_connect_expired, |
1824 | INIT_WORK(&nn->nn_still_up, o2net_still_up, nn); | 1833 | o2net_connect_expired); |
1834 | INIT_DELAYED_WORK(&nn->nn_still_up, o2net_still_up); | ||
1825 | /* until we see hb from a node we'll return einval */ | 1835 | /* until we see hb from a node we'll return einval */ |
1826 | nn->nn_persistent_error = -ENOTCONN; | 1836 | nn->nn_persistent_error = -ENOTCONN; |
1827 | init_waitqueue_head(&nn->nn_sc_wq); | 1837 | init_waitqueue_head(&nn->nn_sc_wq); |
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 4b46aac7d243..daebbd3a2c8c 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h | |||
@@ -86,18 +86,18 @@ struct o2net_node { | |||
86 | * connect attempt fails and so can be self-arming. shutdown is | 86 | * connect attempt fails and so can be self-arming. shutdown is |
87 | * careful to first mark the nn such that no connects will be attempted | 87 | * careful to first mark the nn such that no connects will be attempted |
88 | * before canceling delayed connect work and flushing the queue. */ | 88 | * before canceling delayed connect work and flushing the queue. */ |
89 | struct work_struct nn_connect_work; | 89 | struct delayed_work nn_connect_work; |
90 | unsigned long nn_last_connect_attempt; | 90 | unsigned long nn_last_connect_attempt; |
91 | 91 | ||
92 | /* this is queued as nodes come up and is canceled when a connection is | 92 | /* this is queued as nodes come up and is canceled when a connection is |
93 | * established. this expiring gives up on the node and errors out | 93 | * established. this expiring gives up on the node and errors out |
94 | * transmits */ | 94 | * transmits */ |
95 | struct work_struct nn_connect_expired; | 95 | struct delayed_work nn_connect_expired; |
96 | 96 | ||
97 | /* after we give up on a socket we wait a while before deciding | 97 | /* after we give up on a socket we wait a while before deciding |
98 | * that it is still heartbeating and that we should do some | 98 | * that it is still heartbeating and that we should do some |
99 | * quorum work */ | 99 | * quorum work */ |
100 | struct work_struct nn_still_up; | 100 | struct delayed_work nn_still_up; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | struct o2net_sock_container { | 103 | struct o2net_sock_container { |
@@ -129,7 +129,7 @@ struct o2net_sock_container { | |||
129 | struct work_struct sc_shutdown_work; | 129 | struct work_struct sc_shutdown_work; |
130 | 130 | ||
131 | struct timer_list sc_idle_timeout; | 131 | struct timer_list sc_idle_timeout; |
132 | struct work_struct sc_keepalive_work; | 132 | struct delayed_work sc_keepalive_work; |
133 | 133 | ||
134 | unsigned sc_handshake_ok:1; | 134 | unsigned sc_handshake_ok:1; |
135 | 135 | ||
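
The o2net header changes above are the structural half of the conversion: members that are ever queued with a timeout change type from struct work_struct to struct delayed_work, and internal helpers that queue or cancel them change their parameter type to match. A small sketch, names invented:

#include <linux/workqueue.h>

struct my_node {
	struct work_struct	rx_work;	/* still plain, queued immediately */
	struct delayed_work	connect_work;	/* queued with a timeout */
	struct delayed_work	keepalive_work;
};

/* helpers that queue or cancel now name the exact type they handle */
static void my_queue_delayed(struct workqueue_struct *wq,
			     struct delayed_work *dw, unsigned long delay)
{
	queue_delayed_work(wq, dw, delay);
}

static int my_cancel_delayed(struct delayed_work *dw)
{
	/* nonzero if the work was still pending and has been removed */
	return cancel_delayed_work(dw);
}
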
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index fa968180b072..6b6ff76538c5 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -153,7 +153,7 @@ static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned | |||
153 | * called functions that cannot be directly called from the | 153 | * called functions that cannot be directly called from the |
154 | * net message handlers for some reason, usually because | 154 | * net message handlers for some reason, usually because |
155 | * they need to send net messages of their own. */ | 155 | * they need to send net messages of their own. */ |
156 | void dlm_dispatch_work(void *data); | 156 | void dlm_dispatch_work(struct work_struct *work); |
157 | 157 | ||
158 | struct dlm_lock_resource; | 158 | struct dlm_lock_resource; |
159 | struct dlm_work_item; | 159 | struct dlm_work_item; |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index f6cdab3a2c6a..420a375a3949 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -1297,7 +1297,7 @@ static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | |||
1297 | 1297 | ||
1298 | spin_lock_init(&dlm->work_lock); | 1298 | spin_lock_init(&dlm->work_lock); |
1299 | INIT_LIST_HEAD(&dlm->work_list); | 1299 | INIT_LIST_HEAD(&dlm->work_list); |
1300 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); | 1300 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work); |
1301 | 1301 | ||
1302 | kref_init(&dlm->dlm_refs); | 1302 | kref_init(&dlm->dlm_refs); |
1303 | dlm->dlm_state = DLM_CTXT_NEW; | 1303 | dlm->dlm_state = DLM_CTXT_NEW; |
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 9d950d7cea38..fb3e2b0817f1 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -153,9 +153,10 @@ static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) | |||
153 | } | 153 | } |
154 | 154 | ||
155 | /* Worker function used during recovery. */ | 155 | /* Worker function used during recovery. */ |
156 | void dlm_dispatch_work(void *data) | 156 | void dlm_dispatch_work(struct work_struct *work) |
157 | { | 157 | { |
158 | struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; | 158 | struct dlm_ctxt *dlm = |
159 | container_of(work, struct dlm_ctxt, dispatched_work); | ||
159 | LIST_HEAD(tmp_list); | 160 | LIST_HEAD(tmp_list); |
160 | struct list_head *iter, *iter2; | 161 | struct list_head *iter, *iter2; |
161 | struct dlm_work_item *item; | 162 | struct dlm_work_item *item; |
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c index eead48bbfac6..7d2f578b267d 100644 --- a/fs/ocfs2/dlm/userdlm.c +++ b/fs/ocfs2/dlm/userdlm.c | |||
@@ -171,15 +171,14 @@ static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) | |||
171 | BUG(); | 171 | BUG(); |
172 | } | 172 | } |
173 | 173 | ||
174 | static void user_dlm_unblock_lock(void *opaque); | 174 | static void user_dlm_unblock_lock(struct work_struct *work); |
175 | 175 | ||
176 | static void __user_dlm_queue_lockres(struct user_lock_res *lockres) | 176 | static void __user_dlm_queue_lockres(struct user_lock_res *lockres) |
177 | { | 177 | { |
178 | if (!(lockres->l_flags & USER_LOCK_QUEUED)) { | 178 | if (!(lockres->l_flags & USER_LOCK_QUEUED)) { |
179 | user_dlm_grab_inode_ref(lockres); | 179 | user_dlm_grab_inode_ref(lockres); |
180 | 180 | ||
181 | INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, | 181 | INIT_WORK(&lockres->l_work, user_dlm_unblock_lock); |
182 | lockres); | ||
183 | 182 | ||
184 | queue_work(user_dlm_worker, &lockres->l_work); | 183 | queue_work(user_dlm_worker, &lockres->l_work); |
185 | lockres->l_flags |= USER_LOCK_QUEUED; | 184 | lockres->l_flags |= USER_LOCK_QUEUED; |
@@ -279,10 +278,11 @@ static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) | |||
279 | iput(inode); | 278 | iput(inode); |
280 | } | 279 | } |
281 | 280 | ||
282 | static void user_dlm_unblock_lock(void *opaque) | 281 | static void user_dlm_unblock_lock(struct work_struct *work) |
283 | { | 282 | { |
284 | int new_level, status; | 283 | int new_level, status; |
285 | struct user_lock_res *lockres = (struct user_lock_res *) opaque; | 284 | struct user_lock_res *lockres = |
285 | container_of(work, struct user_lock_res, l_work); | ||
286 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); | 286 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); |
287 | 287 | ||
288 | mlog(0, "processing lockres %.*s\n", lockres->l_namelen, | 288 | mlog(0, "processing lockres %.*s\n", lockres->l_namelen, |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index c0ad7cb59521..1d7f4ab1e5ed 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -703,11 +703,12 @@ struct ocfs2_la_recovery_item { | |||
703 | * NOTE: This function can and will sleep on recovery of other nodes | 703 | * NOTE: This function can and will sleep on recovery of other nodes |
704 | * during cluster locking, just like any other ocfs2 process. | 704 | * during cluster locking, just like any other ocfs2 process. |
705 | */ | 705 | */ |
706 | void ocfs2_complete_recovery(void *data) | 706 | void ocfs2_complete_recovery(struct work_struct *work) |
707 | { | 707 | { |
708 | int ret; | 708 | int ret; |
709 | struct ocfs2_super *osb = data; | 709 | struct ocfs2_journal *journal = |
710 | struct ocfs2_journal *journal = osb->journal; | 710 | container_of(work, struct ocfs2_journal, j_recovery_work); |
711 | struct ocfs2_super *osb = journal->j_osb; | ||
711 | struct ocfs2_dinode *la_dinode, *tl_dinode; | 712 | struct ocfs2_dinode *la_dinode, *tl_dinode; |
712 | struct ocfs2_la_recovery_item *item; | 713 | struct ocfs2_la_recovery_item *item; |
713 | struct list_head *p, *n; | 714 | struct list_head *p, *n; |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index d86cb960b7ec..899112ad8136 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -133,7 +133,7 @@ static inline void ocfs2_inode_set_new(struct ocfs2_super *osb, | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* Exported only for the journal struct init code in super.c. Do not call. */ | 135 | /* Exported only for the journal struct init code in super.c. Do not call. */ |
136 | void ocfs2_complete_recovery(void *data); | 136 | void ocfs2_complete_recovery(struct work_struct *work); |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * Journal Control: | 139 | * Journal Control: |
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 078883772bd6..b767fd7da6eb 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h | |||
@@ -285,7 +285,7 @@ struct ocfs2_super | |||
285 | /* Truncate log info */ | 285 | /* Truncate log info */ |
286 | struct inode *osb_tl_inode; | 286 | struct inode *osb_tl_inode; |
287 | struct buffer_head *osb_tl_bh; | 287 | struct buffer_head *osb_tl_bh; |
288 | struct work_struct osb_truncate_log_wq; | 288 | struct delayed_work osb_truncate_log_wq; |
289 | 289 | ||
290 | struct ocfs2_node_map osb_recovering_orphan_dirs; | 290 | struct ocfs2_node_map osb_recovering_orphan_dirs; |
291 | unsigned int *osb_orphan_wipes; | 291 | unsigned int *osb_orphan_wipes; |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index b0992573dee2..d9b4214a12da 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1365,7 +1365,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
1365 | spin_lock_init(&journal->j_lock); | 1365 | spin_lock_init(&journal->j_lock); |
1366 | journal->j_trans_id = (unsigned long) 1; | 1366 | journal->j_trans_id = (unsigned long) 1; |
1367 | INIT_LIST_HEAD(&journal->j_la_cleanups); | 1367 | INIT_LIST_HEAD(&journal->j_la_cleanups); |
1368 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery, osb); | 1368 | INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery); |
1369 | journal->j_state = OCFS2_JOURNAL_FREE; | 1369 | journal->j_state = OCFS2_JOURNAL_FREE; |
1370 | 1370 | ||
1371 | /* get some pseudo constants for clustersize bits */ | 1371 | /* get some pseudo constants for clustersize bits */ |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index ac93174c9639..7280a23ef344 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -104,7 +104,7 @@ static int release_journal_dev(struct super_block *super, | |||
104 | struct reiserfs_journal *journal); | 104 | struct reiserfs_journal *journal); |
105 | static int dirty_one_transaction(struct super_block *s, | 105 | static int dirty_one_transaction(struct super_block *s, |
106 | struct reiserfs_journal_list *jl); | 106 | struct reiserfs_journal_list *jl); |
107 | static void flush_async_commits(void *p); | 107 | static void flush_async_commits(struct work_struct *work); |
108 | static void queue_log_writer(struct super_block *s); | 108 | static void queue_log_writer(struct super_block *s); |
109 | 109 | ||
110 | /* values for join in do_journal_begin_r */ | 110 | /* values for join in do_journal_begin_r */ |
@@ -2836,7 +2836,8 @@ int journal_init(struct super_block *p_s_sb, const char *j_dev_name, | |||
2836 | if (reiserfs_mounted_fs_count <= 1) | 2836 | if (reiserfs_mounted_fs_count <= 1) |
2837 | commit_wq = create_workqueue("reiserfs"); | 2837 | commit_wq = create_workqueue("reiserfs"); |
2838 | 2838 | ||
2839 | INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb); | 2839 | INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); |
2840 | journal->j_work_sb = p_s_sb; | ||
2840 | return 0; | 2841 | return 0; |
2841 | free_and_return: | 2842 | free_and_return: |
2842 | free_journal_ram(p_s_sb); | 2843 | free_journal_ram(p_s_sb); |
@@ -3447,10 +3448,11 @@ int journal_end_sync(struct reiserfs_transaction_handle *th, | |||
3447 | /* | 3448 | /* |
3448 | ** writeback the pending async commits to disk | 3449 | ** writeback the pending async commits to disk |
3449 | */ | 3450 | */ |
3450 | static void flush_async_commits(void *p) | 3451 | static void flush_async_commits(struct work_struct *work) |
3451 | { | 3452 | { |
3452 | struct super_block *p_s_sb = p; | 3453 | struct reiserfs_journal *journal = |
3453 | struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb); | 3454 | container_of(work, struct reiserfs_journal, j_work.work); |
3455 | struct super_block *p_s_sb = journal->j_work_sb; | ||
3454 | struct reiserfs_journal_list *jl; | 3456 | struct reiserfs_journal_list *jl; |
3455 | struct list_head *entry; | 3457 | struct list_head *entry; |
3456 | 3458 | ||
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 09360cf1e1f2..8e6b56fc1cad 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -149,9 +149,10 @@ xfs_destroy_ioend( | |||
149 | */ | 149 | */ |
150 | STATIC void | 150 | STATIC void |
151 | xfs_end_bio_delalloc( | 151 | xfs_end_bio_delalloc( |
152 | void *data) | 152 | struct work_struct *work) |
153 | { | 153 | { |
154 | xfs_ioend_t *ioend = data; | 154 | xfs_ioend_t *ioend = |
155 | container_of(work, xfs_ioend_t, io_work); | ||
155 | 156 | ||
156 | xfs_destroy_ioend(ioend); | 157 | xfs_destroy_ioend(ioend); |
157 | } | 158 | } |
@@ -161,9 +162,10 @@ xfs_end_bio_delalloc( | |||
161 | */ | 162 | */ |
162 | STATIC void | 163 | STATIC void |
163 | xfs_end_bio_written( | 164 | xfs_end_bio_written( |
164 | void *data) | 165 | struct work_struct *work) |
165 | { | 166 | { |
166 | xfs_ioend_t *ioend = data; | 167 | xfs_ioend_t *ioend = |
168 | container_of(work, xfs_ioend_t, io_work); | ||
167 | 169 | ||
168 | xfs_destroy_ioend(ioend); | 170 | xfs_destroy_ioend(ioend); |
169 | } | 171 | } |
@@ -176,9 +178,10 @@ xfs_end_bio_written( | |||
176 | */ | 178 | */ |
177 | STATIC void | 179 | STATIC void |
178 | xfs_end_bio_unwritten( | 180 | xfs_end_bio_unwritten( |
179 | void *data) | 181 | struct work_struct *work) |
180 | { | 182 | { |
181 | xfs_ioend_t *ioend = data; | 183 | xfs_ioend_t *ioend = |
184 | container_of(work, xfs_ioend_t, io_work); | ||
182 | bhv_vnode_t *vp = ioend->io_vnode; | 185 | bhv_vnode_t *vp = ioend->io_vnode; |
183 | xfs_off_t offset = ioend->io_offset; | 186 | xfs_off_t offset = ioend->io_offset; |
184 | size_t size = ioend->io_size; | 187 | size_t size = ioend->io_size; |
@@ -220,11 +223,11 @@ xfs_alloc_ioend( | |||
220 | ioend->io_size = 0; | 223 | ioend->io_size = 0; |
221 | 224 | ||
222 | if (type == IOMAP_UNWRITTEN) | 225 | if (type == IOMAP_UNWRITTEN) |
223 | INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend); | 226 | INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten); |
224 | else if (type == IOMAP_DELAY) | 227 | else if (type == IOMAP_DELAY) |
225 | INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend); | 228 | INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc); |
226 | else | 229 | else |
227 | INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend); | 230 | INIT_WORK(&ioend->io_work, xfs_end_bio_written); |
228 | 231 | ||
229 | return ioend; | 232 | return ioend; |
230 | } | 233 | } |
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index d3382843698e..eef4a0ba11e9 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -994,9 +994,10 @@ xfs_buf_wait_unpin( | |||
994 | 994 | ||
995 | STATIC void | 995 | STATIC void |
996 | xfs_buf_iodone_work( | 996 | xfs_buf_iodone_work( |
997 | void *v) | 997 | struct work_struct *work) |
998 | { | 998 | { |
999 | xfs_buf_t *bp = (xfs_buf_t *)v; | 999 | xfs_buf_t *bp = |
1000 | container_of(work, xfs_buf_t, b_iodone_work); | ||
1000 | 1001 | ||
1001 | if (bp->b_iodone) | 1002 | if (bp->b_iodone) |
1002 | (*(bp->b_iodone))(bp); | 1003 | (*(bp->b_iodone))(bp); |
@@ -1017,10 +1018,10 @@ xfs_buf_ioend( | |||
1017 | 1018 | ||
1018 | if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { | 1019 | if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) { |
1019 | if (schedule) { | 1020 | if (schedule) { |
1020 | INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp); | 1021 | INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); |
1021 | queue_work(xfslogd_workqueue, &bp->b_iodone_work); | 1022 | queue_work(xfslogd_workqueue, &bp->b_iodone_work); |
1022 | } else { | 1023 | } else { |
1023 | xfs_buf_iodone_work(bp); | 1024 | xfs_buf_iodone_work(&bp->b_iodone_work); |
1024 | } | 1025 | } |
1025 | } else { | 1026 | } else { |
1026 | up(&bp->b_iodonesema); | 1027 | up(&bp->b_iodonesema); |
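For non-delayed work the XFS hunks show the same idea without the .work indirection: each handler recovers its ioend or buffer with container_of() on the work_struct itself, INIT_WORK() loses its third argument, and the synchronous fall-through now calls the handler with the address of the embedded work item (xfs_buf_iodone_work(&bp->b_iodone_work)) rather than the object pointer. A hedged sketch with illustrative names:

	#include <linux/workqueue.h>

	struct example_buf {
		struct work_struct	b_work;
		void			(*b_done)(struct example_buf *);
	};

	static void example_iodone_work(struct work_struct *work)
	{
		struct example_buf *bp = container_of(work, struct example_buf, b_work);

		if (bp->b_done)
			bp->b_done(bp);
	}

	static void example_ioend(struct example_buf *bp, int defer)
	{
		INIT_WORK(&bp->b_work, example_iodone_work);
		if (defer)
			schedule_work(&bp->b_work);
		else
			example_iodone_work(&bp->b_work); /* synchronous path passes the work item */
	}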
diff --git a/include/asm-arm/arch-omap/irda.h b/include/asm-arm/arch-omap/irda.h index 805ae3575e44..345a649ec838 100644 --- a/include/asm-arm/arch-omap/irda.h +++ b/include/asm-arm/arch-omap/irda.h | |||
@@ -24,7 +24,7 @@ struct omap_irda_config { | |||
24 | /* Very specific to the needs of some platforms (h3,h4) | 24 | /* Very specific to the needs of some platforms (h3,h4) |
25 | * having calls which can sleep in irda_set_speed. | 25 | * having calls which can sleep in irda_set_speed. |
26 | */ | 26 | */ |
27 | struct work_struct gpio_expa; | 27 | struct delayed_work gpio_expa; |
28 | int rx_channel; | 28 | int rx_channel; |
29 | int tx_channel; | 29 | int tx_channel; |
30 | unsigned long dest_start; | 30 | unsigned long dest_start; |
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index 51a166242522..a6c024e2506f 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h | |||
@@ -14,7 +14,7 @@ | |||
14 | * on us. We need to use _exactly_ the address the user gave us, | 14 | * on us. We need to use _exactly_ the address the user gave us, |
15 | * not some alias that contains the same information. | 15 | * not some alias that contains the same information. |
16 | */ | 16 | */ |
17 | typedef struct { volatile int counter; } atomic_t; | 17 | typedef struct { int counter; } atomic_t; |
18 | 18 | ||
19 | #define ATOMIC_INIT(i) { (i) } | 19 | #define ATOMIC_INIT(i) { (i) } |
20 | 20 | ||
diff --git a/include/asm-i386/spinlock_types.h b/include/asm-i386/spinlock_types.h index 59efe849f351..4da9345c1500 100644 --- a/include/asm-i386/spinlock_types.h +++ b/include/asm-i386/spinlock_types.h | |||
@@ -6,13 +6,13 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int slock; | 9 | unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } raw_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int lock; | 15 | unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } raw_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
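The i386 atomic_t and spinlock hunks drop the volatile qualifier from the underlying words. The qualifier buys no ordering guarantees on SMP -- the operations that matter (atomic_add(), spin_lock(), ...) are written in inline assembly, and code that polls a plain value is expected to use explicit barriers -- while it pessimizes ordinary accesses. Where a single accessor really must force a fresh load, it can cast through a volatile pointer at the point of use; an illustrative helper, not part of this patch:

	/* illustrative only: force one fresh load even though the member is no
	 * longer declared volatile */
	static inline int example_read_once(atomic_t *v)
	{
		return *(volatile int *)&v->counter;
	}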
diff --git a/include/asm-m68knommu/irq.h b/include/asm-m68knommu/irq.h index 45e7a2fd1689..7b8f874f8429 100644 --- a/include/asm-m68knommu/irq.h +++ b/include/asm-m68knommu/irq.h | |||
@@ -86,5 +86,6 @@ extern void (*mach_disable_irq)(unsigned int); | |||
86 | #define enable_irq(x) do { } while (0) | 86 | #define enable_irq(x) do { } while (0) |
87 | #define disable_irq(x) do { } while (0) | 87 | #define disable_irq(x) do { } while (0) |
88 | #define disable_irq_nosync(x) disable_irq(x) | 88 | #define disable_irq_nosync(x) disable_irq(x) |
89 | #define irq_canonicalize(irq) (irq) | ||
89 | 90 | ||
90 | #endif /* _M68K_IRQ_H_ */ | 91 | #endif /* _M68K_IRQ_H_ */ |
diff --git a/include/asm-m68knommu/rtc.h b/include/asm-m68knommu/rtc.h new file mode 100644 index 000000000000..eaf18ec83c8e --- /dev/null +++ b/include/asm-m68knommu/rtc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-m68k/rtc.h> | |||
diff --git a/include/asm-m68knommu/ucontext.h b/include/asm-m68knommu/ucontext.h index 5d570cedbb02..713a27f901cd 100644 --- a/include/asm-m68knommu/ucontext.h +++ b/include/asm-m68knommu/ucontext.h | |||
@@ -5,21 +5,17 @@ typedef int greg_t; | |||
5 | #define NGREG 18 | 5 | #define NGREG 18 |
6 | typedef greg_t gregset_t[NGREG]; | 6 | typedef greg_t gregset_t[NGREG]; |
7 | 7 | ||
8 | #ifdef CONFIG_FPU | ||
9 | typedef struct fpregset { | 8 | typedef struct fpregset { |
10 | int f_pcr; | 9 | int f_pcr; |
11 | int f_psr; | 10 | int f_psr; |
12 | int f_fpiaddr; | 11 | int f_fpiaddr; |
13 | int f_fpregs[8][3]; | 12 | int f_fpregs[8][3]; |
14 | } fpregset_t; | 13 | } fpregset_t; |
15 | #endif | ||
16 | 14 | ||
17 | struct mcontext { | 15 | struct mcontext { |
18 | int version; | 16 | int version; |
19 | gregset_t gregs; | 17 | gregset_t gregs; |
20 | #ifdef CONFIG_FPU | ||
21 | fpregset_t fpregs; | 18 | fpregset_t fpregs; |
22 | #endif | ||
23 | }; | 19 | }; |
24 | 20 | ||
25 | #define MCONTEXT_VERSION 2 | 21 | #define MCONTEXT_VERSION 2 |
@@ -29,9 +25,7 @@ struct ucontext { | |||
29 | struct ucontext *uc_link; | 25 | struct ucontext *uc_link; |
30 | stack_t uc_stack; | 26 | stack_t uc_stack; |
31 | struct mcontext uc_mcontext; | 27 | struct mcontext uc_mcontext; |
32 | #ifdef CONFIG_FPU | ||
33 | unsigned long uc_filler[80]; | 28 | unsigned long uc_filler[80]; |
34 | #endif | ||
35 | sigset_t uc_sigmask; /* mask last for extensibility */ | 29 | sigset_t uc_sigmask; /* mask last for extensibility */ |
36 | }; | 30 | }; |
37 | 31 | ||
diff --git a/include/asm-mips/i8259.h b/include/asm-mips/i8259.h index 0214abe3f0af..4df8d8b118c0 100644 --- a/include/asm-mips/i8259.h +++ b/include/asm-mips/i8259.h | |||
@@ -19,10 +19,31 @@ | |||
19 | 19 | ||
20 | #include <asm/io.h> | 20 | #include <asm/io.h> |
21 | 21 | ||
22 | /* i8259A PIC registers */ | ||
23 | #define PIC_MASTER_CMD 0x20 | ||
24 | #define PIC_MASTER_IMR 0x21 | ||
25 | #define PIC_MASTER_ISR PIC_MASTER_CMD | ||
26 | #define PIC_MASTER_POLL PIC_MASTER_ISR | ||
27 | #define PIC_MASTER_OCW3 PIC_MASTER_ISR | ||
28 | #define PIC_SLAVE_CMD 0xa0 | ||
29 | #define PIC_SLAVE_IMR 0xa1 | ||
30 | |||
31 | /* i8259A PIC related value */ | ||
32 | #define PIC_CASCADE_IR 2 | ||
33 | #define MASTER_ICW4_DEFAULT 0x01 | ||
34 | #define SLAVE_ICW4_DEFAULT 0x01 | ||
35 | #define PIC_ICW4_AEOI 2 | ||
36 | |||
22 | extern spinlock_t i8259A_lock; | 37 | extern spinlock_t i8259A_lock; |
23 | 38 | ||
39 | extern void init_8259A(int auto_eoi); | ||
40 | extern void enable_8259A_irq(unsigned int irq); | ||
41 | extern void disable_8259A_irq(unsigned int irq); | ||
42 | |||
24 | extern void init_i8259_irqs(void); | 43 | extern void init_i8259_irqs(void); |
25 | 44 | ||
45 | #define I8259A_IRQ_BASE 0 | ||
46 | |||
26 | /* | 47 | /* |
27 | * Do the traditional i8259 interrupt polling thing. This is for the few | 48 | * Do the traditional i8259 interrupt polling thing. This is for the few |
28 | * cases where no better interrupt acknowledge method is available and we | 49 | * cases where no better interrupt acknowledge method is available and we |
@@ -35,15 +56,15 @@ static inline int i8259_irq(void) | |||
35 | spin_lock(&i8259A_lock); | 56 | spin_lock(&i8259A_lock); |
36 | 57 | ||
37 | /* Perform an interrupt acknowledge cycle on controller 1. */ | 58 | /* Perform an interrupt acknowledge cycle on controller 1. */ |
38 | outb(0x0C, 0x20); /* prepare for poll */ | 59 | outb(0x0C, PIC_MASTER_CMD); /* prepare for poll */ |
39 | irq = inb(0x20) & 7; | 60 | irq = inb(PIC_MASTER_CMD) & 7; |
40 | if (irq == 2) { | 61 | if (irq == PIC_CASCADE_IR) { |
41 | /* | 62 | /* |
42 | * Interrupt is cascaded so perform interrupt | 63 | * Interrupt is cascaded so perform interrupt |
43 | * acknowledge on controller 2. | 64 | * acknowledge on controller 2. |
44 | */ | 65 | */ |
45 | outb(0x0C, 0xA0); /* prepare for poll */ | 66 | outb(0x0C, PIC_SLAVE_CMD); /* prepare for poll */ |
46 | irq = (inb(0xA0) & 7) + 8; | 67 | irq = (inb(PIC_SLAVE_CMD) & 7) + 8; |
47 | } | 68 | } |
48 | 69 | ||
49 | if (unlikely(irq == 7)) { | 70 | if (unlikely(irq == 7)) { |
@@ -54,14 +75,14 @@ static inline int i8259_irq(void) | |||
54 | * significant bit is not set then there is no valid | 75 | * significant bit is not set then there is no valid |
55 | * interrupt. | 76 | * interrupt. |
56 | */ | 77 | */ |
57 | outb(0x0B, 0x20); /* ISR register */ | 78 | outb(0x0B, PIC_MASTER_ISR); /* ISR register */ |
58 | if(~inb(0x20) & 0x80) | 79 | if(~inb(PIC_MASTER_ISR) & 0x80) |
59 | irq = -1; | 80 | irq = -1; |
60 | } | 81 | } |
61 | 82 | ||
62 | spin_unlock(&i8259A_lock); | 83 | spin_unlock(&i8259A_lock); |
63 | 84 | ||
64 | return irq; | 85 | return likely(irq >= 0) ? irq + I8259A_IRQ_BASE : irq; |
65 | } | 86 | } |
66 | 87 | ||
67 | #endif /* _ASM_I8259_H */ | 88 | #endif /* _ASM_I8259_H */ |
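The i8259 header now names the PIC ports and poll commands instead of hard-coding 0x20/0xA0, exports the init/enable/disable helpers, and rebases the returned vector by I8259A_IRQ_BASE, with -1 still meaning a spurious interrupt. A hedged caller sketch; example_dispatch() is a placeholder for whatever dispatch entry point the platform uses (typically do_IRQ()) and is not defined by this patch:

	#include <asm/i8259.h>

	extern void example_dispatch(unsigned int irq);	/* placeholder, see above */

	static void example_handle_legacy_irq(void)
	{
		int irq = i8259_irq();	/* already offset by I8259A_IRQ_BASE, or -1 */

		if (irq >= 0)
			example_dispatch(irq);
	}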
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index d20f2e9b28be..2fbd47eba32d 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h | |||
@@ -156,9 +156,9 @@ pfn_pte(unsigned long pfn, pgprot_t prot) | |||
156 | #define __pte_offset(address) \ | 156 | #define __pte_offset(address) \ |
157 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 157 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
158 | #define pte_offset(dir, address) \ | 158 | #define pte_offset(dir, address) \ |
159 | ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) | 159 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
160 | #define pte_offset_kernel(dir, address) \ | 160 | #define pte_offset_kernel(dir, address) \ |
161 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) | 161 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
162 | 162 | ||
163 | #define pte_offset_map(dir, address) \ | 163 | #define pte_offset_map(dir, address) \ |
164 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) | 164 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) |
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h index b9b1e86493ee..a5b18710b6a4 100644 --- a/include/asm-mips/pgtable-64.h +++ b/include/asm-mips/pgtable-64.h | |||
@@ -212,9 +212,9 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address) | |||
212 | #define __pte_offset(address) \ | 212 | #define __pte_offset(address) \ |
213 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | 213 | (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) |
214 | #define pte_offset(dir, address) \ | 214 | #define pte_offset(dir, address) \ |
215 | ((pte_t *) (pmd_page_vaddr(*dir)) + __pte_offset(address)) | 215 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
216 | #define pte_offset_kernel(dir, address) \ | 216 | #define pte_offset_kernel(dir, address) \ |
217 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) | 217 | ((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address)) |
218 | #define pte_offset_map(dir, address) \ | 218 | #define pte_offset_map(dir, address) \ |
219 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) | 219 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address)) |
220 | #define pte_offset_map_nested(dir, address) \ | 220 | #define pte_offset_map_nested(dir, address) \ |
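The pgtable-32 and pgtable-64 hunks only add parentheses around the pte_offset() macro argument, but the fix matters: with *dir, an invocation whose argument is an expression rather than a plain identifier expands wrongly, because the rest of the expression lands inside the dereference. Illustrative expansion:

	/* old:  pte_offset(pmd + 1, addr)
	 *   ->  (pte_t *) (pmd_page_vaddr(*pmd + 1)) + __pte_offset(addr)
	 *       the "+ 1" is applied to the dereferenced value, not the pointer
	 * new:  pte_offset(pmd + 1, addr)
	 *   ->  (pte_t *) pmd_page_vaddr(*(pmd + 1)) + __pte_offset(addr)
	 *       the whole argument is evaluated first, as intended
	 */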
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h index 8bdc1ba56f73..28305c3cbddf 100644 --- a/include/asm-sh/atomic.h +++ b/include/asm-sh/atomic.h | |||
@@ -28,11 +28,11 @@ static inline void atomic_add(int i, atomic_t *v) | |||
28 | unsigned long tmp; | 28 | unsigned long tmp; |
29 | 29 | ||
30 | __asm__ __volatile__ ( | 30 | __asm__ __volatile__ ( |
31 | "1: movli.l @%3, %0 ! atomic_add \n" | 31 | "1: movli.l @%2, %0 ! atomic_add \n" |
32 | " add %2, %0 \n" | 32 | " add %1, %0 \n" |
33 | " movco.l %0, @%3 \n" | 33 | " movco.l %0, @%2 \n" |
34 | " bf 1b \n" | 34 | " bf 1b \n" |
35 | : "=&z" (tmp), "=r" (&v->counter) | 35 | : "=&z" (tmp) |
36 | : "r" (i), "r" (&v->counter) | 36 | : "r" (i), "r" (&v->counter) |
37 | : "t"); | 37 | : "t"); |
38 | #else | 38 | #else |
@@ -50,11 +50,11 @@ static inline void atomic_sub(int i, atomic_t *v) | |||
50 | unsigned long tmp; | 50 | unsigned long tmp; |
51 | 51 | ||
52 | __asm__ __volatile__ ( | 52 | __asm__ __volatile__ ( |
53 | "1: movli.l @%3, %0 ! atomic_sub \n" | 53 | "1: movli.l @%2, %0 ! atomic_sub \n" |
54 | " sub %2, %0 \n" | 54 | " sub %1, %0 \n" |
55 | " movco.l %0, @%3 \n" | 55 | " movco.l %0, @%2 \n" |
56 | " bf 1b \n" | 56 | " bf 1b \n" |
57 | : "=&z" (tmp), "=r" (&v->counter) | 57 | : "=&z" (tmp) |
58 | : "r" (i), "r" (&v->counter) | 58 | : "r" (i), "r" (&v->counter) |
59 | : "t"); | 59 | : "t"); |
60 | #else | 60 | #else |
@@ -80,12 +80,12 @@ static inline int atomic_add_return(int i, atomic_t *v) | |||
80 | 80 | ||
81 | #ifdef CONFIG_CPU_SH4A | 81 | #ifdef CONFIG_CPU_SH4A |
82 | __asm__ __volatile__ ( | 82 | __asm__ __volatile__ ( |
83 | "1: movli.l @%3, %0 ! atomic_add_return \n" | 83 | "1: movli.l @%2, %0 ! atomic_add_return \n" |
84 | " add %2, %0 \n" | 84 | " add %1, %0 \n" |
85 | " movco.l %0, @%3 \n" | 85 | " movco.l %0, @%2 \n" |
86 | " bf 1b \n" | 86 | " bf 1b \n" |
87 | " synco \n" | 87 | " synco \n" |
88 | : "=&z" (temp), "=r" (&v->counter) | 88 | : "=&z" (temp) |
89 | : "r" (i), "r" (&v->counter) | 89 | : "r" (i), "r" (&v->counter) |
90 | : "t"); | 90 | : "t"); |
91 | #else | 91 | #else |
@@ -109,12 +109,12 @@ static inline int atomic_sub_return(int i, atomic_t *v) | |||
109 | 109 | ||
110 | #ifdef CONFIG_CPU_SH4A | 110 | #ifdef CONFIG_CPU_SH4A |
111 | __asm__ __volatile__ ( | 111 | __asm__ __volatile__ ( |
112 | "1: movli.l @%3, %0 ! atomic_sub_return \n" | 112 | "1: movli.l @%2, %0 ! atomic_sub_return \n" |
113 | " sub %2, %0 \n" | 113 | " sub %1, %0 \n" |
114 | " movco.l %0, @%3 \n" | 114 | " movco.l %0, @%2 \n" |
115 | " bf 1b \n" | 115 | " bf 1b \n" |
116 | " synco \n" | 116 | " synco \n" |
117 | : "=&z" (temp), "=r" (&v->counter) | 117 | : "=&z" (temp) |
118 | : "r" (i), "r" (&v->counter) | 118 | : "r" (i), "r" (&v->counter) |
119 | : "t"); | 119 | : "t"); |
120 | #else | 120 | #else |
@@ -186,11 +186,11 @@ static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | |||
186 | unsigned long tmp; | 186 | unsigned long tmp; |
187 | 187 | ||
188 | __asm__ __volatile__ ( | 188 | __asm__ __volatile__ ( |
189 | "1: movli.l @%3, %0 ! atomic_clear_mask \n" | 189 | "1: movli.l @%2, %0 ! atomic_clear_mask \n" |
190 | " and %2, %0 \n" | 190 | " and %1, %0 \n" |
191 | " movco.l %0, @%3 \n" | 191 | " movco.l %0, @%2 \n" |
192 | " bf 1b \n" | 192 | " bf 1b \n" |
193 | : "=&z" (tmp), "=r" (&v->counter) | 193 | : "=&z" (tmp) |
194 | : "r" (~mask), "r" (&v->counter) | 194 | : "r" (~mask), "r" (&v->counter) |
195 | : "t"); | 195 | : "t"); |
196 | #else | 196 | #else |
@@ -208,11 +208,11 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | |||
208 | unsigned long tmp; | 208 | unsigned long tmp; |
209 | 209 | ||
210 | __asm__ __volatile__ ( | 210 | __asm__ __volatile__ ( |
211 | "1: movli.l @%3, %0 ! atomic_set_mask \n" | 211 | "1: movli.l @%2, %0 ! atomic_set_mask \n" |
212 | " or %2, %0 \n" | 212 | " or %1, %0 \n" |
213 | " movco.l %0, @%3 \n" | 213 | " movco.l %0, @%2 \n" |
214 | " bf 1b \n" | 214 | " bf 1b \n" |
215 | : "=&z" (tmp), "=r" (&v->counter) | 215 | : "=&z" (tmp) |
216 | : "r" (mask), "r" (&v->counter) | 216 | : "r" (mask), "r" (&v->counter) |
217 | : "t"); | 217 | : "t"); |
218 | #else | 218 | #else |
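The asm-sh/atomic.h hunks remove "=r" (&v->counter) from the output lists. The address of a struct member is not an lvalue, so it cannot legally serve as an asm output operand, and the asm never stored through that operand anyway; dropping it also renumbers the remaining operands, which is why @%3/%2 become @%2/%1. The resulting constraint layout, restated with comments for clarity (same semantics as the patched atomic_add() above):

	static inline void example_atomic_add(int i, atomic_t *v)
	{
		unsigned long tmp;

		__asm__ __volatile__(
			"1:	movli.l	@%2, %0	\n"	/* load-linked from v->counter */
			"	add	%1, %0	\n"
			"	movco.l	%0, @%2	\n"	/* store-conditional, sets T on success */
			"	bf	1b	\n"	/* retry if the store failed */
			: "=&z" (tmp)			/* %0: scratch, early-clobber, must be r0 */
			: "r" (i), "r" (&v->counter)	/* %1: addend, %2: pointer to counter */
			: "t");				/* the T bit is clobbered */
	}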
diff --git a/include/asm-sh/bugs.h b/include/asm-sh/bugs.h index beeea40f549e..795047da5e17 100644 --- a/include/asm-sh/bugs.h +++ b/include/asm-sh/bugs.h | |||
@@ -23,16 +23,20 @@ static void __init check_bugs(void) | |||
23 | cpu_data->loops_per_jiffy = loops_per_jiffy; | 23 | cpu_data->loops_per_jiffy = loops_per_jiffy; |
24 | 24 | ||
25 | switch (cpu_data->type) { | 25 | switch (cpu_data->type) { |
26 | case CPU_SH7604: | 26 | case CPU_SH7604 ... CPU_SH7619: |
27 | *p++ = '2'; | 27 | *p++ = '2'; |
28 | break; | 28 | break; |
29 | case CPU_SH7206: | ||
30 | *p++ = '2'; | ||
31 | *p++ = 'a'; | ||
32 | break; | ||
29 | case CPU_SH7705 ... CPU_SH7300: | 33 | case CPU_SH7705 ... CPU_SH7300: |
30 | *p++ = '3'; | 34 | *p++ = '3'; |
31 | break; | 35 | break; |
32 | case CPU_SH7750 ... CPU_SH4_501: | 36 | case CPU_SH7750 ... CPU_SH4_501: |
33 | *p++ = '4'; | 37 | *p++ = '4'; |
34 | break; | 38 | break; |
35 | case CPU_SH7770 ... CPU_SH7781: | 39 | case CPU_SH7770 ... CPU_SH7785: |
36 | *p++ = '4'; | 40 | *p++ = '4'; |
37 | *p++ = 'a'; | 41 | *p++ = 'a'; |
38 | break; | 42 | break; |
diff --git a/include/asm-sh/clock.h b/include/asm-sh/clock.h index fdfb75b30f0d..1df92807f8c5 100644 --- a/include/asm-sh/clock.h +++ b/include/asm-sh/clock.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/kref.h> | 4 | #include <linux/kref.h> |
5 | #include <linux/list.h> | 5 | #include <linux/list.h> |
6 | #include <linux/seq_file.h> | 6 | #include <linux/seq_file.h> |
7 | #include <linux/clk.h> | ||
7 | 8 | ||
8 | struct clk; | 9 | struct clk; |
9 | 10 | ||
@@ -18,7 +19,7 @@ struct clk_ops { | |||
18 | struct clk { | 19 | struct clk { |
19 | struct list_head node; | 20 | struct list_head node; |
20 | const char *name; | 21 | const char *name; |
21 | 22 | int id; | |
22 | struct module *owner; | 23 | struct module *owner; |
23 | 24 | ||
24 | struct clk *parent; | 25 | struct clk *parent; |
@@ -40,22 +41,13 @@ void arch_init_clk_ops(struct clk_ops **, int type); | |||
40 | int clk_init(void); | 41 | int clk_init(void); |
41 | 42 | ||
42 | int __clk_enable(struct clk *); | 43 | int __clk_enable(struct clk *); |
43 | int clk_enable(struct clk *); | ||
44 | |||
45 | void __clk_disable(struct clk *); | 44 | void __clk_disable(struct clk *); |
46 | void clk_disable(struct clk *); | ||
47 | 45 | ||
48 | int clk_set_rate(struct clk *, unsigned long rate); | ||
49 | unsigned long clk_get_rate(struct clk *); | ||
50 | void clk_recalc_rate(struct clk *); | 46 | void clk_recalc_rate(struct clk *); |
51 | 47 | ||
52 | struct clk *clk_get(const char *id); | ||
53 | void clk_put(struct clk *); | ||
54 | |||
55 | int clk_register(struct clk *); | 48 | int clk_register(struct clk *); |
56 | void clk_unregister(struct clk *); | 49 | void clk_unregister(struct clk *); |
57 | 50 | ||
58 | int show_clocks(struct seq_file *m); | 51 | int show_clocks(struct seq_file *m); |
59 | 52 | ||
60 | #endif /* __ASM_SH_CLOCK_H */ | 53 | #endif /* __ASM_SH_CLOCK_H */ |
61 | |||
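The asm-sh/clock.h hunk pulls in <linux/clk.h> and drops the arch-local prototypes for clk_get()/clk_put()/clk_enable()/clk_disable()/clk_set_rate()/clk_get_rate(): consumers now go through the generic clk API, and only the SH-specific framework hooks (clk_recalc_rate(), clk_register(), ...) stay in this header. A hedged consumer sketch against the generic API; the clock name and device argument are illustrative:

	#include <linux/err.h>
	#include <linux/clk.h>

	static int example_enable_module_clock(struct device *dev)
	{
		struct clk *clk = clk_get(dev, "module_clk");	/* illustrative name */
		int ret;

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_enable(clk);
		if (ret)
			clk_put(clk);
		return ret;
	}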
diff --git a/include/asm-sh/cpu-sh2/cache.h b/include/asm-sh/cpu-sh2/cache.h index cd96402e8562..20b9796842dc 100644 --- a/include/asm-sh/cpu-sh2/cache.h +++ b/include/asm-sh/cpu-sh2/cache.h | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #define L1_CACHE_SHIFT 4 | 13 | #define L1_CACHE_SHIFT 4 |
14 | 14 | ||
15 | #if defined(CONFIG_CPU_SUBTYPE_SH7604) | ||
15 | #define CCR 0xfffffe92 /* Address of Cache Control Register */ | 16 | #define CCR 0xfffffe92 /* Address of Cache Control Register */ |
16 | 17 | ||
17 | #define CCR_CACHE_CE 0x01 /* Cache enable */ | 18 | #define CCR_CACHE_CE 0x01 /* Cache enable */ |
@@ -27,5 +28,26 @@ | |||
27 | #define CCR_CACHE_ORA CCR_CACHE_TW | 28 | #define CCR_CACHE_ORA CCR_CACHE_TW |
28 | #define CCR_CACHE_WT 0x00 /* SH-2 is _always_ write-through */ | 29 | #define CCR_CACHE_WT 0x00 /* SH-2 is _always_ write-through */ |
29 | 30 | ||
31 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
32 | #define CCR1 0xffffffec | ||
33 | #define CCR CCR1 | ||
34 | |||
35 | #define CCR_CACHE_CE 0x01 /* Cache enable */ | ||
36 | #define CCR_CACHE_WT 0x06 /* CCR[bit1=1,bit2=1] */ | ||
37 | /* 0x00000000-0x7fffffff: Write-through */ | ||
38 | /* 0x80000000-0x9fffffff: Write-back */ | ||
39 | /* 0xc0000000-0xdfffffff: Write-through */ | ||
40 | #define CCR_CACHE_CB 0x00 /* CCR[bit1=0,bit2=0] */ | ||
41 | /* 0x00000000-0x7fffffff: Write-back */ | ||
42 | /* 0x80000000-0x9fffffff: Write-through */ | ||
43 | /* 0xc0000000-0xdfffffff: Write-back */ | ||
44 | #define CCR_CACHE_CF 0x08 /* Cache invalidate */ | ||
45 | |||
46 | #define CACHE_OC_ADDRESS_ARRAY 0xf0000000 | ||
47 | #define CACHE_OC_DATA_ARRAY 0xf1000000 | ||
48 | |||
49 | #define CCR_CACHE_ENABLE CCR_CACHE_CE | ||
50 | #define CCR_CACHE_INVALIDATE CCR_CACHE_CF | ||
51 | #endif | ||
30 | #endif /* __ASM_CPU_SH2_CACHE_H */ | 52 | #endif /* __ASM_CPU_SH2_CACHE_H */ |
31 | 53 | ||
diff --git a/include/asm-sh/cpu-sh2/freq.h b/include/asm-sh/cpu-sh2/freq.h new file mode 100644 index 000000000000..31de475da70b --- /dev/null +++ b/include/asm-sh/cpu-sh2/freq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * include/asm-sh/cpu-sh2/freq.h | ||
3 | * | ||
4 | * Copyright (C) 2006 Yoshinori Sato | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #ifndef __ASM_CPU_SH2_FREQ_H | ||
11 | #define __ASM_CPU_SH2_FREQ_H | ||
12 | |||
13 | #if defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
14 | #define FREQCR 0xf815ff80 | ||
15 | #endif | ||
16 | |||
17 | #endif /* __ASM_CPU_SH2_FREQ_H */ | ||
18 | |||
diff --git a/include/asm-sh/cpu-sh2/mmu_context.h b/include/asm-sh/cpu-sh2/mmu_context.h new file mode 100644 index 000000000000..beeb299e01ec --- /dev/null +++ b/include/asm-sh/cpu-sh2/mmu_context.h | |||
@@ -0,0 +1,16 @@ | |||
1 | /* | ||
2 | * include/asm-sh/cpu-sh2/mmu_context.h | ||
3 | * | ||
4 | * Copyright (C) 2003 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #ifndef __ASM_CPU_SH2_MMU_CONTEXT_H | ||
11 | #define __ASM_CPU_SH2_MMU_CONTEXT_H | ||
12 | |||
13 | /* No MMU */ | ||
14 | |||
15 | #endif /* __ASM_CPU_SH2_MMU_CONTEXT_H */ | ||
16 | |||
diff --git a/include/asm-sh/cpu-sh2/timer.h b/include/asm-sh/cpu-sh2/timer.h new file mode 100644 index 000000000000..a39c241e8195 --- /dev/null +++ b/include/asm-sh/cpu-sh2/timer.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __ASM_CPU_SH2_TIMER_H | ||
2 | #define __ASM_CPU_SH2_TIMER_H | ||
3 | |||
4 | /* Nothing needed yet */ | ||
5 | |||
6 | #endif /* __ASM_CPU_SH2_TIMER_H */ | ||
diff --git a/include/asm-sh/cpu-sh2a/addrspace.h b/include/asm-sh/cpu-sh2a/addrspace.h new file mode 100644 index 000000000000..3d2e9aa21522 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/addrspace.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/addrspace.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/cache.h b/include/asm-sh/cpu-sh2a/cache.h new file mode 100644 index 000000000000..3e4b9e480982 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/cache.h | |||
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * include/asm-sh/cpu-sh2a/cache.h | ||
3 | * | ||
4 | * Copyright (C) 2004 Paul Mundt | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #ifndef __ASM_CPU_SH2A_CACHE_H | ||
11 | #define __ASM_CPU_SH2A_CACHE_H | ||
12 | |||
13 | #define L1_CACHE_SHIFT 4 | ||
14 | |||
15 | #define CCR1 0xfffc1000 | ||
16 | #define CCR2 0xfffc1004 | ||
17 | |||
18 | /* CCR1 behaves more like the traditional CCR */ | ||
19 | #define CCR CCR1 | ||
20 | |||
21 | /* | ||
22 | * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not | ||
23 | * listed here are reserved. | ||
24 | */ | ||
25 | #define CCR_CACHE_CB 0x0000 /* Hack */ | ||
26 | #define CCR_CACHE_OCE 0x0001 | ||
27 | #define CCR_CACHE_WT 0x0002 | ||
28 | #define CCR_CACHE_OCI 0x0008 /* OCF */ | ||
29 | #define CCR_CACHE_ICE 0x0100 | ||
30 | #define CCR_CACHE_ICI 0x0800 /* ICF */ | ||
31 | |||
32 | #define CACHE_IC_ADDRESS_ARRAY 0xf0000000 | ||
33 | #define CACHE_OC_ADDRESS_ARRAY 0xf0800000 | ||
34 | |||
35 | #define CCR_CACHE_ENABLE (CCR_CACHE_OCE | CCR_CACHE_ICE) | ||
36 | #define CCR_CACHE_INVALIDATE (CCR_CACHE_OCI | CCR_CACHE_ICI) | ||
37 | |||
38 | #endif /* __ASM_CPU_SH2A_CACHE_H */ | ||
39 | |||
diff --git a/include/asm-sh/cpu-sh2a/cacheflush.h b/include/asm-sh/cpu-sh2a/cacheflush.h new file mode 100644 index 000000000000..fa3186c73350 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/cacheflush.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/cacheflush.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/dma.h b/include/asm-sh/cpu-sh2a/dma.h new file mode 100644 index 000000000000..0d5ad85c1de8 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/dma.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/dma.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/freq.h b/include/asm-sh/cpu-sh2a/freq.h new file mode 100644 index 000000000000..e518fff6d10f --- /dev/null +++ b/include/asm-sh/cpu-sh2a/freq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * include/asm-sh/cpu-sh2a/freq.h | ||
3 | * | ||
4 | * Copyright (C) 2006 Yoshinori Sato | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #ifndef __ASM_CPU_SH2A_FREQ_H | ||
11 | #define __ASM_CPU_SH2A_FREQ_H | ||
12 | |||
13 | #if defined(CONFIG_CPU_SUBTYPE_SH7206) | ||
14 | #define FREQCR 0xfffe0010 | ||
15 | #endif | ||
16 | |||
17 | #endif /* __ASM_CPU_SH2A_FREQ_H */ | ||
18 | |||
diff --git a/include/asm-sh/cpu-sh2a/mmu_context.h b/include/asm-sh/cpu-sh2a/mmu_context.h new file mode 100644 index 000000000000..cd2387f7db9e --- /dev/null +++ b/include/asm-sh/cpu-sh2a/mmu_context.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/mmu_context.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/timer.h b/include/asm-sh/cpu-sh2a/timer.h new file mode 100644 index 000000000000..fee504adf11e --- /dev/null +++ b/include/asm-sh/cpu-sh2a/timer.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/timer.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/ubc.h b/include/asm-sh/cpu-sh2a/ubc.h new file mode 100644 index 000000000000..cf28062b96a2 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/ubc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/ubc.h> | |||
diff --git a/include/asm-sh/cpu-sh2a/watchdog.h b/include/asm-sh/cpu-sh2a/watchdog.h new file mode 100644 index 000000000000..c1b3e2488478 --- /dev/null +++ b/include/asm-sh/cpu-sh2a/watchdog.h | |||
@@ -0,0 +1 @@ | |||
#include <asm/cpu-sh2/watchdog.h> | |||
diff --git a/include/asm-sh/dma.h b/include/asm-sh/dma.h index d9daa028689f..faf3051cd429 100644 --- a/include/asm-sh/dma.h +++ b/include/asm-sh/dma.h | |||
@@ -14,9 +14,7 @@ | |||
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/wait.h> | 15 | #include <linux/wait.h> |
16 | #include <linux/sysdev.h> | 16 | #include <linux/sysdev.h> |
17 | #include <linux/device.h> | ||
18 | #include <asm/cpu/dma.h> | 17 | #include <asm/cpu/dma.h> |
19 | #include <asm/semaphore.h> | ||
20 | 18 | ||
21 | /* The maximum address that we can perform a DMA transfer to on this platform */ | 19 | /* The maximum address that we can perform a DMA transfer to on this platform */ |
22 | /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any | 20 | /* Don't define MAX_DMA_ADDRESS; it's useless on the SuperH and any |
@@ -46,16 +44,21 @@ | |||
46 | * DMAC (dma_info) flags | 44 | * DMAC (dma_info) flags |
47 | */ | 45 | */ |
48 | enum { | 46 | enum { |
49 | DMAC_CHANNELS_CONFIGURED = 0x00, | 47 | DMAC_CHANNELS_CONFIGURED = 0x01, |
50 | DMAC_CHANNELS_TEI_CAPABLE = 0x01, | 48 | DMAC_CHANNELS_TEI_CAPABLE = 0x02, /* Transfer end interrupt */ |
51 | }; | 49 | }; |
52 | 50 | ||
53 | /* | 51 | /* |
54 | * DMA channel capabilities / flags | 52 | * DMA channel capabilities / flags |
55 | */ | 53 | */ |
56 | enum { | 54 | enum { |
57 | DMA_TEI_CAPABLE = 0x01, | 55 | DMA_CONFIGURED = 0x01, |
58 | DMA_CONFIGURED = 0x02, | 56 | |
57 | /* | ||
58 | * Transfer end interrupt, inherited from DMAC. | ||
59 | * wait_queue used in dma_wait_for_completion. | ||
60 | */ | ||
61 | DMA_TEI_CAPABLE = 0x02, | ||
59 | }; | 62 | }; |
60 | 63 | ||
61 | extern spinlock_t dma_spin_lock; | 64 | extern spinlock_t dma_spin_lock; |
@@ -68,28 +71,31 @@ struct dma_ops { | |||
68 | 71 | ||
69 | int (*get_residue)(struct dma_channel *chan); | 72 | int (*get_residue)(struct dma_channel *chan); |
70 | int (*xfer)(struct dma_channel *chan); | 73 | int (*xfer)(struct dma_channel *chan); |
71 | void (*configure)(struct dma_channel *chan, unsigned long flags); | 74 | int (*configure)(struct dma_channel *chan, unsigned long flags); |
75 | int (*extend)(struct dma_channel *chan, unsigned long op, void *param); | ||
72 | }; | 76 | }; |
73 | 77 | ||
74 | struct dma_channel { | 78 | struct dma_channel { |
75 | char dev_id[16]; | 79 | char dev_id[16]; /* unique name per DMAC of channel */ |
76 | 80 | ||
77 | unsigned int chan; /* Physical channel number */ | 81 | unsigned int chan; /* DMAC channel number */ |
78 | unsigned int vchan; /* Virtual channel number */ | 82 | unsigned int vchan; /* Virtual channel number */ |
83 | |||
79 | unsigned int mode; | 84 | unsigned int mode; |
80 | unsigned int count; | 85 | unsigned int count; |
81 | 86 | ||
82 | unsigned long sar; | 87 | unsigned long sar; |
83 | unsigned long dar; | 88 | unsigned long dar; |
84 | 89 | ||
90 | const char **caps; | ||
91 | |||
85 | unsigned long flags; | 92 | unsigned long flags; |
86 | atomic_t busy; | 93 | atomic_t busy; |
87 | 94 | ||
88 | struct semaphore sem; | ||
89 | wait_queue_head_t wait_queue; | 95 | wait_queue_head_t wait_queue; |
90 | 96 | ||
91 | struct sys_device dev; | 97 | struct sys_device dev; |
92 | char *name; | 98 | void *priv_data; |
93 | }; | 99 | }; |
94 | 100 | ||
95 | struct dma_info { | 101 | struct dma_info { |
@@ -103,6 +109,12 @@ struct dma_info { | |||
103 | struct dma_channel *channels; | 109 | struct dma_channel *channels; |
104 | 110 | ||
105 | struct list_head list; | 111 | struct list_head list; |
112 | int first_channel_nr; | ||
113 | }; | ||
114 | |||
115 | struct dma_chan_caps { | ||
116 | int ch_num; | ||
117 | const char **caplist; | ||
106 | }; | 118 | }; |
107 | 119 | ||
108 | #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev) | 120 | #define to_dma_channel(channel) container_of(channel, struct dma_channel, dev) |
@@ -121,6 +133,8 @@ extern int dma_xfer(unsigned int chan, unsigned long from, | |||
121 | #define dma_read_page(chan, from, to) \ | 133 | #define dma_read_page(chan, from, to) \ |
122 | dma_read(chan, from, to, PAGE_SIZE) | 134 | dma_read(chan, from, to, PAGE_SIZE) |
123 | 135 | ||
136 | extern int request_dma_bycap(const char **dmac, const char **caps, | ||
137 | const char *dev_id); | ||
124 | extern int request_dma(unsigned int chan, const char *dev_id); | 138 | extern int request_dma(unsigned int chan, const char *dev_id); |
125 | extern void free_dma(unsigned int chan); | 139 | extern void free_dma(unsigned int chan); |
126 | extern int get_dma_residue(unsigned int chan); | 140 | extern int get_dma_residue(unsigned int chan); |
@@ -131,6 +145,10 @@ extern void dma_configure_channel(unsigned int chan, unsigned long flags); | |||
131 | 145 | ||
132 | extern int register_dmac(struct dma_info *info); | 146 | extern int register_dmac(struct dma_info *info); |
133 | extern void unregister_dmac(struct dma_info *info); | 147 | extern void unregister_dmac(struct dma_info *info); |
148 | extern struct dma_info *get_dma_info_by_name(const char *dmac_name); | ||
149 | |||
150 | extern int dma_extend(unsigned int chan, unsigned long op, void *param); | ||
151 | extern int register_chan_caps(const char *dmac, struct dma_chan_caps *capslist); | ||
134 | 152 | ||
135 | #ifdef CONFIG_SYSFS | 153 | #ifdef CONFIG_SYSFS |
136 | /* arch/sh/drivers/dma/dma-sysfs.c */ | 154 | /* arch/sh/drivers/dma/dma-sysfs.c */ |
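The asm-sh/dma.h changes drop the per-channel semaphore, turn the ->configure hook into an int-returning operation, and add capability-based lookup (request_dma_bycap(), register_chan_caps()) plus an opaque extension hook (dma_extend()). A hedged usage sketch follows; it assumes the dmac and caps arrays are NULL-terminated and that the function returns a channel number on success, which is how the declaration reads but should be checked against arch/sh/drivers/dma/dma-api.c:

	#include <asm/dma.h>

	static const char *example_dmacs[] = { "sh-dmac", NULL };	/* illustrative DMAC name */
	static const char *example_caps[]  = { "tei-capable", NULL };	/* illustrative capability */

	static int example_grab_channel(void)
	{
		int ch = request_dma_bycap(example_dmacs, example_caps, "example-driver");

		if (ch < 0)
			return ch;
		dma_configure_channel(ch, 0);
		return ch;
	}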
diff --git a/include/asm-sh/elf.h b/include/asm-sh/elf.h index fc050fd7645e..43ca244564b1 100644 --- a/include/asm-sh/elf.h +++ b/include/asm-sh/elf.h | |||
@@ -74,7 +74,7 @@ typedef struct user_fpu_struct elf_fpregset_t; | |||
74 | #define ELF_ARCH EM_SH | 74 | #define ELF_ARCH EM_SH |
75 | 75 | ||
76 | #define USE_ELF_CORE_DUMP | 76 | #define USE_ELF_CORE_DUMP |
77 | #define ELF_EXEC_PAGESIZE 4096 | 77 | #define ELF_EXEC_PAGESIZE PAGE_SIZE |
78 | 78 | ||
79 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical | 79 | /* This is the location that an ET_DYN program is loaded if exec'ed. Typical |
80 | use of this is to invoke "./ld.so someprog" to test out a new version of | 80 | use of this is to invoke "./ld.so someprog" to test out a new version of |
diff --git a/include/asm-sh/entry-macros.S b/include/asm-sh/entry-macros.S new file mode 100644 index 000000000000..500030eae7aa --- /dev/null +++ b/include/asm-sh/entry-macros.S | |||
@@ -0,0 +1,33 @@ | |||
1 | ! entry.S macro define | ||
2 | |||
3 | .macro cli | ||
4 | stc sr, r0 | ||
5 | or #0xf0, r0 | ||
6 | ldc r0, sr | ||
7 | .endm | ||
8 | |||
9 | .macro sti | ||
10 | mov #0xf0, r11 | ||
11 | extu.b r11, r11 | ||
12 | not r11, r11 | ||
13 | stc sr, r10 | ||
14 | and r11, r10 | ||
15 | #ifdef CONFIG_HAS_SR_RB | ||
16 | stc k_g_imask, r11 | ||
17 | or r11, r10 | ||
18 | #endif | ||
19 | ldc r10, sr | ||
20 | .endm | ||
21 | |||
22 | .macro get_current_thread_info, ti, tmp | ||
23 | #ifdef CONFIG_HAS_SR_RB | ||
24 | stc r7_bank, \ti | ||
25 | #else | ||
26 | mov #((THREAD_SIZE - 1) >> 10) ^ 0xff, \tmp | ||
27 | shll8 \tmp | ||
28 | shll2 \tmp | ||
29 | mov r15, \ti | ||
30 | and \tmp, \ti | ||
31 | #endif | ||
32 | .endm | ||
33 | |||
diff --git a/include/asm-sh/irq-sh73180.h b/include/asm-sh/irq-sh73180.h deleted file mode 100644 index b28af9a69d72..000000000000 --- a/include/asm-sh/irq-sh73180.h +++ /dev/null | |||
@@ -1,314 +0,0 @@ | |||
1 | #ifndef __ASM_SH_IRQ_SH73180_H | ||
2 | #define __ASM_SH_IRQ_SH73180_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm-sh/irq-sh73180.h | ||
6 | * | ||
7 | * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp> | ||
8 | */ | ||
9 | |||
10 | #undef INTC_IPRA | ||
11 | #undef INTC_IPRB | ||
12 | #undef INTC_IPRC | ||
13 | #undef INTC_IPRD | ||
14 | |||
15 | #undef DMTE0_IRQ | ||
16 | #undef DMTE1_IRQ | ||
17 | #undef DMTE2_IRQ | ||
18 | #undef DMTE3_IRQ | ||
19 | #undef DMTE4_IRQ | ||
20 | #undef DMTE5_IRQ | ||
21 | #undef DMTE6_IRQ | ||
22 | #undef DMTE7_IRQ | ||
23 | #undef DMAE_IRQ | ||
24 | #undef DMA_IPR_ADDR | ||
25 | #undef DMA_IPR_POS | ||
26 | #undef DMA_PRIORITY | ||
27 | |||
28 | #undef INTC_IMCR0 | ||
29 | #undef INTC_IMCR1 | ||
30 | #undef INTC_IMCR2 | ||
31 | #undef INTC_IMCR3 | ||
32 | #undef INTC_IMCR4 | ||
33 | #undef INTC_IMCR5 | ||
34 | #undef INTC_IMCR6 | ||
35 | #undef INTC_IMCR7 | ||
36 | #undef INTC_IMCR8 | ||
37 | #undef INTC_IMCR9 | ||
38 | #undef INTC_IMCR10 | ||
39 | |||
40 | |||
41 | #define INTC_IPRA 0xA4080000UL | ||
42 | #define INTC_IPRB 0xA4080004UL | ||
43 | #define INTC_IPRC 0xA4080008UL | ||
44 | #define INTC_IPRD 0xA408000CUL | ||
45 | #define INTC_IPRE 0xA4080010UL | ||
46 | #define INTC_IPRF 0xA4080014UL | ||
47 | #define INTC_IPRG 0xA4080018UL | ||
48 | #define INTC_IPRH 0xA408001CUL | ||
49 | #define INTC_IPRI 0xA4080020UL | ||
50 | #define INTC_IPRJ 0xA4080024UL | ||
51 | #define INTC_IPRK 0xA4080028UL | ||
52 | |||
53 | #define INTC_IMR0 0xA4080080UL | ||
54 | #define INTC_IMR1 0xA4080084UL | ||
55 | #define INTC_IMR2 0xA4080088UL | ||
56 | #define INTC_IMR3 0xA408008CUL | ||
57 | #define INTC_IMR4 0xA4080090UL | ||
58 | #define INTC_IMR5 0xA4080094UL | ||
59 | #define INTC_IMR6 0xA4080098UL | ||
60 | #define INTC_IMR7 0xA408009CUL | ||
61 | #define INTC_IMR8 0xA40800A0UL | ||
62 | #define INTC_IMR9 0xA40800A4UL | ||
63 | #define INTC_IMR10 0xA40800A8UL | ||
64 | #define INTC_IMR11 0xA40800ACUL | ||
65 | |||
66 | #define INTC_IMCR0 0xA40800C0UL | ||
67 | #define INTC_IMCR1 0xA40800C4UL | ||
68 | #define INTC_IMCR2 0xA40800C8UL | ||
69 | #define INTC_IMCR3 0xA40800CCUL | ||
70 | #define INTC_IMCR4 0xA40800D0UL | ||
71 | #define INTC_IMCR5 0xA40800D4UL | ||
72 | #define INTC_IMCR6 0xA40800D8UL | ||
73 | #define INTC_IMCR7 0xA40800DCUL | ||
74 | #define INTC_IMCR8 0xA40800E0UL | ||
75 | #define INTC_IMCR9 0xA40800E4UL | ||
76 | #define INTC_IMCR10 0xA40800E8UL | ||
77 | #define INTC_IMCR11 0xA40800ECUL | ||
78 | |||
79 | #define INTC_ICR0 0xA4140000UL | ||
80 | #define INTC_ICR1 0xA414001CUL | ||
81 | |||
82 | #define INTMSK0 0xa4140044 | ||
83 | #define INTMSKCLR0 0xa4140064 | ||
84 | #define INTC_INTPRI0 0xa4140010 | ||
85 | |||
86 | /* | ||
87 | NOTE: | ||
88 | |||
89 | *_IRQ = (INTEVT2 - 0x200)/0x20 | ||
90 | */ | ||
91 | |||
92 | /* TMU0 */ | ||
93 | #define TMU0_IRQ 16 | ||
94 | #define TMU0_IPR_ADDR INTC_IPRA | ||
95 | #define TMU0_IPR_POS 3 | ||
96 | #define TMU0_PRIORITY 2 | ||
97 | |||
98 | #define TIMER_IRQ 16 | ||
99 | #define TIMER_IPR_ADDR INTC_IPRA | ||
100 | #define TIMER_IPR_POS 3 | ||
101 | #define TIMER_PRIORITY 2 | ||
102 | |||
103 | /* TMU1 */ | ||
104 | #define TMU1_IRQ 17 | ||
105 | #define TMU1_IPR_ADDR INTC_IPRA | ||
106 | #define TMU1_IPR_POS 2 | ||
107 | #define TMU1_PRIORITY 2 | ||
108 | |||
109 | /* TMU2 */ | ||
110 | #define TMU2_IRQ 18 | ||
111 | #define TMU2_IPR_ADDR INTC_IPRA | ||
112 | #define TMU2_IPR_POS 1 | ||
113 | #define TMU2_PRIORITY 2 | ||
114 | |||
115 | /* LCDC */ | ||
116 | #define LCDC_IRQ 28 | ||
117 | #define LCDC_IPR_ADDR INTC_IPRB | ||
118 | #define LCDC_IPR_POS 2 | ||
119 | #define LCDC_PRIORITY 2 | ||
120 | |||
121 | /* VIO (Video I/O) */ | ||
122 | #define CEU_IRQ 52 | ||
123 | #define BEU_IRQ 53 | ||
124 | #define VEU_IRQ 54 | ||
125 | #define VOU_IRQ 55 | ||
126 | #define VIO_IPR_ADDR INTC_IPRE | ||
127 | #define VIO_IPR_POS 2 | ||
128 | #define VIO_PRIORITY 2 | ||
129 | |||
130 | /* MFI (Multi Functional Interface) */ | ||
131 | #define MFI_IRQ 56 | ||
132 | #define MFI_IPR_ADDR INTC_IPRE | ||
133 | #define MFI_IPR_POS 1 | ||
134 | #define MFI_PRIORITY 2 | ||
135 | |||
136 | /* VPU (Video Processing Unit) */ | ||
137 | #define VPU_IRQ 60 | ||
138 | #define VPU_IPR_ADDR INTC_IPRE | ||
139 | #define VPU_IPR_POS 0 | ||
140 | #define VPU_PRIORITY 2 | ||
141 | |||
142 | /* 3DG */ | ||
143 | #define TDG_IRQ 63 | ||
144 | #define TDG_IPR_ADDR INTC_IPRJ | ||
145 | #define TDG_IPR_POS 2 | ||
146 | #define TDG_PRIORITY 2 | ||
147 | |||
148 | /* DMAC(1) */ | ||
149 | #define DMTE0_IRQ 48 | ||
150 | #define DMTE1_IRQ 49 | ||
151 | #define DMTE2_IRQ 50 | ||
152 | #define DMTE3_IRQ 51 | ||
153 | #define DMA1_IPR_ADDR INTC_IPRE | ||
154 | #define DMA1_IPR_POS 3 | ||
155 | #define DMA1_PRIORITY 7 | ||
156 | |||
157 | /* DMAC(2) */ | ||
158 | #define DMTE4_IRQ 76 | ||
159 | #define DMTE5_IRQ 77 | ||
160 | #define DMA2_IPR_ADDR INTC_IPRF | ||
161 | #define DMA2_IPR_POS 2 | ||
162 | #define DMA2_PRIORITY 7 | ||
163 | |||
164 | /* SCIF0 */ | ||
165 | #define SCIF_ERI_IRQ 80 | ||
166 | #define SCIF_RXI_IRQ 81 | ||
167 | #define SCIF_BRI_IRQ 82 | ||
168 | #define SCIF_TXI_IRQ 83 | ||
169 | #define SCIF_IPR_ADDR INTC_IPRG | ||
170 | #define SCIF_IPR_POS 3 | ||
171 | #define SCIF_PRIORITY 3 | ||
172 | |||
173 | /* SIOF0 */ | ||
174 | #define SIOF0_IRQ 84 | ||
175 | #define SIOF0_IPR_ADDR INTC_IPRH | ||
176 | #define SIOF0_IPR_POS 3 | ||
177 | #define SIOF0_PRIORITY 3 | ||
178 | |||
179 | /* FLCTL (Flash Memory Controller) */ | ||
180 | #define FLSTE_IRQ 92 | ||
181 | #define FLTEND_IRQ 93 | ||
182 | #define FLTRQ0_IRQ 94 | ||
183 | #define FLTRQ1_IRQ 95 | ||
184 | #define FLCTL_IPR_ADDR INTC_IPRH | ||
185 | #define FLCTL_IPR_POS 1 | ||
186 | #define FLCTL_PRIORITY 3 | ||
187 | |||
188 | /* IIC(0) (IIC Bus Interface) */ | ||
189 | #define IIC0_ALI_IRQ 96 | ||
190 | #define IIC0_TACKI_IRQ 97 | ||
191 | #define IIC0_WAITI_IRQ 98 | ||
192 | #define IIC0_DTEI_IRQ 99 | ||
193 | #define IIC0_IPR_ADDR INTC_IPRH | ||
194 | #define IIC0_IPR_POS 0 | ||
195 | #define IIC0_PRIORITY 3 | ||
196 | |||
197 | /* IIC(1) (IIC Bus Interface) */ | ||
198 | #define IIC1_ALI_IRQ 44 | ||
199 | #define IIC1_TACKI_IRQ 45 | ||
200 | #define IIC1_WAITI_IRQ 46 | ||
201 | #define IIC1_DTEI_IRQ 47 | ||
202 | #define IIC1_IPR_ADDR INTC_IPRG | ||
203 | #define IIC1_IPR_POS 0 | ||
204 | #define IIC1_PRIORITY 3 | ||
205 | |||
206 | /* SIO0 */ | ||
207 | #define SIO0_IRQ 88 | ||
208 | #define SIO0_IPR_ADDR INTC_IPRI | ||
209 | #define SIO0_IPR_POS 3 | ||
210 | #define SIO0_PRIORITY 3 | ||
211 | |||
212 | /* SDHI */ | ||
213 | #define SDHI_SDHII0_IRQ 100 | ||
214 | #define SDHI_SDHII1_IRQ 101 | ||
215 | #define SDHI_SDHII2_IRQ 102 | ||
216 | #define SDHI_SDHII3_IRQ 103 | ||
217 | #define SDHI_IPR_ADDR INTC_IPRK | ||
218 | #define SDHI_IPR_POS 0 | ||
219 | #define SDHI_PRIORITY 3 | ||
220 | |||
221 | /* SIU (Sound Interface Unit) */ | ||
222 | #define SIU_IRQ 108 | ||
223 | #define SIU_IPR_ADDR INTC_IPRJ | ||
224 | #define SIU_IPR_POS 1 | ||
225 | #define SIU_PRIORITY 3 | ||
226 | |||
227 | #define PORT_PACR 0xA4050100UL | ||
228 | #define PORT_PBCR 0xA4050102UL | ||
229 | #define PORT_PCCR 0xA4050104UL | ||
230 | #define PORT_PDCR 0xA4050106UL | ||
231 | #define PORT_PECR 0xA4050108UL | ||
232 | #define PORT_PFCR 0xA405010AUL | ||
233 | #define PORT_PGCR 0xA405010CUL | ||
234 | #define PORT_PHCR 0xA405010EUL | ||
235 | #define PORT_PJCR 0xA4050110UL | ||
236 | #define PORT_PKCR 0xA4050112UL | ||
237 | #define PORT_PLCR 0xA4050114UL | ||
238 | #define PORT_SCPCR 0xA4050116UL | ||
239 | #define PORT_PMCR 0xA4050118UL | ||
240 | #define PORT_PNCR 0xA405011AUL | ||
241 | #define PORT_PQCR 0xA405011CUL | ||
242 | #define PORT_PRCR 0xA405011EUL | ||
243 | #define PORT_PTCR 0xA405014CUL | ||
244 | #define PORT_PUCR 0xA405014EUL | ||
245 | #define PORT_PVCR 0xA4050150UL | ||
246 | |||
247 | #define PORT_PSELA 0xA4050140UL | ||
248 | #define PORT_PSELB 0xA4050142UL | ||
249 | #define PORT_PSELC 0xA4050144UL | ||
250 | #define PORT_PSELE 0xA4050158UL | ||
251 | |||
252 | #define PORT_HIZCRA 0xA4050146UL | ||
253 | #define PORT_HIZCRB 0xA4050148UL | ||
254 | #define PORT_DRVCR 0xA405014AUL | ||
255 | |||
256 | #define PORT_PADR 0xA4050120UL | ||
257 | #define PORT_PBDR 0xA4050122UL | ||
258 | #define PORT_PCDR 0xA4050124UL | ||
259 | #define PORT_PDDR 0xA4050126UL | ||
260 | #define PORT_PEDR 0xA4050128UL | ||
261 | #define PORT_PFDR 0xA405012AUL | ||
262 | #define PORT_PGDR 0xA405012CUL | ||
263 | #define PORT_PHDR 0xA405012EUL | ||
264 | #define PORT_PJDR 0xA4050130UL | ||
265 | #define PORT_PKDR 0xA4050132UL | ||
266 | #define PORT_PLDR 0xA4050134UL | ||
267 | #define PORT_SCPDR 0xA4050136UL | ||
268 | #define PORT_PMDR 0xA4050138UL | ||
269 | #define PORT_PNDR 0xA405013AUL | ||
270 | #define PORT_PQDR 0xA405013CUL | ||
271 | #define PORT_PRDR 0xA405013EUL | ||
272 | #define PORT_PTDR 0xA405016CUL | ||
273 | #define PORT_PUDR 0xA405016EUL | ||
274 | #define PORT_PVDR 0xA4050170UL | ||
275 | |||
276 | #define IRQ0_IRQ 32 | ||
277 | #define IRQ1_IRQ 33 | ||
278 | #define IRQ2_IRQ 34 | ||
279 | #define IRQ3_IRQ 35 | ||
280 | #define IRQ4_IRQ 36 | ||
281 | #define IRQ5_IRQ 37 | ||
282 | #define IRQ6_IRQ 38 | ||
283 | #define IRQ7_IRQ 39 | ||
284 | |||
285 | #define INTPRI00 0xA4140010UL | ||
286 | |||
287 | #define IRQ0_IPR_ADDR INTPRI00 | ||
288 | #define IRQ1_IPR_ADDR INTPRI00 | ||
289 | #define IRQ2_IPR_ADDR INTPRI00 | ||
290 | #define IRQ3_IPR_ADDR INTPRI00 | ||
291 | #define IRQ4_IPR_ADDR INTPRI00 | ||
292 | #define IRQ5_IPR_ADDR INTPRI00 | ||
293 | #define IRQ6_IPR_ADDR INTPRI00 | ||
294 | #define IRQ7_IPR_ADDR INTPRI00 | ||
295 | |||
296 | #define IRQ0_IPR_POS 7 | ||
297 | #define IRQ1_IPR_POS 6 | ||
298 | #define IRQ2_IPR_POS 5 | ||
299 | #define IRQ3_IPR_POS 4 | ||
300 | #define IRQ4_IPR_POS 3 | ||
301 | #define IRQ5_IPR_POS 2 | ||
302 | #define IRQ6_IPR_POS 1 | ||
303 | #define IRQ7_IPR_POS 0 | ||
304 | |||
305 | #define IRQ0_PRIORITY 1 | ||
306 | #define IRQ1_PRIORITY 1 | ||
307 | #define IRQ2_PRIORITY 1 | ||
308 | #define IRQ3_PRIORITY 1 | ||
309 | #define IRQ4_PRIORITY 1 | ||
310 | #define IRQ5_PRIORITY 1 | ||
311 | #define IRQ6_PRIORITY 1 | ||
312 | #define IRQ7_PRIORITY 1 | ||
313 | |||
314 | #endif /* __ASM_SH_IRQ_SH73180_H */ | ||
diff --git a/include/asm-sh/irq-sh7343.h b/include/asm-sh/irq-sh7343.h deleted file mode 100644 index 5d15419b53b0..000000000000 --- a/include/asm-sh/irq-sh7343.h +++ /dev/null | |||
@@ -1,317 +0,0 @@ | |||
1 | #ifndef __ASM_SH_IRQ_SH7343_H | ||
2 | #define __ASM_SH_IRQ_SH7343_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm-sh/irq-sh7343.h | ||
6 | * | ||
7 | * Copyright (C) 2006 Kenati Technologies Inc. | ||
8 | * Andre Mccurdy <andre@kenati.com> | ||
9 | * Ranjit Deshpande <ranjit@kenati.com> | ||
10 | */ | ||
11 | |||
12 | #undef INTC_IPRA | ||
13 | #undef INTC_IPRB | ||
14 | #undef INTC_IPRC | ||
15 | #undef INTC_IPRD | ||
16 | |||
17 | #undef DMTE0_IRQ | ||
18 | #undef DMTE1_IRQ | ||
19 | #undef DMTE2_IRQ | ||
20 | #undef DMTE3_IRQ | ||
21 | #undef DMTE4_IRQ | ||
22 | #undef DMTE5_IRQ | ||
23 | #undef DMTE6_IRQ | ||
24 | #undef DMTE7_IRQ | ||
25 | #undef DMAE_IRQ | ||
26 | #undef DMA_IPR_ADDR | ||
27 | #undef DMA_IPR_POS | ||
28 | #undef DMA_PRIORITY | ||
29 | |||
30 | #undef INTC_IMCR0 | ||
31 | #undef INTC_IMCR1 | ||
32 | #undef INTC_IMCR2 | ||
33 | #undef INTC_IMCR3 | ||
34 | #undef INTC_IMCR4 | ||
35 | #undef INTC_IMCR5 | ||
36 | #undef INTC_IMCR6 | ||
37 | #undef INTC_IMCR7 | ||
38 | #undef INTC_IMCR8 | ||
39 | #undef INTC_IMCR9 | ||
40 | #undef INTC_IMCR10 | ||
41 | |||
42 | |||
43 | #define INTC_IPRA 0xA4080000UL | ||
44 | #define INTC_IPRB 0xA4080004UL | ||
45 | #define INTC_IPRC 0xA4080008UL | ||
46 | #define INTC_IPRD 0xA408000CUL | ||
47 | #define INTC_IPRE 0xA4080010UL | ||
48 | #define INTC_IPRF 0xA4080014UL | ||
49 | #define INTC_IPRG 0xA4080018UL | ||
50 | #define INTC_IPRH 0xA408001CUL | ||
51 | #define INTC_IPRI 0xA4080020UL | ||
52 | #define INTC_IPRJ 0xA4080024UL | ||
53 | #define INTC_IPRK 0xA4080028UL | ||
54 | #define INTC_IPRL 0xA408002CUL | ||
55 | |||
56 | #define INTC_IMR0 0xA4080080UL | ||
57 | #define INTC_IMR1 0xA4080084UL | ||
58 | #define INTC_IMR2 0xA4080088UL | ||
59 | #define INTC_IMR3 0xA408008CUL | ||
60 | #define INTC_IMR4 0xA4080090UL | ||
61 | #define INTC_IMR5 0xA4080094UL | ||
62 | #define INTC_IMR6 0xA4080098UL | ||
63 | #define INTC_IMR7 0xA408009CUL | ||
64 | #define INTC_IMR8 0xA40800A0UL | ||
65 | #define INTC_IMR9 0xA40800A4UL | ||
66 | #define INTC_IMR10 0xA40800A8UL | ||
67 | #define INTC_IMR11 0xA40800ACUL | ||
68 | |||
69 | #define INTC_IMCR0 0xA40800C0UL | ||
70 | #define INTC_IMCR1 0xA40800C4UL | ||
71 | #define INTC_IMCR2 0xA40800C8UL | ||
72 | #define INTC_IMCR3 0xA40800CCUL | ||
73 | #define INTC_IMCR4 0xA40800D0UL | ||
74 | #define INTC_IMCR5 0xA40800D4UL | ||
75 | #define INTC_IMCR6 0xA40800D8UL | ||
76 | #define INTC_IMCR7 0xA40800DCUL | ||
77 | #define INTC_IMCR8 0xA40800E0UL | ||
78 | #define INTC_IMCR9 0xA40800E4UL | ||
79 | #define INTC_IMCR10 0xA40800E8UL | ||
80 | #define INTC_IMCR11 0xA40800ECUL | ||
81 | |||
82 | #define INTC_ICR0 0xA4140000UL | ||
83 | #define INTC_ICR1 0xA414001CUL | ||
84 | |||
85 | #define INTMSK0 0xa4140044 | ||
86 | #define INTMSKCLR0 0xa4140064 | ||
87 | #define INTC_INTPRI0 0xa4140010 | ||
88 | |||
89 | /* | ||
90 | NOTE: | ||
91 | |||
92 | *_IRQ = (INTEVT2 - 0x200)/0x20 | ||
93 | */ | ||
94 | |||
95 | /* TMU0 */ | ||
96 | #define TMU0_IRQ 16 | ||
97 | #define TMU0_IPR_ADDR INTC_IPRA | ||
98 | #define TMU0_IPR_POS 3 | ||
99 | #define TMU0_PRIORITY 2 | ||
100 | |||
101 | #define TIMER_IRQ 16 | ||
102 | #define TIMER_IPR_ADDR INTC_IPRA | ||
103 | #define TIMER_IPR_POS 3 | ||
104 | #define TIMER_PRIORITY 2 | ||
105 | |||
106 | /* TMU1 */ | ||
107 | #define TMU1_IRQ 17 | ||
108 | #define TMU1_IPR_ADDR INTC_IPRA | ||
109 | #define TMU1_IPR_POS 2 | ||
110 | #define TMU1_PRIORITY 2 | ||
111 | |||
112 | /* TMU2 */ | ||
113 | #define TMU2_IRQ 18 | ||
114 | #define TMU2_IPR_ADDR INTC_IPRA | ||
115 | #define TMU2_IPR_POS 1 | ||
116 | #define TMU2_PRIORITY 2 | ||
117 | |||
118 | /* LCDC */ | ||
119 | #define LCDC_IRQ 28 | ||
120 | #define LCDC_IPR_ADDR INTC_IPRB | ||
121 | #define LCDC_IPR_POS 2 | ||
122 | #define LCDC_PRIORITY 2 | ||
123 | |||
124 | /* VIO (Video I/O) */ | ||
125 | #define CEU_IRQ 52 | ||
126 | #define BEU_IRQ 53 | ||
127 | #define VEU_IRQ 54 | ||
128 | #define VOU_IRQ 55 | ||
129 | #define VIO_IPR_ADDR INTC_IPRE | ||
130 | #define VIO_IPR_POS 2 | ||
131 | #define VIO_PRIORITY 2 | ||
132 | |||
133 | /* MFI (Multi Functional Interface) */ | ||
134 | #define MFI_IRQ 56 | ||
135 | #define MFI_IPR_ADDR INTC_IPRE | ||
136 | #define MFI_IPR_POS 1 | ||
137 | #define MFI_PRIORITY 2 | ||
138 | |||
139 | /* VPU (Video Processing Unit) */ | ||
140 | #define VPU_IRQ 60 | ||
141 | #define VPU_IPR_ADDR INTC_IPRE | ||
142 | #define VPU_IPR_POS 0 | ||
143 | #define VPU_PRIORITY 2 | ||
144 | |||
145 | /* 3DG */ | ||
146 | #define TDG_IRQ 63 | ||
147 | #define TDG_IPR_ADDR INTC_IPRJ | ||
148 | #define TDG_IPR_POS 2 | ||
149 | #define TDG_PRIORITY 2 | ||
150 | |||
151 | /* DMAC(1) */ | ||
152 | #define DMTE0_IRQ 48 | ||
153 | #define DMTE1_IRQ 49 | ||
154 | #define DMTE2_IRQ 50 | ||
155 | #define DMTE3_IRQ 51 | ||
156 | #define DMA1_IPR_ADDR INTC_IPRE | ||
157 | #define DMA1_IPR_POS 3 | ||
158 | #define DMA1_PRIORITY 7 | ||
159 | |||
160 | /* DMAC(2) */ | ||
161 | #define DMTE4_IRQ 76 | ||
162 | #define DMTE5_IRQ 77 | ||
163 | #define DMA2_IPR_ADDR INTC_IPRF | ||
164 | #define DMA2_IPR_POS 2 | ||
165 | #define DMA2_PRIORITY 7 | ||
166 | |||
167 | /* SCIF0 */ | ||
168 | #define SCIF_ERI_IRQ 80 | ||
169 | #define SCIF_RXI_IRQ 81 | ||
170 | #define SCIF_BRI_IRQ 82 | ||
171 | #define SCIF_TXI_IRQ 83 | ||
172 | #define SCIF_IPR_ADDR INTC_IPRG | ||
173 | #define SCIF_IPR_POS 3 | ||
174 | #define SCIF_PRIORITY 3 | ||
175 | |||
176 | /* SIOF0 */ | ||
177 | #define SIOF0_IRQ 84 | ||
178 | #define SIOF0_IPR_ADDR INTC_IPRH | ||
179 | #define SIOF0_IPR_POS 3 | ||
180 | #define SIOF0_PRIORITY 3 | ||
181 | |||
182 | /* FLCTL (Flash Memory Controller) */ | ||
183 | #define FLSTE_IRQ 92 | ||
184 | #define FLTEND_IRQ 93 | ||
185 | #define FLTRQ0_IRQ 94 | ||
186 | #define FLTRQ1_IRQ 95 | ||
187 | #define FLCTL_IPR_ADDR INTC_IPRH | ||
188 | #define FLCTL_IPR_POS 1 | ||
189 | #define FLCTL_PRIORITY 3 | ||
190 | |||
191 | /* IIC(0) (IIC Bus Interface) */ | ||
192 | #define IIC0_ALI_IRQ 96 | ||
193 | #define IIC0_TACKI_IRQ 97 | ||
194 | #define IIC0_WAITI_IRQ 98 | ||
195 | #define IIC0_DTEI_IRQ 99 | ||
196 | #define IIC0_IPR_ADDR INTC_IPRH | ||
197 | #define IIC0_IPR_POS 0 | ||
198 | #define IIC0_PRIORITY 3 | ||
199 | |||
200 | /* IIC(1) (IIC Bus Interface) */ | ||
201 | #define IIC1_ALI_IRQ 44 | ||
202 | #define IIC1_TACKI_IRQ 45 | ||
203 | #define IIC1_WAITI_IRQ 46 | ||
204 | #define IIC1_DTEI_IRQ 47 | ||
205 | #define IIC1_IPR_ADDR INTC_IPRI | ||
206 | #define IIC1_IPR_POS 0 | ||
207 | #define IIC1_PRIORITY 3 | ||
208 | |||
209 | /* SIO0 */ | ||
210 | #define SIO0_IRQ 88 | ||
211 | #define SIO0_IPR_ADDR INTC_IPRI | ||
212 | #define SIO0_IPR_POS 3 | ||
213 | #define SIO0_PRIORITY 3 | ||
214 | |||
215 | /* SDHI */ | ||
216 | #define SDHI_SDHII0_IRQ 100 | ||
217 | #define SDHI_SDHII1_IRQ 101 | ||
218 | #define SDHI_SDHII2_IRQ 102 | ||
219 | #define SDHI_SDHII3_IRQ 103 | ||
220 | #define SDHI_IPR_ADDR INTC_IPRK | ||
221 | #define SDHI_IPR_POS 0 | ||
222 | #define SDHI_PRIORITY 3 | ||
223 | |||
224 | /* SIU (Sound Interface Unit) */ | ||
225 | #define SIU_IRQ 108 | ||
226 | #define SIU_IPR_ADDR INTC_IPRJ | ||
227 | #define SIU_IPR_POS 1 | ||
228 | #define SIU_PRIORITY 3 | ||
229 | |||
230 | #define PORT_PACR 0xA4050100UL | ||
231 | #define PORT_PBCR 0xA4050102UL | ||
232 | #define PORT_PCCR 0xA4050104UL | ||
233 | #define PORT_PDCR 0xA4050106UL | ||
234 | #define PORT_PECR 0xA4050108UL | ||
235 | #define PORT_PFCR 0xA405010AUL | ||
236 | #define PORT_PGCR 0xA405010CUL | ||
237 | #define PORT_PHCR 0xA405010EUL | ||
238 | #define PORT_PJCR 0xA4050110UL | ||
239 | #define PORT_PKCR 0xA4050112UL | ||
240 | #define PORT_PLCR 0xA4050114UL | ||
241 | #define PORT_SCPCR 0xA4050116UL | ||
242 | #define PORT_PMCR 0xA4050118UL | ||
243 | #define PORT_PNCR 0xA405011AUL | ||
244 | #define PORT_PQCR 0xA405011CUL | ||
245 | #define PORT_PRCR 0xA405011EUL | ||
246 | #define PORT_PTCR 0xA405014CUL | ||
247 | #define PORT_PUCR 0xA405014EUL | ||
248 | #define PORT_PVCR 0xA4050150UL | ||
249 | |||
250 | #define PORT_PSELA 0xA4050140UL | ||
251 | #define PORT_PSELB 0xA4050142UL | ||
252 | #define PORT_PSELC 0xA4050144UL | ||
253 | #define PORT_PSELE 0xA4050158UL | ||
254 | |||
255 | #define PORT_HIZCRA 0xA4050146UL | ||
256 | #define PORT_HIZCRB 0xA4050148UL | ||
257 | #define PORT_DRVCR 0xA405014AUL | ||
258 | |||
259 | #define PORT_PADR 0xA4050120UL | ||
260 | #define PORT_PBDR 0xA4050122UL | ||
261 | #define PORT_PCDR 0xA4050124UL | ||
262 | #define PORT_PDDR 0xA4050126UL | ||
263 | #define PORT_PEDR 0xA4050128UL | ||
264 | #define PORT_PFDR 0xA405012AUL | ||
265 | #define PORT_PGDR 0xA405012CUL | ||
266 | #define PORT_PHDR 0xA405012EUL | ||
267 | #define PORT_PJDR 0xA4050130UL | ||
268 | #define PORT_PKDR 0xA4050132UL | ||
269 | #define PORT_PLDR 0xA4050134UL | ||
270 | #define PORT_SCPDR 0xA4050136UL | ||
271 | #define PORT_PMDR 0xA4050138UL | ||
272 | #define PORT_PNDR 0xA405013AUL | ||
273 | #define PORT_PQDR 0xA405013CUL | ||
274 | #define PORT_PRDR 0xA405013EUL | ||
275 | #define PORT_PTDR 0xA405016CUL | ||
276 | #define PORT_PUDR 0xA405016EUL | ||
277 | #define PORT_PVDR 0xA4050170UL | ||
278 | |||
279 | #define IRQ0_IRQ 32 | ||
280 | #define IRQ1_IRQ 33 | ||
281 | #define IRQ2_IRQ 34 | ||
282 | #define IRQ3_IRQ 35 | ||
283 | #define IRQ4_IRQ 36 | ||
284 | #define IRQ5_IRQ 37 | ||
285 | #define IRQ6_IRQ 38 | ||
286 | #define IRQ7_IRQ 39 | ||
287 | |||
288 | #define INTPRI00 0xA4140010UL | ||
289 | |||
290 | #define IRQ0_IPR_ADDR INTPRI00 | ||
291 | #define IRQ1_IPR_ADDR INTPRI00 | ||
292 | #define IRQ2_IPR_ADDR INTPRI00 | ||
293 | #define IRQ3_IPR_ADDR INTPRI00 | ||
294 | #define IRQ4_IPR_ADDR INTPRI00 | ||
295 | #define IRQ5_IPR_ADDR INTPRI00 | ||
296 | #define IRQ6_IPR_ADDR INTPRI00 | ||
297 | #define IRQ7_IPR_ADDR INTPRI00 | ||
298 | |||
299 | #define IRQ0_IPR_POS 7 | ||
300 | #define IRQ1_IPR_POS 6 | ||
301 | #define IRQ2_IPR_POS 5 | ||
302 | #define IRQ3_IPR_POS 4 | ||
303 | #define IRQ4_IPR_POS 3 | ||
304 | #define IRQ5_IPR_POS 2 | ||
305 | #define IRQ6_IPR_POS 1 | ||
306 | #define IRQ7_IPR_POS 0 | ||
307 | |||
308 | #define IRQ0_PRIORITY 1 | ||
309 | #define IRQ1_PRIORITY 1 | ||
310 | #define IRQ2_PRIORITY 1 | ||
311 | #define IRQ3_PRIORITY 1 | ||
312 | #define IRQ4_PRIORITY 1 | ||
313 | #define IRQ5_PRIORITY 1 | ||
314 | #define IRQ6_PRIORITY 1 | ||
315 | #define IRQ7_PRIORITY 1 | ||
316 | |||
317 | #endif /* __ASM_SH_IRQ_SH7343_H */ | ||
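Each *_IPR_ADDR / *_IPR_POS / *_PRIORITY triple above describes a single 4-bit field inside a 16-bit interrupt priority register, so the bit shift for a field is simply POS * 4. A minimal sketch of programming one such entry (assuming the ctrl_inw()/ctrl_outw() accessors from asm/io.h; this is roughly what the generic IPR setup code ends up doing, not the literal implementation):

	/* raise the SIU to SIU_PRIORITY (3) in INTC_IPRJ, nibble 1 (bits 7:4) */
	unsigned short ipr = ctrl_inw(SIU_IPR_ADDR);

	ipr &= ~(0xf << (SIU_IPR_POS * 4));		/* clear the old 4-bit field */
	ipr |= SIU_PRIORITY << (SIU_IPR_POS * 4);	/* install the new priority */
	ctrl_outw(ipr, SIU_IPR_ADDR);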
diff --git a/include/asm-sh/irq-sh7780.h b/include/asm-sh/irq-sh7780.h deleted file mode 100644 index 19912ae6a7f7..000000000000 --- a/include/asm-sh/irq-sh7780.h +++ /dev/null | |||
@@ -1,311 +0,0 @@ | |||
1 | #ifndef __ASM_SH_IRQ_SH7780_H | ||
2 | #define __ASM_SH_IRQ_SH7780_H | ||
3 | |||
4 | /* | ||
5 | * linux/include/asm-sh/irq-sh7780.h | ||
6 | * | ||
7 | * Copyright (C) 2004 Takashi SHUDO <shudo@hitachi-ul.co.jp> | ||
8 | */ | ||
9 | #define INTC_BASE 0xffd00000 | ||
10 | #define INTC_ICR0 (INTC_BASE+0x0) | ||
11 | #define INTC_ICR1 (INTC_BASE+0x1c) | ||
12 | #define INTC_INTPRI (INTC_BASE+0x10) | ||
13 | #define INTC_INTREQ (INTC_BASE+0x24) | ||
14 | #define INTC_INTMSK0 (INTC_BASE+0x44) | ||
15 | #define INTC_INTMSK1 (INTC_BASE+0x48) | ||
16 | #define INTC_INTMSK2 (INTC_BASE+0x40080) | ||
17 | #define INTC_INTMSKCLR0 (INTC_BASE+0x64) | ||
18 | #define INTC_INTMSKCLR1 (INTC_BASE+0x68) | ||
19 | #define INTC_INTMSKCLR2 (INTC_BASE+0x40084) | ||
20 | #define INTC_NMIFCR (INTC_BASE+0xc0) | ||
21 | #define INTC_USERIMASK (INTC_BASE+0x30000) | ||
22 | |||
23 | #define INTC_INT2PRI0 (INTC_BASE+0x40000) | ||
24 | #define INTC_INT2PRI1 (INTC_BASE+0x40004) | ||
25 | #define INTC_INT2PRI2 (INTC_BASE+0x40008) | ||
26 | #define INTC_INT2PRI3 (INTC_BASE+0x4000c) | ||
27 | #define INTC_INT2PRI4 (INTC_BASE+0x40010) | ||
28 | #define INTC_INT2PRI5 (INTC_BASE+0x40014) | ||
29 | #define INTC_INT2PRI6 (INTC_BASE+0x40018) | ||
30 | #define INTC_INT2PRI7 (INTC_BASE+0x4001c) | ||
31 | #define INTC_INT2A0 (INTC_BASE+0x40030) | ||
32 | #define INTC_INT2A1 (INTC_BASE+0x40034) | ||
33 | #define INTC_INT2MSKR (INTC_BASE+0x40038) | ||
34 | #define INTC_INT2MSKCR (INTC_BASE+0x4003c) | ||
35 | #define INTC_INT2B0 (INTC_BASE+0x40040) | ||
36 | #define INTC_INT2B1 (INTC_BASE+0x40044) | ||
37 | #define INTC_INT2B2 (INTC_BASE+0x40048) | ||
38 | #define INTC_INT2B3 (INTC_BASE+0x4004c) | ||
39 | #define INTC_INT2B4 (INTC_BASE+0x40050) | ||
40 | #define INTC_INT2B5 (INTC_BASE+0x40054) | ||
41 | #define INTC_INT2B6 (INTC_BASE+0x40058) | ||
42 | #define INTC_INT2B7 (INTC_BASE+0x4005c) | ||
43 | #define INTC_INT2GPIC (INTC_BASE+0x40090) | ||
44 | /* | ||
45 | NOTE: | ||
46 | *_IRQ = (INTEVT2 - 0x200)/0x20 | ||
47 | */ | ||
48 | /* IRQ 0-7 line external int*/ | ||
49 | #define IRQ0_IRQ 2 | ||
50 | #define IRQ0_IPR_ADDR INTC_INTPRI | ||
51 | #define IRQ0_IPR_POS 7 | ||
52 | #define IRQ0_PRIORITY 2 | ||
53 | |||
54 | #define IRQ1_IRQ 4 | ||
55 | #define IRQ1_IPR_ADDR INTC_INTPRI | ||
56 | #define IRQ1_IPR_POS 6 | ||
57 | #define IRQ1_PRIORITY 2 | ||
58 | |||
59 | #define IRQ2_IRQ 6 | ||
60 | #define IRQ2_IPR_ADDR INTC_INTPRI | ||
61 | #define IRQ2_IPR_POS 5 | ||
62 | #define IRQ2_PRIORITY 2 | ||
63 | |||
64 | #define IRQ3_IRQ 8 | ||
65 | #define IRQ3_IPR_ADDR INTC_INTPRI | ||
66 | #define IRQ3_IPR_POS 4 | ||
67 | #define IRQ3_PRIORITY 2 | ||
68 | |||
69 | #define IRQ4_IRQ 10 | ||
70 | #define IRQ4_IPR_ADDR INTC_INTPRI | ||
71 | #define IRQ4_IPR_POS 3 | ||
72 | #define IRQ4_PRIORITY 2 | ||
73 | |||
74 | #define IRQ5_IRQ 12 | ||
75 | #define IRQ5_IPR_ADDR INTC_INTPRI | ||
76 | #define IRQ5_IPR_POS 2 | ||
77 | #define IRQ5_PRIORITY 2 | ||
78 | |||
79 | #define IRQ6_IRQ 14 | ||
80 | #define IRQ6_IPR_ADDR INTC_INTPRI | ||
81 | #define IRQ6_IPR_POS 1 | ||
82 | #define IRQ6_PRIORITY 2 | ||
83 | |||
84 | #define IRQ7_IRQ 0 | ||
85 | #define IRQ7_IPR_ADDR INTC_INTPRI | ||
86 | #define IRQ7_IPR_POS 0 | ||
87 | #define IRQ7_PRIORITY 2 | ||
88 | |||
89 | /* TMU */ | ||
90 | /* ch0 */ | ||
91 | #define TMU_IRQ 28 | ||
92 | #define TMU_IPR_ADDR INTC_INT2PRI0 | ||
93 | #define TMU_IPR_POS 3 | ||
94 | #define TMU_PRIORITY 2 | ||
95 | |||
96 | #define TIMER_IRQ 28 | ||
97 | #define TIMER_IPR_ADDR INTC_INT2PRI0 | ||
98 | #define TIMER_IPR_POS 3 | ||
99 | #define TIMER_PRIORITY 2 | ||
100 | |||
101 | /* ch 1*/ | ||
102 | #define TMU_CH1_IRQ 29 | ||
103 | #define TMU_CH1_IPR_ADDR INTC_INT2PRI0 | ||
104 | #define TMU_CH1_IPR_POS 2 | ||
105 | #define TMU_CH1_PRIORITY 2 | ||
106 | |||
107 | #define TIMER1_IRQ 29 | ||
108 | #define TIMER1_IPR_ADDR INTC_INT2PRI0 | ||
109 | #define TIMER1_IPR_POS 2 | ||
110 | #define TIMER1_PRIORITY 2 | ||
111 | |||
112 | /* ch 2*/ | ||
113 | #define TMU_CH2_IRQ 30 | ||
114 | #define TMU_CH2_IPR_ADDR INTC_INT2PRI0 | ||
115 | #define TMU_CH2_IPR_POS 1 | ||
116 | #define TMU_CH2_PRIORITY 2 | ||
117 | /* ch 2 Input capture */ | ||
118 | #define TMU_CH2IC_IRQ 31 | ||
119 | #define TMU_CH2IC_IPR_ADDR INTC_INT2PRI0 | ||
120 | #define TMU_CH2IC_IPR_POS 0 | ||
121 | #define TMU_CH2IC_PRIORITY 2 | ||
122 | /* ch 3 */ | ||
123 | #define TMU_CH3_IRQ 96 | ||
124 | #define TMU_CH3_IPR_ADDR INTC_INT2PRI1 | ||
125 | #define TMU_CH3_IPR_POS 3 | ||
126 | #define TMU_CH3_PRIORITY 2 | ||
127 | /* ch 4 */ | ||
128 | #define TMU_CH4_IRQ 97 | ||
129 | #define TMU_CH4_IPR_ADDR INTC_INT2PRI1 | ||
130 | #define TMU_CH4_IPR_POS 2 | ||
131 | #define TMU_CH4_PRIORITY 2 | ||
132 | /* ch 5*/ | ||
133 | #define TMU_CH5_IRQ 98 | ||
134 | #define TMU_CH5_IPR_ADDR INTC_INT2PRI1 | ||
135 | #define TMU_CH5_IPR_POS 1 | ||
136 | #define TMU_CH5_PRIORITY 2 | ||
137 | |||
138 | /* SCIF0 */ | ||
139 | #define SCIF0_ERI_IRQ 40 | ||
140 | #define SCIF0_RXI_IRQ 41 | ||
141 | #define SCIF0_BRI_IRQ 42 | ||
142 | #define SCIF0_TXI_IRQ 43 | ||
143 | #define SCIF0_IPR_ADDR INTC_INT2PRI2 | ||
144 | #define SCIF0_IPR_POS 3 | ||
145 | #define SCIF0_PRIORITY 3 | ||
146 | |||
147 | /* SCIF1 */ | ||
148 | #define SCIF1_ERI_IRQ 76 | ||
149 | #define SCIF1_RXI_IRQ 77 | ||
150 | #define SCIF1_BRI_IRQ 78 | ||
151 | #define SCIF1_TXI_IRQ 79 | ||
152 | #define SCIF1_IPR_ADDR INTC_INT2PRI2 | ||
153 | #define SCIF1_IPR_POS 2 | ||
154 | #define SCIF1_PRIORITY 3 | ||
155 | |||
156 | #define WDT_IRQ 27 | ||
157 | #define WDT_IPR_ADDR INTC_INT2PRI2 | ||
158 | #define WDT_IPR_POS 1 | ||
159 | #define WDT_PRIORITY 2 | ||
160 | |||
161 | /* DMAC(0) */ | ||
162 | #define DMINT0_IRQ 34 | ||
163 | #define DMINT1_IRQ 35 | ||
164 | #define DMINT2_IRQ 36 | ||
165 | #define DMINT3_IRQ 37 | ||
166 | #define DMINT4_IRQ 44 | ||
167 | #define DMINT5_IRQ 45 | ||
168 | #define DMINT6_IRQ 46 | ||
169 | #define DMINT7_IRQ 47 | ||
170 | #define DMAE_IRQ 38 | ||
171 | #define DMA0_IPR_ADDR INTC_INT2PRI3 | ||
172 | #define DMA0_IPR_POS 2 | ||
173 | #define DMA0_PRIORITY 7 | ||
174 | |||
175 | /* DMAC(1) */ | ||
176 | #define DMINT8_IRQ 92 | ||
177 | #define DMINT9_IRQ 93 | ||
178 | #define DMINT10_IRQ 94 | ||
179 | #define DMINT11_IRQ 95 | ||
180 | #define DMA1_IPR_ADDR INTC_INT2PRI3 | ||
181 | #define DMA1_IPR_POS 1 | ||
182 | #define DMA1_PRIORITY 7 | ||
183 | |||
184 | #define DMTE0_IRQ DMINT0_IRQ | ||
185 | #define DMTE4_IRQ DMINT4_IRQ | ||
186 | #define DMA_IPR_ADDR DMA0_IPR_ADDR | ||
187 | #define DMA_IPR_POS DMA0_IPR_POS | ||
188 | #define DMA_PRIORITY DMA0_PRIORITY | ||
189 | |||
190 | /* CMT */ | ||
191 | #define CMT_IRQ 56 | ||
192 | #define CMT_IPR_ADDR INTC_INT2PRI4 | ||
193 | #define CMT_IPR_POS 3 | ||
194 | #define CMT_PRIORITY 0 | ||
195 | |||
196 | /* HAC */ | ||
197 | #define HAC_IRQ 60 | ||
198 | #define HAC_IPR_ADDR INTC_INT2PRI4 | ||
199 | #define HAC_IPR_POS 2 | ||
200 | #define CMT_PRIORITY 0 | ||
201 | |||
202 | /* PCIC(0) */ | ||
203 | #define PCIC0_IRQ 64 | ||
204 | #define PCIC0_IPR_ADDR INTC_INT2PRI4 | ||
205 | #define PCIC0_IPR_POS 1 | ||
206 | #define PCIC0_PRIORITY 2 | ||
207 | |||
208 | /* PCIC(1) */ | ||
209 | #define PCIC1_IRQ 65 | ||
210 | #define PCIC1_IPR_ADDR INTC_INT2PRI4 | ||
211 | #define PCIC1_IPR_POS 0 | ||
212 | #define PCIC1_PRIORITY 2 | ||
213 | |||
214 | /* PCIC(2) */ | ||
215 | #define PCIC2_IRQ 66 | ||
216 | #define PCIC2_IPR_ADDR INTC_INT2PRI5 | ||
217 | #define PCIC2_IPR_POS 3 | ||
218 | #define PCIC2_PRIORITY 2 | ||
219 | |||
220 | /* PCIC(3) */ | ||
221 | #define PCIC3_IRQ 67 | ||
222 | #define PCIC3_IPR_ADDR INTC_INT2PRI5 | ||
223 | #define PCIC3_IPR_POS 2 | ||
224 | #define PCIC3_PRIORITY 2 | ||
225 | |||
226 | /* PCIC(4) */ | ||
227 | #define PCIC4_IRQ 68 | ||
228 | #define PCIC4_IPR_ADDR INTC_INT2PRI5 | ||
229 | #define PCIC4_IPR_POS 1 | ||
230 | #define PCIC4_PRIORITY 2 | ||
231 | |||
232 | /* PCIC(5) */ | ||
233 | #define PCICERR_IRQ 69 | ||
234 | #define PCICPWD3_IRQ 70 | ||
235 | #define PCICPWD2_IRQ 71 | ||
236 | #define PCICPWD1_IRQ 72 | ||
237 | #define PCICPWD0_IRQ 73 | ||
238 | #define PCIC5_IPR_ADDR INTC_INT2PRI5 | ||
239 | #define PCIC5_IPR_POS 0 | ||
240 | #define PCIC5_PRIORITY 2 | ||
241 | |||
242 | /* SIOF */ | ||
243 | #define SIOF_IRQ 80 | ||
244 | #define SIOF_IPR_ADDR INTC_INT2PRI6 | ||
245 | #define SIOF_IPR_POS 3 | ||
246 | #define SIOF_PRIORITY 3 | ||
247 | |||
248 | /* HSPI */ | ||
249 | #define HSPI_IRQ 84 | ||
250 | #define HSPI_IPR_ADDR INTC_INT2PRI6 | ||
251 | #define HSPI_IPR_POS 2 | ||
252 | #define HSPI_PRIORITY 3 | ||
253 | |||
254 | /* MMCIF */ | ||
255 | #define MMCIF_FSTAT_IRQ 88 | ||
256 | #define MMCIF_TRAN_IRQ 89 | ||
257 | #define MMCIF_ERR_IRQ 90 | ||
258 | #define MMCIF_FRDY_IRQ 91 | ||
259 | #define MMCIF_IPR_ADDR INTC_INT2PRI6 | ||
260 | #define MMCIF_IPR_POS 1 | ||
261 | #define HSPI_PRIORITY 3 | ||
262 | |||
263 | /* SSI */ | ||
264 | #define SSI_IRQ 100 | ||
265 | #define SSI_IPR_ADDR INTC_INT2PRI6 | ||
266 | #define SSI_IPR_POS 0 | ||
267 | #define SSI_PRIORITY 3 | ||
268 | |||
269 | /* FLCTL */ | ||
270 | #define FLCTL_FLSTE_IRQ 104 | ||
271 | #define FLCTL_FLTEND_IRQ 105 | ||
272 | #define FLCTL_FLTRQ0_IRQ 106 | ||
273 | #define FLCTL_FLTRQ1_IRQ 107 | ||
274 | #define FLCTL_IPR_ADDR INTC_INT2PRI7 | ||
275 | #define FLCTL_IPR_POS 3 | ||
276 | #define FLCTL_PRIORITY 3 | ||
277 | |||
278 | /* GPIO */ | ||
279 | #define GPIO0_IRQ 108 | ||
280 | #define GPIO1_IRQ 109 | ||
281 | #define GPIO2_IRQ 110 | ||
282 | #define GPIO3_IRQ 111 | ||
283 | #define GPIO_IPR_ADDR INTC_INT2PRI7 | ||
284 | #define GPIO_IPR_POS 2 | ||
285 | #define GPIO_PRIORITY 3 | ||
286 | |||
287 | #define INTC_TMU0_MSK 0 | ||
288 | #define INTC_TMU3_MSK 1 | ||
289 | #define INTC_RTC_MSK 2 | ||
290 | #define INTC_SCIF0_MSK 3 | ||
291 | #define INTC_SCIF1_MSK 4 | ||
292 | #define INTC_WDT_MSK 5 | ||
293 | #define INTC_HUID_MSK 7 | ||
294 | #define INTC_DMAC0_MSK 8 | ||
295 | #define INTC_DMAC1_MSK 9 | ||
296 | #define INTC_CMT_MSK 12 | ||
297 | #define INTC_HAC_MSK 13 | ||
298 | #define INTC_PCIC0_MSK 14 | ||
299 | #define INTC_PCIC1_MSK 15 | ||
300 | #define INTC_PCIC2_MSK 16 | ||
301 | #define INTC_PCIC3_MSK 17 | ||
302 | #define INTC_PCIC4_MSK 18 | ||
303 | #define INTC_PCIC5_MSK 19 | ||
304 | #define INTC_SIOF_MSK 20 | ||
305 | #define INTC_HSPI_MSK 21 | ||
306 | #define INTC_MMCIF_MSK 22 | ||
307 | #define INTC_SSI_MSK 23 | ||
308 | #define INTC_FLCTL_MSK 24 | ||
309 | #define INTC_GPIO_MSK 25 | ||
310 | |||
311 | #endif /* __ASM_SH_IRQ_SH7780_H */ | ||
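Working the NOTE above backwards, the INTEVT2 vector behind each entry in this table is IRQ * 0x20 + 0x200, which makes the numbers easy to sanity-check; for instance SCIF0_ERI_IRQ (40) implies INTEVT2 0x700 and TMU_IRQ (28) implies 0x580:

	40 * 0x20 + 0x200 == 0x500 + 0x200 == 0x700	/* SCIF0 ERI */
	28 * 0x20 + 0x200 == 0x380 + 0x200 == 0x580	/* TMU ch 0  */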
diff --git a/include/asm-sh/irq.h b/include/asm-sh/irq.h index 6cd3e9e2a76a..fd576088e47e 100644 --- a/include/asm-sh/irq.h +++ b/include/asm-sh/irq.h | |||
@@ -1,233 +1,9 @@ | |||
1 | #ifndef __ASM_SH_IRQ_H | 1 | #ifndef __ASM_SH_IRQ_H |
2 | #define __ASM_SH_IRQ_H | 2 | #define __ASM_SH_IRQ_H |
3 | 3 | ||
4 | /* | ||
5 | * | ||
6 | * linux/include/asm-sh/irq.h | ||
7 | * | ||
8 | * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi | ||
9 | * Copyright (C) 2000 Kazumoto Kojima | ||
10 | * Copyright (C) 2003 Paul Mundt | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <asm/machvec.h> | 4 | #include <asm/machvec.h> |
15 | #include <asm/ptrace.h> /* for pt_regs */ | 5 | #include <asm/ptrace.h> /* for pt_regs */ |
16 | 6 | ||
17 | #ifndef CONFIG_CPU_SUBTYPE_SH7780 | ||
18 | |||
19 | #define INTC_DMAC0_MSK 0 | ||
20 | |||
21 | #if defined(CONFIG_CPU_SH3) | ||
22 | #define INTC_IPRA 0xfffffee2UL | ||
23 | #define INTC_IPRB 0xfffffee4UL | ||
24 | #elif defined(CONFIG_CPU_SH4) | ||
25 | #define INTC_IPRA 0xffd00004UL | ||
26 | #define INTC_IPRB 0xffd00008UL | ||
27 | #define INTC_IPRC 0xffd0000cUL | ||
28 | #define INTC_IPRD 0xffd00010UL | ||
29 | #endif | ||
30 | |||
31 | #define TIMER_IRQ 16 | ||
32 | #define TIMER_IPR_ADDR INTC_IPRA | ||
33 | #define TIMER_IPR_POS 3 | ||
34 | #define TIMER_PRIORITY 2 | ||
35 | |||
36 | #define TIMER1_IRQ 17 | ||
37 | #define TIMER1_IPR_ADDR INTC_IPRA | ||
38 | #define TIMER1_IPR_POS 2 | ||
39 | #define TIMER1_PRIORITY 4 | ||
40 | |||
41 | #define RTC_IRQ 22 | ||
42 | #define RTC_IPR_ADDR INTC_IPRA | ||
43 | #define RTC_IPR_POS 0 | ||
44 | #define RTC_PRIORITY TIMER_PRIORITY | ||
45 | |||
46 | #if defined(CONFIG_CPU_SH3) | ||
47 | #define DMTE0_IRQ 48 | ||
48 | #define DMTE1_IRQ 49 | ||
49 | #define DMTE2_IRQ 50 | ||
50 | #define DMTE3_IRQ 51 | ||
51 | #define DMA_IPR_ADDR INTC_IPRE | ||
52 | #define DMA_IPR_POS 3 | ||
53 | #define DMA_PRIORITY 7 | ||
54 | #if defined(CONFIG_CPU_SUBTYPE_SH7300) | ||
55 | /* TMU2 */ | ||
56 | #define TIMER2_IRQ 18 | ||
57 | #define TIMER2_IPR_ADDR INTC_IPRA | ||
58 | #define TIMER2_IPR_POS 1 | ||
59 | #define TIMER2_PRIORITY 2 | ||
60 | |||
61 | /* WDT */ | ||
62 | #define WDT_IRQ 27 | ||
63 | #define WDT_IPR_ADDR INTC_IPRB | ||
64 | #define WDT_IPR_POS 3 | ||
65 | #define WDT_PRIORITY 2 | ||
66 | |||
67 | /* SIM (SIM Card Module) */ | ||
68 | #define SIM_ERI_IRQ 23 | ||
69 | #define SIM_RXI_IRQ 24 | ||
70 | #define SIM_TXI_IRQ 25 | ||
71 | #define SIM_TEND_IRQ 26 | ||
72 | #define SIM_IPR_ADDR INTC_IPRB | ||
73 | #define SIM_IPR_POS 1 | ||
74 | #define SIM_PRIORITY 2 | ||
75 | |||
76 | /* VIO (Video I/O) */ | ||
77 | #define VIO_IRQ 52 | ||
78 | #define VIO_IPR_ADDR INTC_IPRE | ||
79 | #define VIO_IPR_POS 2 | ||
80 | #define VIO_PRIORITY 2 | ||
81 | |||
82 | /* MFI (Multi Functional Interface) */ | ||
83 | #define MFI_IRQ 56 | ||
84 | #define MFI_IPR_ADDR INTC_IPRE | ||
85 | #define MFI_IPR_POS 1 | ||
86 | #define MFI_PRIORITY 2 | ||
87 | |||
88 | /* VPU (Video Processing Unit) */ | ||
89 | #define VPU_IRQ 60 | ||
90 | #define VPU_IPR_ADDR INTC_IPRE | ||
91 | #define VPU_IPR_POS 0 | ||
92 | #define VPU_PRIORITY 2 | ||
93 | |||
94 | /* KEY (Key Scan Interface) */ | ||
95 | #define KEY_IRQ 79 | ||
96 | #define KEY_IPR_ADDR INTC_IPRF | ||
97 | #define KEY_IPR_POS 3 | ||
98 | #define KEY_PRIORITY 2 | ||
99 | |||
100 | /* CMT (Compare Match Timer) */ | ||
101 | #define CMT_IRQ 104 | ||
102 | #define CMT_IPR_ADDR INTC_IPRF | ||
103 | #define CMT_IPR_POS 0 | ||
104 | #define CMT_PRIORITY 2 | ||
105 | |||
106 | /* DMAC(1) */ | ||
107 | #define DMTE0_IRQ 48 | ||
108 | #define DMTE1_IRQ 49 | ||
109 | #define DMTE2_IRQ 50 | ||
110 | #define DMTE3_IRQ 51 | ||
111 | #define DMA1_IPR_ADDR INTC_IPRE | ||
112 | #define DMA1_IPR_POS 3 | ||
113 | #define DMA1_PRIORITY 7 | ||
114 | |||
115 | /* DMAC(2) */ | ||
116 | #define DMTE4_IRQ 76 | ||
117 | #define DMTE5_IRQ 77 | ||
118 | #define DMA2_IPR_ADDR INTC_IPRF | ||
119 | #define DMA2_IPR_POS 2 | ||
120 | #define DMA2_PRIORITY 7 | ||
121 | |||
122 | /* SIOF0 */ | ||
123 | #define SIOF0_IRQ 84 | ||
124 | #define SIOF0_IPR_ADDR INTC_IPRH | ||
125 | #define SIOF0_IPR_POS 3 | ||
126 | #define SIOF0_PRIORITY 3 | ||
127 | |||
128 | /* FLCTL (Flash Memory Controller) */ | ||
129 | #define FLSTE_IRQ 92 | ||
130 | #define FLTEND_IRQ 93 | ||
131 | #define FLTRQ0_IRQ 94 | ||
132 | #define FLTRQ1_IRQ 95 | ||
133 | #define FLCTL_IPR_ADDR INTC_IPRH | ||
134 | #define FLCTL_IPR_POS 1 | ||
135 | #define FLCTL_PRIORITY 3 | ||
136 | |||
137 | /* IIC (IIC Bus Interface) */ | ||
138 | #define IIC_ALI_IRQ 96 | ||
139 | #define IIC_TACKI_IRQ 97 | ||
140 | #define IIC_WAITI_IRQ 98 | ||
141 | #define IIC_DTEI_IRQ 99 | ||
142 | #define IIC_IPR_ADDR INTC_IPRH | ||
143 | #define IIC_IPR_POS 0 | ||
144 | #define IIC_PRIORITY 3 | ||
145 | |||
146 | /* SIO0 */ | ||
147 | #define SIO0_IRQ 88 | ||
148 | #define SIO0_IPR_ADDR INTC_IPRI | ||
149 | #define SIO0_IPR_POS 3 | ||
150 | #define SIO0_PRIORITY 3 | ||
151 | |||
152 | /* SIU (Sound Interface Unit) */ | ||
153 | #define SIU_IRQ 108 | ||
154 | #define SIU_IPR_ADDR INTC_IPRJ | ||
155 | #define SIU_IPR_POS 1 | ||
156 | #define SIU_PRIORITY 3 | ||
157 | |||
158 | #endif | ||
159 | #elif defined(CONFIG_CPU_SH4) | ||
160 | #define DMTE0_IRQ 34 | ||
161 | #define DMTE1_IRQ 35 | ||
162 | #define DMTE2_IRQ 36 | ||
163 | #define DMTE3_IRQ 37 | ||
164 | #define DMTE4_IRQ 44 /* 7751R only */ | ||
165 | #define DMTE5_IRQ 45 /* 7751R only */ | ||
166 | #define DMTE6_IRQ 46 /* 7751R only */ | ||
167 | #define DMTE7_IRQ 47 /* 7751R only */ | ||
168 | #define DMAE_IRQ 38 | ||
169 | #define DMA_IPR_ADDR INTC_IPRC | ||
170 | #define DMA_IPR_POS 2 | ||
171 | #define DMA_PRIORITY 7 | ||
172 | #endif | ||
173 | |||
174 | #if defined (CONFIG_CPU_SUBTYPE_SH7707) || defined (CONFIG_CPU_SUBTYPE_SH7708) || \ | ||
175 | defined (CONFIG_CPU_SUBTYPE_SH7709) || defined (CONFIG_CPU_SUBTYPE_SH7750) || \ | ||
176 | defined (CONFIG_CPU_SUBTYPE_SH7751) || defined (CONFIG_CPU_SUBTYPE_SH7706) | ||
177 | #define SCI_ERI_IRQ 23 | ||
178 | #define SCI_RXI_IRQ 24 | ||
179 | #define SCI_TXI_IRQ 25 | ||
180 | #define SCI_IPR_ADDR INTC_IPRB | ||
181 | #define SCI_IPR_POS 1 | ||
182 | #define SCI_PRIORITY 3 | ||
183 | #endif | ||
184 | |||
185 | #if defined(CONFIG_CPU_SUBTYPE_SH7300) | ||
186 | #define SCIF0_IRQ 80 | ||
187 | #define SCIF0_IPR_ADDR INTC_IPRG | ||
188 | #define SCIF0_IPR_POS 3 | ||
189 | #define SCIF0_PRIORITY 3 | ||
190 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | ||
191 | defined(CONFIG_CPU_SUBTYPE_SH7706) || \ | ||
192 | defined(CONFIG_CPU_SUBTYPE_SH7707) || \ | ||
193 | defined(CONFIG_CPU_SUBTYPE_SH7709) | ||
194 | #define SCIF_ERI_IRQ 56 | ||
195 | #define SCIF_RXI_IRQ 57 | ||
196 | #define SCIF_BRI_IRQ 58 | ||
197 | #define SCIF_TXI_IRQ 59 | ||
198 | #define SCIF_IPR_ADDR INTC_IPRE | ||
199 | #define SCIF_IPR_POS 1 | ||
200 | #define SCIF_PRIORITY 3 | ||
201 | |||
202 | #define IRDA_ERI_IRQ 52 | ||
203 | #define IRDA_RXI_IRQ 53 | ||
204 | #define IRDA_BRI_IRQ 54 | ||
205 | #define IRDA_TXI_IRQ 55 | ||
206 | #define IRDA_IPR_ADDR INTC_IPRE | ||
207 | #define IRDA_IPR_POS 2 | ||
208 | #define IRDA_PRIORITY 3 | ||
209 | #elif defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \ | ||
210 | defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202) | ||
211 | #define SCIF_ERI_IRQ 40 | ||
212 | #define SCIF_RXI_IRQ 41 | ||
213 | #define SCIF_BRI_IRQ 42 | ||
214 | #define SCIF_TXI_IRQ 43 | ||
215 | #define SCIF_IPR_ADDR INTC_IPRC | ||
216 | #define SCIF_IPR_POS 1 | ||
217 | #define SCIF_PRIORITY 3 | ||
218 | #if defined(CONFIG_CPU_SUBTYPE_ST40STB1) | ||
219 | #define SCIF1_ERI_IRQ 23 | ||
220 | #define SCIF1_RXI_IRQ 24 | ||
221 | #define SCIF1_BRI_IRQ 25 | ||
222 | #define SCIF1_TXI_IRQ 26 | ||
223 | #define SCIF1_IPR_ADDR INTC_IPRB | ||
224 | #define SCIF1_IPR_POS 1 | ||
225 | #define SCIF1_PRIORITY 3 | ||
226 | #endif /* ST40STB1 */ | ||
227 | |||
228 | #endif /* 775x / SH4-202 / ST40STB1 */ | ||
229 | #endif /* 7780 */ | ||
230 | |||
231 | /* NR_IRQS is made from three components: | 7 | /* NR_IRQS is made from three components: |
232 | * 1. ONCHIP_NR_IRQS - number of IRLs + on-chip peripheral modules | 8 | * 1. ONCHIP_NR_IRQS - number of IRLs + on-chip peripheral modules |
233 | * 2. PINT_NR_IRQS - number of PINT interrupts | 9 | * 2. PINT_NR_IRQS - number of PINT interrupts |
@@ -265,6 +41,10 @@ | |||
265 | # define ONCHIP_NR_IRQS 109 | 41 | # define ONCHIP_NR_IRQS 109 |
266 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | 42 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) |
267 | # define ONCHIP_NR_IRQS 111 | 43 | # define ONCHIP_NR_IRQS 111 |
44 | #elif defined(CONFIG_CPU_SUBTYPE_SH7206) | ||
45 | # define ONCHIP_NR_IRQS 256 | ||
46 | #elif defined(CONFIG_CPU_SUBTYPE_SH7619) | ||
47 | # define ONCHIP_NR_IRQS 128 | ||
268 | #elif defined(CONFIG_SH_UNKNOWN) /* Must be last */ | 48 | #elif defined(CONFIG_SH_UNKNOWN) /* Must be last */ |
269 | # define ONCHIP_NR_IRQS 144 | 49 | # define ONCHIP_NR_IRQS 144 |
270 | #endif | 50 | #endif |
@@ -312,9 +92,11 @@ | |||
312 | /* NR_IRQS. 1+2+3 */ | 92 | /* NR_IRQS. 1+2+3 */ |
313 | #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS) | 93 | #define NR_IRQS (ONCHIP_NR_IRQS + PINT_NR_IRQS + OFFCHIP_NR_IRQS) |
314 | 94 | ||
315 | extern void disable_irq(unsigned int); | 95 | /* |
316 | extern void disable_irq_nosync(unsigned int); | 96 | * Convert back and forth between INTEVT and IRQ values. |
317 | extern void enable_irq(unsigned int); | 97 | */ |
98 | #define evt2irq(evt) (((evt) >> 5) - 16) | ||
99 | #define irq2evt(irq) (((irq) + 16) << 5) | ||
318 | 100 | ||
319 | /* | 101 | /* |
320 | * Simple Mask Register Support | 102 | * Simple Mask Register Support |
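The new evt2irq()/irq2evt() macros above capture the same 0x20-per-vector spacing generically. A quick worked example, using the usual 0x400 INTEVT of TMU0 (which lands on the familiar IRQ 16 that the removed block hard-coded as TIMER_IRQ):

	evt2irq(0x400)	/* (0x400 >> 5) - 16  ->  32 - 16  ->  16    */
	irq2evt(16)	/* (16 + 16) << 5     ->  32 << 5  ->  0x400 */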
@@ -327,362 +109,36 @@ extern unsigned short *irq_mask_register; | |||
327 | */ | 109 | */ |
328 | void init_IRQ_pint(void); | 110 | void init_IRQ_pint(void); |
329 | 111 | ||
112 | /* | ||
113 | * The shift value is now the number of bits to shift, not the number of | ||
114 | * bits/4. This is to make it easier to read the value directly from the | ||
115 | * datasheets. The IPR address, addr, will be set from ipr_idx via the | ||
116 | * map_ipridx_to_addr function. | ||
117 | */ | ||
330 | struct ipr_data { | 118 | struct ipr_data { |
331 | unsigned int irq; | 119 | unsigned int irq; |
332 | unsigned int addr; /* Address of Interrupt Priority Register */ | 120 | int ipr_idx; /* Index for the IPR register */ |
333 | int shift; /* Shifts of the 16-bit data */ | 121 | int shift; /* Number of bits to shift the data */ |
334 | int priority; /* The priority */ | 122 | int priority; /* The priority */ |
123 | unsigned int addr; /* Address of Interrupt Priority Register */ | ||
335 | }; | 124 | }; |
336 | 125 | ||
337 | /* | 126 | /* |
338 | * Function for "on chip support modules". | 127 | * Given an IPR IDX, map the value to an IPR register address. |
339 | */ | 128 | */ |
340 | extern void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs); | 129 | unsigned int map_ipridx_to_addr(int idx); |
341 | extern void make_imask_irq(unsigned int irq); | ||
342 | |||
343 | #if defined(CONFIG_CPU_SUBTYPE_SH7300) | ||
344 | #undef INTC_IPRA | ||
345 | #undef INTC_IPRB | ||
346 | #define INTC_IPRA 0xA414FEE2UL | ||
347 | #define INTC_IPRB 0xA414FEE4UL | ||
348 | #define INTC_IPRC 0xA4140016UL | ||
349 | #define INTC_IPRD 0xA4140018UL | ||
350 | #define INTC_IPRE 0xA414001AUL | ||
351 | #define INTC_IPRF 0xA4080000UL | ||
352 | #define INTC_IPRG 0xA4080002UL | ||
353 | #define INTC_IPRH 0xA4080004UL | ||
354 | #define INTC_IPRI 0xA4080006UL | ||
355 | #define INTC_IPRJ 0xA4080008UL | ||
356 | |||
357 | #define INTC_IMR0 0xA4080040UL | ||
358 | #define INTC_IMR1 0xA4080042UL | ||
359 | #define INTC_IMR2 0xA4080044UL | ||
360 | #define INTC_IMR3 0xA4080046UL | ||
361 | #define INTC_IMR4 0xA4080048UL | ||
362 | #define INTC_IMR5 0xA408004AUL | ||
363 | #define INTC_IMR6 0xA408004CUL | ||
364 | #define INTC_IMR7 0xA408004EUL | ||
365 | #define INTC_IMR8 0xA4080050UL | ||
366 | #define INTC_IMR9 0xA4080052UL | ||
367 | #define INTC_IMR10 0xA4080054UL | ||
368 | |||
369 | #define INTC_IMCR0 0xA4080060UL | ||
370 | #define INTC_IMCR1 0xA4080062UL | ||
371 | #define INTC_IMCR2 0xA4080064UL | ||
372 | #define INTC_IMCR3 0xA4080066UL | ||
373 | #define INTC_IMCR4 0xA4080068UL | ||
374 | #define INTC_IMCR5 0xA408006AUL | ||
375 | #define INTC_IMCR6 0xA408006CUL | ||
376 | #define INTC_IMCR7 0xA408006EUL | ||
377 | #define INTC_IMCR8 0xA4080070UL | ||
378 | #define INTC_IMCR9 0xA4080072UL | ||
379 | #define INTC_IMCR10 0xA4080074UL | ||
380 | |||
381 | #define INTC_ICR0 0xA414FEE0UL | ||
382 | #define INTC_ICR1 0xA4140010UL | ||
383 | |||
384 | #define INTC_IRR0 0xA4140004UL | ||
385 | |||
386 | #define PORT_PACR 0xA4050100UL | ||
387 | #define PORT_PBCR 0xA4050102UL | ||
388 | #define PORT_PCCR 0xA4050104UL | ||
389 | #define PORT_PDCR 0xA4050106UL | ||
390 | #define PORT_PECR 0xA4050108UL | ||
391 | #define PORT_PFCR 0xA405010AUL | ||
392 | #define PORT_PGCR 0xA405010CUL | ||
393 | #define PORT_PHCR 0xA405010EUL | ||
394 | #define PORT_PJCR 0xA4050110UL | ||
395 | #define PORT_PKCR 0xA4050112UL | ||
396 | #define PORT_PLCR 0xA4050114UL | ||
397 | #define PORT_SCPCR 0xA4050116UL | ||
398 | #define PORT_PMCR 0xA4050118UL | ||
399 | #define PORT_PNCR 0xA405011AUL | ||
400 | #define PORT_PQCR 0xA405011CUL | ||
401 | |||
402 | #define PORT_PSELA 0xA4050140UL | ||
403 | #define PORT_PSELB 0xA4050142UL | ||
404 | #define PORT_PSELC 0xA4050144UL | ||
405 | |||
406 | #define PORT_HIZCRA 0xA4050146UL | ||
407 | #define PORT_HIZCRB 0xA4050148UL | ||
408 | #define PORT_DRVCR 0xA4050150UL | ||
409 | |||
410 | #define PORT_PADR 0xA4050120UL | ||
411 | #define PORT_PBDR 0xA4050122UL | ||
412 | #define PORT_PCDR 0xA4050124UL | ||
413 | #define PORT_PDDR 0xA4050126UL | ||
414 | #define PORT_PEDR 0xA4050128UL | ||
415 | #define PORT_PFDR 0xA405012AUL | ||
416 | #define PORT_PGDR 0xA405012CUL | ||
417 | #define PORT_PHDR 0xA405012EUL | ||
418 | #define PORT_PJDR 0xA4050130UL | ||
419 | #define PORT_PKDR 0xA4050132UL | ||
420 | #define PORT_PLDR 0xA4050134UL | ||
421 | #define PORT_SCPDR 0xA4050136UL | ||
422 | #define PORT_PMDR 0xA4050138UL | ||
423 | #define PORT_PNDR 0xA405013AUL | ||
424 | #define PORT_PQDR 0xA405013CUL | ||
425 | |||
426 | #define IRQ0_IRQ 32 | ||
427 | #define IRQ1_IRQ 33 | ||
428 | #define IRQ2_IRQ 34 | ||
429 | #define IRQ3_IRQ 35 | ||
430 | #define IRQ4_IRQ 36 | ||
431 | #define IRQ5_IRQ 37 | ||
432 | |||
433 | #define IRQ0_IPR_ADDR INTC_IPRC | ||
434 | #define IRQ1_IPR_ADDR INTC_IPRC | ||
435 | #define IRQ2_IPR_ADDR INTC_IPRC | ||
436 | #define IRQ3_IPR_ADDR INTC_IPRC | ||
437 | #define IRQ4_IPR_ADDR INTC_IPRD | ||
438 | #define IRQ5_IPR_ADDR INTC_IPRD | ||
439 | |||
440 | #define IRQ0_IPR_POS 0 | ||
441 | #define IRQ1_IPR_POS 1 | ||
442 | #define IRQ2_IPR_POS 2 | ||
443 | #define IRQ3_IPR_POS 3 | ||
444 | #define IRQ4_IPR_POS 0 | ||
445 | #define IRQ5_IPR_POS 1 | ||
446 | 130 | ||
447 | #define IRQ0_PRIORITY 1 | 131 | /* |
448 | #define IRQ1_PRIORITY 1 | 132 | * Enable individual interrupt mode for external IPR IRQs. |
449 | #define IRQ2_PRIORITY 1 | 133 | */ |
450 | #define IRQ3_PRIORITY 1 | 134 | void ipr_irq_enable_irlm(void); |
451 | #define IRQ4_PRIORITY 1 | ||
452 | #define IRQ5_PRIORITY 1 | ||
453 | |||
454 | extern int ipr_irq_demux(int irq); | ||
455 | #define __irq_demux(irq) ipr_irq_demux(irq) | ||
456 | |||
457 | #elif defined(CONFIG_CPU_SUBTYPE_SH7604) | ||
458 | #define INTC_IPRA 0xfffffee2UL | ||
459 | #define INTC_IPRB 0xfffffe60UL | ||
460 | |||
461 | #define INTC_VCRA 0xfffffe62UL | ||
462 | #define INTC_VCRB 0xfffffe64UL | ||
463 | #define INTC_VCRC 0xfffffe66UL | ||
464 | #define INTC_VCRD 0xfffffe68UL | ||
465 | |||
466 | #define INTC_VCRWDT 0xfffffee4UL | ||
467 | #define INTC_VCRDIV 0xffffff0cUL | ||
468 | #define INTC_VCRDMA0 0xffffffa0UL | ||
469 | #define INTC_VCRDMA1 0xffffffa8UL | ||
470 | |||
471 | #define INTC_ICR 0xfffffee0UL | ||
472 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) || \ | ||
473 | defined(CONFIG_CPU_SUBTYPE_SH7706) || \ | ||
474 | defined(CONFIG_CPU_SUBTYPE_SH7707) || \ | ||
475 | defined(CONFIG_CPU_SUBTYPE_SH7709) || \ | ||
476 | defined(CONFIG_CPU_SUBTYPE_SH7710) | ||
477 | #define INTC_IRR0 0xa4000004UL | ||
478 | #define INTC_IRR1 0xa4000006UL | ||
479 | #define INTC_IRR2 0xa4000008UL | ||
480 | |||
481 | #define INTC_ICR0 0xfffffee0UL | ||
482 | #define INTC_ICR1 0xa4000010UL | ||
483 | #define INTC_ICR2 0xa4000012UL | ||
484 | #define INTC_INTER 0xa4000014UL | ||
485 | |||
486 | #define INTC_IPRC 0xa4000016UL | ||
487 | #define INTC_IPRD 0xa4000018UL | ||
488 | #define INTC_IPRE 0xa400001aUL | ||
489 | #if defined(CONFIG_CPU_SUBTYPE_SH7707) | ||
490 | #define INTC_IPRF 0xa400001cUL | ||
491 | #elif defined(CONFIG_CPU_SUBTYPE_SH7705) | ||
492 | #define INTC_IPRF 0xa4080000UL | ||
493 | #define INTC_IPRG 0xa4080002UL | ||
494 | #define INTC_IPRH 0xa4080004UL | ||
495 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) | ||
496 | /* Interrupt Controller Registers */ | ||
497 | #undef INTC_IPRA | ||
498 | #undef INTC_IPRB | ||
499 | #define INTC_IPRA 0xA414FEE2UL | ||
500 | #define INTC_IPRB 0xA414FEE4UL | ||
501 | #define INTC_IPRF 0xA4080000UL | ||
502 | #define INTC_IPRG 0xA4080002UL | ||
503 | #define INTC_IPRH 0xA4080004UL | ||
504 | #define INTC_IPRI 0xA4080006UL | ||
505 | |||
506 | #undef INTC_ICR0 | ||
507 | #undef INTC_ICR1 | ||
508 | #define INTC_ICR0 0xA414FEE0UL | ||
509 | #define INTC_ICR1 0xA4140010UL | ||
510 | |||
511 | #define INTC_IRR0 0xa4000004UL | ||
512 | #define INTC_IRR1 0xa4000006UL | ||
513 | #define INTC_IRR2 0xa4000008UL | ||
514 | #define INTC_IRR3 0xa400000AUL | ||
515 | #define INTC_IRR4 0xa400000CUL | ||
516 | #define INTC_IRR5 0xa4080020UL | ||
517 | #define INTC_IRR7 0xa4080024UL | ||
518 | #define INTC_IRR8 0xa4080026UL | ||
519 | |||
520 | /* Interrupt numbers */ | ||
521 | #define TIMER2_IRQ 18 | ||
522 | #define TIMER2_IPR_ADDR INTC_IPRA | ||
523 | #define TIMER2_IPR_POS 1 | ||
524 | #define TIMER2_PRIORITY 2 | ||
525 | |||
526 | /* WDT */ | ||
527 | #define WDT_IRQ 27 | ||
528 | #define WDT_IPR_ADDR INTC_IPRB | ||
529 | #define WDT_IPR_POS 3 | ||
530 | #define WDT_PRIORITY 2 | ||
531 | |||
532 | #define SCIF0_ERI_IRQ 52 | ||
533 | #define SCIF0_RXI_IRQ 53 | ||
534 | #define SCIF0_BRI_IRQ 54 | ||
535 | #define SCIF0_TXI_IRQ 55 | ||
536 | #define SCIF0_IPR_ADDR INTC_IPRE | ||
537 | #define SCIF0_IPR_POS 2 | ||
538 | #define SCIF0_PRIORITY 3 | ||
539 | |||
540 | #define DMTE4_IRQ 76 | ||
541 | #define DMTE5_IRQ 77 | ||
542 | #define DMA2_IPR_ADDR INTC_IPRF | ||
543 | #define DMA2_IPR_POS 2 | ||
544 | #define DMA2_PRIORITY 7 | ||
545 | |||
546 | #define IPSEC_IRQ 79 | ||
547 | #define IPSEC_IPR_ADDR INTC_IPRF | ||
548 | #define IPSEC_IPR_POS 3 | ||
549 | #define IPSEC_PRIORITY 3 | ||
550 | |||
551 | /* EDMAC */ | ||
552 | #define EDMAC0_IRQ 80 | ||
553 | #define EDMAC0_IPR_ADDR INTC_IPRG | ||
554 | #define EDMAC0_IPR_POS 3 | ||
555 | #define EDMAC0_PRIORITY 3 | ||
556 | |||
557 | #define EDMAC1_IRQ 81 | ||
558 | #define EDMAC1_IPR_ADDR INTC_IPRG | ||
559 | #define EDMAC1_IPR_POS 2 | ||
560 | #define EDMAC1_PRIORITY 3 | ||
561 | |||
562 | #define EDMAC2_IRQ 82 | ||
563 | #define EDMAC2_IPR_ADDR INTC_IPRG | ||
564 | #define EDMAC2_IPR_POS 1 | ||
565 | #define EDMAC2_PRIORITY 3 | ||
566 | |||
567 | /* SIOF */ | ||
568 | #define SIOF0_ERI_IRQ 96 | ||
569 | #define SIOF0_TXI_IRQ 97 | ||
570 | #define SIOF0_RXI_IRQ 98 | ||
571 | #define SIOF0_CCI_IRQ 99 | ||
572 | #define SIOF0_IPR_ADDR INTC_IPRH | ||
573 | #define SIOF0_IPR_POS 0 | ||
574 | #define SIOF0_PRIORITY 7 | ||
575 | |||
576 | #define SIOF1_ERI_IRQ 100 | ||
577 | #define SIOF1_TXI_IRQ 101 | ||
578 | #define SIOF1_RXI_IRQ 102 | ||
579 | #define SIOF1_CCI_IRQ 103 | ||
580 | #define SIOF1_IPR_ADDR INTC_IPRI | ||
581 | #define SIOF1_IPR_POS 1 | ||
582 | #define SIOF1_PRIORITY 7 | ||
583 | #endif /* CONFIG_CPU_SUBTYPE_SH7710 */ | ||
584 | |||
585 | #if defined(CONFIG_CPU_SUBTYPE_SH7710) | ||
586 | #define PORT_PACR 0xa4050100UL | ||
587 | #define PORT_PBCR 0xa4050102UL | ||
588 | #define PORT_PCCR 0xa4050104UL | ||
589 | #define PORT_PETCR 0xa4050106UL | ||
590 | #define PORT_PADR 0xa4050120UL | ||
591 | #define PORT_PBDR 0xa4050122UL | ||
592 | #define PORT_PCDR 0xa4050124UL | ||
593 | #else | ||
594 | #define PORT_PACR 0xa4000100UL | ||
595 | #define PORT_PBCR 0xa4000102UL | ||
596 | #define PORT_PCCR 0xa4000104UL | ||
597 | #define PORT_PFCR 0xa400010aUL | ||
598 | #define PORT_PADR 0xa4000120UL | ||
599 | #define PORT_PBDR 0xa4000122UL | ||
600 | #define PORT_PCDR 0xa4000124UL | ||
601 | #define PORT_PFDR 0xa400012aUL | ||
602 | #endif | ||
603 | |||
604 | #define IRQ0_IRQ 32 | ||
605 | #define IRQ1_IRQ 33 | ||
606 | #define IRQ2_IRQ 34 | ||
607 | #define IRQ3_IRQ 35 | ||
608 | #define IRQ4_IRQ 36 | ||
609 | #define IRQ5_IRQ 37 | ||
610 | |||
611 | #define IRQ0_IPR_ADDR INTC_IPRC | ||
612 | #define IRQ1_IPR_ADDR INTC_IPRC | ||
613 | #define IRQ2_IPR_ADDR INTC_IPRC | ||
614 | #define IRQ3_IPR_ADDR INTC_IPRC | ||
615 | #define IRQ4_IPR_ADDR INTC_IPRD | ||
616 | #define IRQ5_IPR_ADDR INTC_IPRD | ||
617 | |||
618 | #define IRQ0_IPR_POS 0 | ||
619 | #define IRQ1_IPR_POS 1 | ||
620 | #define IRQ2_IPR_POS 2 | ||
621 | #define IRQ3_IPR_POS 3 | ||
622 | #define IRQ4_IPR_POS 0 | ||
623 | #define IRQ5_IPR_POS 1 | ||
624 | |||
625 | #define IRQ0_PRIORITY 1 | ||
626 | #define IRQ1_PRIORITY 1 | ||
627 | #define IRQ2_PRIORITY 1 | ||
628 | #define IRQ3_PRIORITY 1 | ||
629 | #define IRQ4_PRIORITY 1 | ||
630 | #define IRQ5_PRIORITY 1 | ||
631 | |||
632 | #define PINT0_IRQ 40 | ||
633 | #define PINT8_IRQ 41 | ||
634 | |||
635 | #define PINT0_IPR_ADDR INTC_IPRD | ||
636 | #define PINT8_IPR_ADDR INTC_IPRD | ||
637 | |||
638 | #define PINT0_IPR_POS 3 | ||
639 | #define PINT8_IPR_POS 2 | ||
640 | #define PINT0_PRIORITY 2 | ||
641 | #define PINT8_PRIORITY 2 | ||
642 | |||
643 | extern int ipr_irq_demux(int irq); | ||
644 | #define __irq_demux(irq) ipr_irq_demux(irq) | ||
645 | #endif /* CONFIG_CPU_SUBTYPE_SH7707 || CONFIG_CPU_SUBTYPE_SH7709 */ | ||
646 | |||
647 | #if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7751) || \ | ||
648 | defined(CONFIG_CPU_SUBTYPE_ST40STB1) || defined(CONFIG_CPU_SUBTYPE_SH4_202) | ||
649 | #define INTC_ICR 0xffd00000 | ||
650 | #define INTC_ICR_NMIL (1<<15) | ||
651 | #define INTC_ICR_MAI (1<<14) | ||
652 | #define INTC_ICR_NMIB (1<<9) | ||
653 | #define INTC_ICR_NMIE (1<<8) | ||
654 | #define INTC_ICR_IRLM (1<<7) | ||
655 | #endif | ||
656 | |||
657 | #ifdef CONFIG_CPU_SUBTYPE_SH7780 | ||
658 | #include <asm/irq-sh7780.h> | ||
659 | #endif | ||
660 | |||
661 | /* SH with INTC2-style interrupts */ | ||
662 | #ifdef CONFIG_CPU_HAS_INTC2_IRQ | ||
663 | #if defined(CONFIG_CPU_SUBTYPE_ST40STB1) | ||
664 | #define INTC2_BASE 0xfe080000 | ||
665 | #define INTC2_FIRST_IRQ 64 | ||
666 | #define INTC2_INTREQ_OFFSET 0x20 | ||
667 | #define INTC2_INTMSK_OFFSET 0x40 | ||
668 | #define INTC2_INTMSKCLR_OFFSET 0x60 | ||
669 | #define NR_INTC2_IRQS 25 | ||
670 | #elif defined(CONFIG_CPU_SUBTYPE_SH7760) | ||
671 | #define INTC2_BASE 0xfe080000 | ||
672 | #define INTC2_FIRST_IRQ 48 /* INTEVT 0x800 */ | ||
673 | #define INTC2_INTREQ_OFFSET 0x20 | ||
674 | #define INTC2_INTMSK_OFFSET 0x40 | ||
675 | #define INTC2_INTMSKCLR_OFFSET 0x60 | ||
676 | #define NR_INTC2_IRQS 64 | ||
677 | #elif defined(CONFIG_CPU_SUBTYPE_SH7780) | ||
678 | #define INTC2_BASE 0xffd40000 | ||
679 | #define INTC2_FIRST_IRQ 21 | ||
680 | #define INTC2_INTMSK_OFFSET (0x38) | ||
681 | #define INTC2_INTMSKCLR_OFFSET (0x3c) | ||
682 | #define NR_INTC2_IRQS 60 | ||
683 | #endif | ||
684 | 135 | ||
685 | #define INTC2_INTPRI_OFFSET 0x00 | 136 | /* |
137 | * Function for "on chip support modules". | ||
138 | */ | ||
139 | void make_ipr_irq(struct ipr_data *table, unsigned int nr_irqs); | ||
140 | void make_imask_irq(unsigned int irq); | ||
141 | void init_IRQ_ipr(void); | ||
686 | 142 | ||
687 | struct intc2_data { | 143 | struct intc2_data { |
688 | unsigned short irq; | 144 | unsigned short irq; |
@@ -693,20 +149,14 @@ struct intc2_data { | |||
693 | 149 | ||
694 | void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs); | 150 | void make_intc2_irq(struct intc2_data *, unsigned int nr_irqs); |
695 | void init_IRQ_intc2(void); | 151 | void init_IRQ_intc2(void); |
696 | #endif | ||
697 | |||
698 | extern int shmse_irq_demux(int irq); | ||
699 | 152 | ||
700 | static inline int generic_irq_demux(int irq) | 153 | static inline int generic_irq_demux(int irq) |
701 | { | 154 | { |
702 | return irq; | 155 | return irq; |
703 | } | 156 | } |
704 | 157 | ||
705 | #ifndef __irq_demux | ||
706 | #define __irq_demux(irq) (irq) | ||
707 | #endif | ||
708 | #define irq_canonicalize(irq) (irq) | 158 | #define irq_canonicalize(irq) (irq) |
709 | #define irq_demux(irq) __irq_demux(sh_mv.mv_irq_demux(irq)) | 159 | #define irq_demux(irq) sh_mv.mv_irq_demux(irq) |
710 | 160 | ||
711 | #ifdef CONFIG_4KSTACKS | 161 | #ifdef CONFIG_4KSTACKS |
712 | extern void irq_ctx_init(int cpu); | 162 | extern void irq_ctx_init(int cpu); |
@@ -717,12 +167,4 @@ extern void irq_ctx_exit(int cpu); | |||
717 | # define irq_ctx_exit(cpu) do { } while (0) | 167 | # define irq_ctx_exit(cpu) do { } while (0) |
718 | #endif | 168 | #endif |
719 | 169 | ||
720 | #if defined(CONFIG_CPU_SUBTYPE_SH73180) | ||
721 | #include <asm/irq-sh73180.h> | ||
722 | #endif | ||
723 | |||
724 | #if defined(CONFIG_CPU_SUBTYPE_SH7343) | ||
725 | #include <asm/irq-sh7343.h> | ||
726 | #endif | ||
727 | |||
728 | #endif /* __ASM_SH_IRQ_H */ | 170 | #endif /* __ASM_SH_IRQ_H */ |
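To make the reworked struct ipr_data concrete: a CPU subtype now describes each source by an index into its IPR register block plus a raw bit shift, and the addr field is filled in from ipr_idx via map_ipridx_to_addr() at registration time. The sketch below is hypothetical (table contents and function names invented for illustration); only make_ipr_irq(), the field layout and ARRAY_SIZE() are taken as given:

	static struct ipr_data example_ipr_map[] = {
		/* IRQ, IPR index, shift (in bits), priority */
		{ 16, 0, 12, 2 },	/* e.g. a timer in the top nibble of IPR reg 0   */
		{ 23, 1,  4, 3 },	/* e.g. a serial source in bits 7:4 of IPR reg 1 */
	};

	static void __init example_ipr_setup(void)
	{
		/* addr is derived from ipr_idx through map_ipridx_to_addr() */
		make_ipr_irq(example_ipr_map, ARRAY_SIZE(example_ipr_map));
	}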
diff --git a/include/asm-sh/irqflags.h b/include/asm-sh/irqflags.h new file mode 100644 index 000000000000..9dedc1b693e3 --- /dev/null +++ b/include/asm-sh/irqflags.h | |||
@@ -0,0 +1,123 @@ | |||
1 | #ifndef __ASM_SH_IRQFLAGS_H | ||
2 | #define __ASM_SH_IRQFLAGS_H | ||
3 | |||
4 | static inline void raw_local_irq_enable(void) | ||
5 | { | ||
6 | unsigned long __dummy0, __dummy1; | ||
7 | |||
8 | __asm__ __volatile__ ( | ||
9 | "stc sr, %0\n\t" | ||
10 | "and %1, %0\n\t" | ||
11 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
12 | "stc r6_bank, %1\n\t" | ||
13 | "or %1, %0\n\t" | ||
14 | #endif | ||
15 | "ldc %0, sr\n\t" | ||
16 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
17 | : "1" (~0x000000f0) | ||
18 | : "memory" | ||
19 | ); | ||
20 | } | ||
21 | |||
22 | static inline void raw_local_irq_disable(void) | ||
23 | { | ||
24 | unsigned long flags; | ||
25 | |||
26 | __asm__ __volatile__ ( | ||
27 | "stc sr, %0\n\t" | ||
28 | "or #0xf0, %0\n\t" | ||
29 | "ldc %0, sr\n\t" | ||
30 | : "=&z" (flags) | ||
31 | : /* no inputs */ | ||
32 | : "memory" | ||
33 | ); | ||
34 | } | ||
35 | |||
36 | static inline void set_bl_bit(void) | ||
37 | { | ||
38 | unsigned long __dummy0, __dummy1; | ||
39 | |||
40 | __asm__ __volatile__ ( | ||
41 | "stc sr, %0\n\t" | ||
42 | "or %2, %0\n\t" | ||
43 | "and %3, %0\n\t" | ||
44 | "ldc %0, sr\n\t" | ||
45 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
46 | : "r" (0x10000000), "r" (0xffffff0f) | ||
47 | : "memory" | ||
48 | ); | ||
49 | } | ||
50 | |||
51 | static inline void clear_bl_bit(void) | ||
52 | { | ||
53 | unsigned long __dummy0, __dummy1; | ||
54 | |||
55 | __asm__ __volatile__ ( | ||
56 | "stc sr, %0\n\t" | ||
57 | "and %2, %0\n\t" | ||
58 | "ldc %0, sr\n\t" | ||
59 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
60 | : "1" (~0x10000000) | ||
61 | : "memory" | ||
62 | ); | ||
63 | } | ||
64 | |||
65 | static inline unsigned long __raw_local_save_flags(void) | ||
66 | { | ||
67 | unsigned long flags; | ||
68 | |||
69 | __asm__ __volatile__ ( | ||
70 | "stc sr, %0\n\t" | ||
71 | "and #0xf0, %0\n\t" | ||
72 | : "=&z" (flags) | ||
73 | : /* no inputs */ | ||
74 | : "memory" | ||
75 | ); | ||
76 | |||
77 | return flags; | ||
78 | } | ||
79 | |||
80 | #define raw_local_save_flags(flags) \ | ||
81 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
82 | |||
83 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
84 | { | ||
85 | return (flags != 0); | ||
86 | } | ||
87 | |||
88 | static inline int raw_irqs_disabled(void) | ||
89 | { | ||
90 | unsigned long flags = __raw_local_save_flags(); | ||
91 | |||
92 | return raw_irqs_disabled_flags(flags); | ||
93 | } | ||
94 | |||
95 | static inline unsigned long __raw_local_irq_save(void) | ||
96 | { | ||
97 | unsigned long flags, __dummy; | ||
98 | |||
99 | __asm__ __volatile__ ( | ||
100 | "stc sr, %1\n\t" | ||
101 | "mov %1, %0\n\t" | ||
102 | "or #0xf0, %0\n\t" | ||
103 | "ldc %0, sr\n\t" | ||
104 | "mov %1, %0\n\t" | ||
105 | "and #0xf0, %0\n\t" | ||
106 | : "=&z" (flags), "=&r" (__dummy) | ||
107 | : /* no inputs */ | ||
108 | : "memory" | ||
109 | ); | ||
110 | |||
111 | return flags; | ||
112 | } | ||
113 | |||
114 | #define raw_local_irq_save(flags) \ | ||
115 | do { (flags) = __raw_local_irq_save(); } while (0) | ||
116 | |||
117 | static inline void raw_local_irq_restore(unsigned long flags) | ||
118 | { | ||
119 | if ((flags & 0xf0) != 0xf0) | ||
120 | raw_local_irq_enable(); | ||
121 | } | ||
122 | |||
123 | #endif /* __ASM_SH_IRQFLAGS_H */ | ||
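The new irqflags.h above works purely on the four IMASK bits, SR[7:4]: __raw_local_irq_save() first raises IMASK to 0xf and returns the previous SR & 0xf0 (0x00 when interrupts were enabled, 0xf0 when fully masked), and raw_local_irq_restore() only re-enables when that saved value was not already 0xf0. A minimal usage sketch:

	unsigned long flags;

	raw_local_irq_save(flags);	/* SR.IMASK <- 0xf; old SR & 0xf0 kept in flags */
	/* ... short critical section with interrupts blocked ... */
	raw_local_irq_restore(flags);	/* re-enables only if (flags & 0xf0) != 0xf0 */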
diff --git a/include/asm-sh/mmu_context.h b/include/asm-sh/mmu_context.h index c7088efe579a..46f04e23bd45 100644 --- a/include/asm-sh/mmu_context.h +++ b/include/asm-sh/mmu_context.h | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <asm/cpu/mmu_context.h> | 11 | #include <asm/cpu/mmu_context.h> |
12 | #include <asm/tlbflush.h> | 12 | #include <asm/tlbflush.h> |
13 | #include <asm/pgalloc.h> | ||
14 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
15 | #include <asm/io.h> | 14 | #include <asm/io.h> |
16 | 15 | ||
@@ -42,10 +41,8 @@ extern unsigned long mmu_context_cache; | |||
42 | /* | 41 | /* |
43 | * Get MMU context if needed. | 42 | * Get MMU context if needed. |
44 | */ | 43 | */ |
45 | static __inline__ void | 44 | static inline void get_mmu_context(struct mm_struct *mm) |
46 | get_mmu_context(struct mm_struct *mm) | ||
47 | { | 45 | { |
48 | extern void flush_tlb_all(void); | ||
49 | unsigned long mc = mmu_context_cache; | 46 | unsigned long mc = mmu_context_cache; |
50 | 47 | ||
51 | /* Check if we have old version of context. */ | 48 | /* Check if we have old version of context. */ |
@@ -61,6 +58,7 @@ get_mmu_context(struct mm_struct *mm) | |||
61 | * Flush all TLB and start new cycle. | 58 | * Flush all TLB and start new cycle. |
62 | */ | 59 | */ |
63 | flush_tlb_all(); | 60 | flush_tlb_all(); |
61 | |||
64 | /* | 62 | /* |
65 | * Fix version; Note that we avoid version #0 | 63 | * Fix version; Note that we avoid version #0 |
66 | * to distinguish NO_CONTEXT. | 64 | * to distinguish NO_CONTEXT. |
@@ -75,11 +73,10 @@ get_mmu_context(struct mm_struct *mm) | |||
75 | * Initialize the context related info for a new mm_struct | 73 | * Initialize the context related info for a new mm_struct |
76 | * instance. | 74 | * instance. |
77 | */ | 75 | */ |
78 | static __inline__ int init_new_context(struct task_struct *tsk, | 76 | static inline int init_new_context(struct task_struct *tsk, |
79 | struct mm_struct *mm) | 77 | struct mm_struct *mm) |
80 | { | 78 | { |
81 | mm->context.id = NO_CONTEXT; | 79 | mm->context.id = NO_CONTEXT; |
82 | |||
83 | return 0; | 80 | return 0; |
84 | } | 81 | } |
85 | 82 | ||
@@ -87,12 +84,12 @@ static __inline__ int init_new_context(struct task_struct *tsk, | |||
87 | * Destroy context related info for an mm_struct that is about | 84 | * Destroy context related info for an mm_struct that is about |
88 | * to be put to rest. | 85 | * to be put to rest. |
89 | */ | 86 | */ |
90 | static __inline__ void destroy_context(struct mm_struct *mm) | 87 | static inline void destroy_context(struct mm_struct *mm) |
91 | { | 88 | { |
92 | /* Do nothing */ | 89 | /* Do nothing */ |
93 | } | 90 | } |
94 | 91 | ||
95 | static __inline__ void set_asid(unsigned long asid) | 92 | static inline void set_asid(unsigned long asid) |
96 | { | 93 | { |
97 | unsigned long __dummy; | 94 | unsigned long __dummy; |
98 | 95 | ||
@@ -105,7 +102,7 @@ static __inline__ void set_asid(unsigned long asid) | |||
105 | "r" (0xffffff00)); | 102 | "r" (0xffffff00)); |
106 | } | 103 | } |
107 | 104 | ||
108 | static __inline__ unsigned long get_asid(void) | 105 | static inline unsigned long get_asid(void) |
109 | { | 106 | { |
110 | unsigned long asid; | 107 | unsigned long asid; |
111 | 108 | ||
@@ -120,24 +117,29 @@ static __inline__ unsigned long get_asid(void) | |||
120 | * After we have set current->mm to a new value, this activates | 117 | * After we have set current->mm to a new value, this activates |
121 | * the context for the new mm so we see the new mappings. | 118 | * the context for the new mm so we see the new mappings. |
122 | */ | 119 | */ |
123 | static __inline__ void activate_context(struct mm_struct *mm) | 120 | static inline void activate_context(struct mm_struct *mm) |
124 | { | 121 | { |
125 | get_mmu_context(mm); | 122 | get_mmu_context(mm); |
126 | set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK); | 123 | set_asid(mm->context.id & MMU_CONTEXT_ASID_MASK); |
127 | } | 124 | } |
128 | 125 | ||
129 | /* MMU_TTB can be used for optimizing the fault handling. | 126 | /* MMU_TTB is used for optimizing the fault handling. */ |
130 | (Currently not used) */ | 127 | static inline void set_TTB(pgd_t *pgd) |
131 | static __inline__ void switch_mm(struct mm_struct *prev, | ||
132 | struct mm_struct *next, | ||
133 | struct task_struct *tsk) | ||
134 | { | 128 | { |
135 | if (likely(prev != next)) { | 129 | ctrl_outl((unsigned long)pgd, MMU_TTB); |
136 | unsigned long __pgdir = (unsigned long)next->pgd; | 130 | } |
137 | 131 | ||
138 | __asm__ __volatile__("mov.l %0, %1" | 132 | static inline pgd_t *get_TTB(void) |
139 | : /* no output */ | 133 | { |
140 | : "r" (__pgdir), "m" (__m(MMU_TTB))); | 134 | return (pgd_t *)ctrl_inl(MMU_TTB); |
135 | } | ||
136 | |||
137 | static inline void switch_mm(struct mm_struct *prev, | ||
138 | struct mm_struct *next, | ||
139 | struct task_struct *tsk) | ||
140 | { | ||
141 | if (likely(prev != next)) { | ||
142 | set_TTB(next->pgd); | ||
141 | activate_context(next); | 143 | activate_context(next); |
142 | } | 144 | } |
143 | } | 145 | } |
@@ -147,7 +149,7 @@ static __inline__ void switch_mm(struct mm_struct *prev, | |||
147 | #define activate_mm(prev, next) \ | 149 | #define activate_mm(prev, next) \ |
148 | switch_mm((prev),(next),NULL) | 150 | switch_mm((prev),(next),NULL) |
149 | 151 | ||
150 | static __inline__ void | 152 | static inline void |
151 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 153 | enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) |
152 | { | 154 | { |
153 | } | 155 | } |
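For orientation, the mm->context.id value used by get_mmu_context() and activate_context() packs the 8-bit hardware ASID in the low byte and a generation counter ("version") in the upper 24 bits, so a cheap compare against mmu_context_cache detects stale contexts. The mask values in the sketch below are the conventional ones from asm/cpu/mmu_context.h and are assumptions here, not part of this patch:

	/* context.id = version | asid                                         */
	/*   low  8 bits (MMU_CONTEXT_ASID_MASK,    0x000000ff) -> set_asid()  */
	/*   high 24 bits (MMU_CONTEXT_VERSION_MASK, 0xffffff00) -> generation */
	unsigned long id   = 0x00000305;	/* hypothetical context.id        */
	unsigned long asid = id & 0x000000ff;	/* 0x05, written to PTEH          */
	unsigned long ver  = id & 0xffffff00;	/* 0x300, bumped on ASID rollover */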
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index ca8b26d90475..380fd62dd05a 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h | |||
@@ -13,9 +13,16 @@ | |||
13 | [ P4 control ] 0xE0000000 | 13 | [ P4 control ] 0xE0000000 |
14 | */ | 14 | */ |
15 | 15 | ||
16 | |||
17 | /* PAGE_SHIFT determines the page size */ | 16 | /* PAGE_SHIFT determines the page size */ |
18 | #define PAGE_SHIFT 12 | 17 | #if defined(CONFIG_PAGE_SIZE_4KB) |
18 | # define PAGE_SHIFT 12 | ||
19 | #elif defined(CONFIG_PAGE_SIZE_8KB) | ||
20 | # define PAGE_SHIFT 13 | ||
21 | #elif defined(CONFIG_PAGE_SIZE_64KB) | ||
22 | # define PAGE_SHIFT 16 | ||
23 | #else | ||
24 | # error "Bogus kernel page size?" | ||
25 | #endif | ||
19 | 26 | ||
20 | #ifdef __ASSEMBLY__ | 27 | #ifdef __ASSEMBLY__ |
21 | #define PAGE_SIZE (1 << PAGE_SHIFT) | 28 | #define PAGE_SIZE (1 << PAGE_SHIFT) |
@@ -28,8 +35,14 @@ | |||
28 | 35 | ||
29 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 36 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
30 | #define HPAGE_SHIFT 16 | 37 | #define HPAGE_SHIFT 16 |
38 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) | ||
39 | #define HPAGE_SHIFT 18 | ||
31 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | 40 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) |
32 | #define HPAGE_SHIFT 20 | 41 | #define HPAGE_SHIFT 20 |
42 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | ||
43 | #define HPAGE_SHIFT 22 | ||
44 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) | ||
45 | #define HPAGE_SHIFT 26 | ||
33 | #endif | 46 | #endif |
34 | 47 | ||
35 | #ifdef CONFIG_HUGETLB_PAGE | 48 | #ifdef CONFIG_HUGETLB_PAGE |
@@ -69,15 +82,25 @@ extern void __copy_user_page(void *to, void *from, void *orig_to); | |||
69 | /* | 82 | /* |
70 | * These are used to make use of C type-checking.. | 83 | * These are used to make use of C type-checking.. |
71 | */ | 84 | */ |
72 | typedef struct { unsigned long pte; } pte_t; | 85 | #ifdef CONFIG_X2TLB |
73 | typedef struct { unsigned long pgd; } pgd_t; | 86 | typedef struct { unsigned long pte_low, pte_high; } pte_t; |
87 | typedef struct { unsigned long long pgprot; } pgprot_t; | ||
88 | #define pte_val(x) \ | ||
89 | ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | ||
90 | #define __pte(x) \ | ||
91 | ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) | ||
92 | #else | ||
93 | typedef struct { unsigned long pte_low; } pte_t; | ||
74 | typedef struct { unsigned long pgprot; } pgprot_t; | 94 | typedef struct { unsigned long pgprot; } pgprot_t; |
95 | #define pte_val(x) ((x).pte_low) | ||
96 | #define __pte(x) ((pte_t) { (x) } ) | ||
97 | #endif | ||
98 | |||
99 | typedef struct { unsigned long pgd; } pgd_t; | ||
75 | 100 | ||
76 | #define pte_val(x) ((x).pte) | ||
77 | #define pgd_val(x) ((x).pgd) | 101 | #define pgd_val(x) ((x).pgd) |
78 | #define pgprot_val(x) ((x).pgprot) | 102 | #define pgprot_val(x) ((x).pgprot) |
79 | 103 | ||
80 | #define __pte(x) ((pte_t) { (x) } ) | ||
81 | #define __pgd(x) ((pgd_t) { (x) } ) | 104 | #define __pgd(x) ((pgd_t) { (x) } ) |
82 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 105 | #define __pgprot(x) ((pgprot_t) { (x) } ) |
83 | 106 | ||
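A quick round-trip through the new CONFIG_X2TLB helpers shows how a 64-bit software PTE is split across pte_low/pte_high and recombined (the value is arbitrary, chosen only to make the split visible):

	pte_t pte = __pte(0x000000123456700aULL);
	/* pte.pte_low  == 0x3456700a            */
	/* pte.pte_high == 0x00000012            */
	/* pte_val(pte) == 0x000000123456700aULL */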
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h index e841465ab4d2..888e4529e6fe 100644 --- a/include/asm-sh/pgalloc.h +++ b/include/asm-sh/pgalloc.h | |||
@@ -1,13 +1,16 @@ | |||
1 | #ifndef __ASM_SH_PGALLOC_H | 1 | #ifndef __ASM_SH_PGALLOC_H |
2 | #define __ASM_SH_PGALLOC_H | 2 | #define __ASM_SH_PGALLOC_H |
3 | 3 | ||
4 | #define pmd_populate_kernel(mm, pmd, pte) \ | 4 | static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, |
5 | set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))) | 5 | pte_t *pte) |
6 | { | ||
7 | set_pmd(pmd, __pmd((unsigned long)pte)); | ||
8 | } | ||
6 | 9 | ||
7 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | 10 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, |
8 | struct page *pte) | 11 | struct page *pte) |
9 | { | 12 | { |
10 | set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte))); | 13 | set_pmd(pmd, __pmd((unsigned long)page_address(pte))); |
11 | } | 14 | } |
12 | 15 | ||
13 | /* | 16 | /* |
@@ -15,7 +18,16 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
15 | */ | 18 | */ |
16 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 19 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
17 | { | 20 | { |
18 | return (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); | 21 | pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); |
22 | |||
23 | if (pgd) { | ||
24 | memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | ||
25 | memcpy(pgd + USER_PTRS_PER_PGD, | ||
26 | swapper_pg_dir + USER_PTRS_PER_PGD, | ||
27 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | ||
28 | } | ||
29 | |||
30 | return pgd; | ||
19 | } | 31 | } |
20 | 32 | ||
21 | static inline void pgd_free(pgd_t *pgd) | 33 | static inline void pgd_free(pgd_t *pgd) |
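The rewritten pgd_alloc() above splits each new page directory into a zeroed user half and a kernel half copied from swapper_pg_dir, so every mm shares the kernel and vmalloc page tables. A rough worked example, assuming the usual SH TASK_SIZE of 0x7c000000 and the 4 MB PGDIR_SIZE derived further down in pgtable.h:

	/* USER_PTRS_PER_PGD = 0x7c000000 / 0x00400000 = 496           */
	/* pgd entries   0..495  : cleared, populated on demand        */
	/* pgd entries 496..1023 : copied verbatim from swapper_pg_dir */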
diff --git a/include/asm-sh/pgtable-2level.h b/include/asm-sh/pgtable-2level.h deleted file mode 100644 index b525db6f61c6..000000000000 --- a/include/asm-sh/pgtable-2level.h +++ /dev/null | |||
@@ -1,70 +0,0 @@ | |||
1 | #ifndef __ASM_SH_PGTABLE_2LEVEL_H | ||
2 | #define __ASM_SH_PGTABLE_2LEVEL_H | ||
3 | |||
4 | /* | ||
5 | * traditional two-level paging structure: | ||
6 | */ | ||
7 | |||
8 | #define PGDIR_SHIFT 22 | ||
9 | #define PTRS_PER_PGD 1024 | ||
10 | |||
11 | /* | ||
12 | * this is two-level, so we don't really have any | ||
13 | * PMD directory physically. | ||
14 | */ | ||
15 | #define PMD_SHIFT 22 | ||
16 | #define PTRS_PER_PMD 1 | ||
17 | |||
18 | #define PTRS_PER_PTE 1024 | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | #define pte_ERROR(e) \ | ||
22 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
23 | #define pmd_ERROR(e) \ | ||
24 | printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) | ||
25 | #define pgd_ERROR(e) \ | ||
26 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
27 | |||
28 | /* | ||
29 | * The "pgd_xxx()" functions here are trivial for a folded two-level | ||
30 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | ||
31 | * into the pgd entry) | ||
32 | */ | ||
33 | static inline int pgd_none(pgd_t pgd) { return 0; } | ||
34 | static inline int pgd_bad(pgd_t pgd) { return 0; } | ||
35 | static inline int pgd_present(pgd_t pgd) { return 1; } | ||
36 | static inline void pgd_clear (pgd_t * pgdp) { } | ||
37 | |||
38 | /* | ||
39 | * Certain architectures need to do special things when PTEs | ||
40 | * within a page table are directly modified. Thus, the following | ||
41 | * hook is made available. | ||
42 | */ | ||
43 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | ||
44 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | ||
45 | |||
46 | /* | ||
47 | * (pmds are folded into pgds so this doesn't get actually called, | ||
48 | * but the define is needed for a generic inline function.) | ||
49 | */ | ||
50 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | ||
51 | #define set_pgd(pgdptr, pgdval) (*(pgdptr) = pgdval) | ||
52 | |||
53 | #define pgd_page_vaddr(pgd) \ | ||
54 | ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) | ||
55 | |||
56 | #define pgd_page(pgd) \ | ||
57 | (phys_to_page(pgd_val(pgd))) | ||
58 | |||
59 | static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) | ||
60 | { | ||
61 | return (pmd_t *) dir; | ||
62 | } | ||
63 | |||
64 | #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) | ||
65 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
66 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | ||
67 | |||
68 | #endif /* !__ASSEMBLY__ */ | ||
69 | |||
70 | #endif /* __ASM_SH_PGTABLE_2LEVEL_H */ | ||
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h index 2c8682ad1012..c84901dbd8e5 100644 --- a/include/asm-sh/pgtable.h +++ b/include/asm-sh/pgtable.h | |||
@@ -15,15 +15,10 @@ | |||
15 | #include <asm-generic/pgtable-nopmd.h> | 15 | #include <asm-generic/pgtable-nopmd.h> |
16 | #include <asm/page.h> | 16 | #include <asm/page.h> |
17 | 17 | ||
18 | #define PTRS_PER_PGD 1024 | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | 18 | #ifndef __ASSEMBLY__ |
21 | #include <asm/addrspace.h> | 19 | #include <asm/addrspace.h> |
22 | #include <asm/fixmap.h> | 20 | #include <asm/fixmap.h> |
23 | 21 | ||
24 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
25 | extern void paging_init(void); | ||
26 | |||
27 | /* | 22 | /* |
28 | * ZERO_PAGE is a global shared page that is always zero: used | 23 | * ZERO_PAGE is a global shared page that is always zero: used |
29 | * for zero-mapped memory areas etc.. | 24 | * for zero-mapped memory areas etc.. |
@@ -33,15 +28,28 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
33 | 28 | ||
34 | #endif /* !__ASSEMBLY__ */ | 29 | #endif /* !__ASSEMBLY__ */ |
35 | 30 | ||
36 | /* traditional two-level paging structure */ | 31 | /* |
37 | #define PGDIR_SHIFT 22 | 32 | * traditional two-level paging structure |
38 | #define PTRS_PER_PMD 1 | 33 | */ |
39 | #define PTRS_PER_PTE 1024 | 34 | /* PTE bits */ |
40 | #define PMD_SIZE (1UL << PMD_SHIFT) | 35 | #ifdef CONFIG_X2TLB |
41 | #define PMD_MASK (~(PMD_SIZE-1)) | 36 | # define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */ |
42 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | 37 | #else |
38 | # define PTE_MAGNITUDE 2 /* 32-bit PTEs */ | ||
39 | #endif | ||
40 | #define PTE_SHIFT PAGE_SHIFT | ||
41 | #define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE) | ||
42 | |||
43 | /* PGD bits */ | ||
44 | #define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS) | ||
45 | #define PGDIR_BITS (32 - PGDIR_SHIFT) | ||
46 | #define PGDIR_SIZE (1 << PGDIR_SHIFT) | ||
43 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | 47 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) |
44 | 48 | ||
49 | /* Entries per level */ | ||
50 | #define PTRS_PER_PTE (PAGE_SIZE / 4) | ||
51 | #define PTRS_PER_PGD (PAGE_SIZE / 4) | ||
52 | |||
45 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) | 53 | #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE) |
46 | #define FIRST_USER_ADDRESS 0 | 54 | #define FIRST_USER_ADDRESS 0 |
47 | 55 | ||
@@ -49,7 +57,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
49 | 57 | ||
50 | /* | 58 | /* |
51 | * First 1MB map is used by fixed purpose. | 59 | * First 1MB map is used by fixed purpose. |
52 | * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c) | 60 | * Currently only 4-entry (16kB) is used (see arch/sh/mm/cache.c) |
53 | */ | 61 | */ |
54 | #define VMALLOC_START (P3SEG+0x00100000) | 62 | #define VMALLOC_START (P3SEG+0x00100000) |
55 | #define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) | 63 | #define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) |
@@ -57,7 +65,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
57 | /* | 65 | /* |
58 | * Linux PTEL encoding. | 66 | * Linux PTEL encoding. |
59 | * | 67 | * |
60 | * Hardware and software bit definitions for the PTEL value: | 68 | * Hardware and software bit definitions for the PTEL value (see below for |
69 | * notes on SH-X2 MMUs and 64-bit PTEs): | ||
61 | * | 70 | * |
62 | * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). | 71 | * - Bits 0 and 7 are reserved on SH-3 (_PAGE_WT and _PAGE_SZ1 on SH-4). |
63 | * | 72 | * |
@@ -76,20 +85,57 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
76 | * | 85 | * |
77 | * - Bits 31, 30, and 29 remain unused by everyone and can be used for future | 86 | * - Bits 31, 30, and 29 remain unused by everyone and can be used for future |
78 | * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. | 87 | * software flags, although care must be taken to update _PAGE_CLEAR_FLAGS. |
88 | * | ||
89 | * XXX: Leave the _PAGE_FILE and _PAGE_WT overhaul for a rainy day. | ||
90 | * | ||
91 | * SH-X2 MMUs and extended PTEs | ||
92 | * | ||
93 | * SH-X2 supports an extended mode TLB with split data arrays due to the | ||
94 | * number of bits needed for PR and SZ (now EPR and ESZ) encodings. The PR and | ||
95 | * SZ bit placeholders still exist in data array 1, but are implemented as | ||
96 | * reserved bits, with the real logic existing in data array 2. | ||
97 | * | ||
98 | * The downside to this is that we can no longer fit everything into a 32-bit | ||
99 | * PTE encoding, so a 64-bit pte_t is necessary for these parts. On the plus | ||
100 | * side, this gives us quite a few spare bits to play with for future usage. | ||
79 | */ | 101 | */ |
102 | /* Legacy and compat mode bits */ | ||
80 | #define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ | 103 | #define _PAGE_WT 0x001 /* WT-bit on SH-4, 0 on SH-3 */ |
81 | #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ | 104 | #define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */ |
82 | #define _PAGE_DIRTY 0x004 /* D-bit : page changed */ | 105 | #define _PAGE_DIRTY 0x004 /* D-bit : page changed */ |
83 | #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ | 106 | #define _PAGE_CACHABLE 0x008 /* C-bit : cachable */ |
84 | #define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ | 107 | #ifndef CONFIG_X2TLB |
85 | #define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ | 108 | # define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */ |
86 | #define _PAGE_USER 0x040 /* PR1-bit : user space access allowed */ | 109 | # define _PAGE_RW 0x020 /* PR0-bit : write access allowed */ |
87 | #define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ | 110 | # define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/ |
111 | # define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */ | ||
112 | #endif | ||
88 | #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ | 113 | #define _PAGE_PRESENT 0x100 /* V-bit : page is valid */ |
89 | #define _PAGE_PROTNONE 0x200 /* software: if not present */ | 114 | #define _PAGE_PROTNONE 0x200 /* software: if not present */ |
90 | #define _PAGE_ACCESSED 0x400 /* software: page referenced */ | 115 | #define _PAGE_ACCESSED 0x400 /* software: page referenced */ |
91 | #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ | 116 | #define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */ |
92 | 117 | ||
118 | /* Extended mode bits */ | ||
119 | #define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */ | ||
120 | #define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */ | ||
121 | #define _PAGE_EXT_ESZ2 0x0040 /* ESZ2-bit: Size of page */ | ||
122 | #define _PAGE_EXT_ESZ3 0x0080 /* ESZ3-bit: Size of page */ | ||
123 | |||
124 | #define _PAGE_EXT_USER_EXEC 0x0100 /* EPR0-bit: User space executable */ | ||
125 | #define _PAGE_EXT_USER_WRITE 0x0200 /* EPR1-bit: User space writable */ | ||
126 | #define _PAGE_EXT_USER_READ 0x0400 /* EPR2-bit: User space readable */ | ||
127 | |||
128 | #define _PAGE_EXT_KERN_EXEC 0x0800 /* EPR3-bit: Kernel space executable */ | ||
129 | #define _PAGE_EXT_KERN_WRITE 0x1000 /* EPR4-bit: Kernel space writable */ | ||
130 | #define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */ | ||
131 | |||
132 | /* Wrapper for extended mode pgprot twiddling */ | ||
133 | #ifdef CONFIG_X2TLB | ||
134 | # define _PAGE_EXT(x) ((unsigned long long)(x) << 32) | ||
135 | #else | ||
136 | # define _PAGE_EXT(x) (0) | ||
137 | #endif | ||
138 | |||
93 | /* software: moves to PTEA.TC (Timing Control) */ | 139 | /* software: moves to PTEA.TC (Timing Control) */ |
94 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ | 140 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ |
95 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ | 141 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ |
@@ -114,37 +160,160 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
114 | 160 | ||
115 | #define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) | 161 | #define _PAGE_FLAGS_HARDWARE_MASK (0x1fffffff & ~(_PAGE_CLEAR_FLAGS)) |
116 | 162 | ||
117 | /* Hardware flags: SZ0=1 (4k-byte) */ | 163 | /* Hardware flags, page size encoding */ |
118 | #define _PAGE_FLAGS_HARD _PAGE_SZ0 | 164 | #if defined(CONFIG_X2TLB) |
165 | # if defined(CONFIG_PAGE_SIZE_4KB) | ||
166 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ0) | ||
167 | # elif defined(CONFIG_PAGE_SIZE_8KB) | ||
168 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ1) | ||
169 | # elif defined(CONFIG_PAGE_SIZE_64KB) | ||
170 | # define _PAGE_FLAGS_HARD _PAGE_EXT(_PAGE_EXT_ESZ2) | ||
171 | # endif | ||
172 | #else | ||
173 | # if defined(CONFIG_PAGE_SIZE_4KB) | ||
174 | # define _PAGE_FLAGS_HARD _PAGE_SZ0 | ||
175 | # elif defined(CONFIG_PAGE_SIZE_64KB) | ||
176 | # define _PAGE_FLAGS_HARD _PAGE_SZ1 | ||
177 | # endif | ||
178 | #endif | ||
119 | 179 | ||
120 | #if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | 180 | #if defined(CONFIG_X2TLB) |
121 | #define _PAGE_SZHUGE (_PAGE_SZ1) | 181 | # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) |
122 | #elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | 182 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ2) |
123 | #define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) | 183 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K) |
184 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ2) | ||
185 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | ||
186 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ0 | _PAGE_EXT_ESZ1 | _PAGE_EXT_ESZ2) | ||
187 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB) | ||
188 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ3) | ||
189 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB) | ||
190 | # define _PAGE_SZHUGE (_PAGE_EXT_ESZ2 | _PAGE_EXT_ESZ3) | ||
191 | # endif | ||
192 | #else | ||
193 | # if defined(CONFIG_HUGETLB_PAGE_SIZE_64K) | ||
194 | # define _PAGE_SZHUGE (_PAGE_SZ1) | ||
195 | # elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB) | ||
196 | # define _PAGE_SZHUGE (_PAGE_SZ0 | _PAGE_SZ1) | ||
197 | # endif | ||
198 | #endif | ||
199 | |||
200 | /* | ||
201 | * Stub out _PAGE_SZHUGE if we don't have a good definition for it, | ||
202 | * to make pte_mkhuge() happy. | ||
203 | */ | ||
204 | #ifndef _PAGE_SZHUGE | ||
205 | # define _PAGE_SZHUGE (_PAGE_FLAGS_HARD) | ||
124 | #endif | 206 | #endif |
125 | 207 | ||
126 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) | 208 | #define _PAGE_CHG_MASK \ |
127 | #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) | 209 | (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) |
128 | #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY) | ||
129 | 210 | ||
130 | #ifndef __ASSEMBLY__ | 211 | #ifndef __ASSEMBLY__ |
131 | 212 | ||
132 | #ifdef CONFIG_MMU | 213 | #if defined(CONFIG_X2TLB) /* SH-X2 TLB */ |
133 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 214 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ |
134 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_CACHABLE |_PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 215 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) |
135 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 216 | |
136 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | 217 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ |
137 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 218 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ |
219 | _PAGE_EXT(_PAGE_EXT_USER_READ | \ | ||
220 | _PAGE_EXT_USER_WRITE)) | ||
221 | |||
222 | #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
223 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
224 | _PAGE_EXT(_PAGE_EXT_USER_EXEC | \ | ||
225 | _PAGE_EXT_USER_READ)) | ||
226 | |||
227 | #define PAGE_COPY PAGE_EXECREAD | ||
228 | |||
229 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
230 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
231 | _PAGE_EXT(_PAGE_EXT_USER_READ)) | ||
232 | |||
233 | #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
234 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
235 | _PAGE_EXT(_PAGE_EXT_USER_WRITE)) | ||
236 | |||
237 | #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ | ||
238 | _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \ | ||
239 | _PAGE_EXT(_PAGE_EXT_USER_WRITE | \ | ||
240 | _PAGE_EXT_USER_READ | \ | ||
241 | _PAGE_EXT_USER_EXEC)) | ||
242 | |||
243 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
244 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
245 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ | ||
246 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
247 | _PAGE_EXT_KERN_WRITE | \ | ||
248 | _PAGE_EXT_KERN_EXEC)) | ||
249 | |||
138 | #define PAGE_KERNEL_NOCACHE \ | 250 | #define PAGE_KERNEL_NOCACHE \ |
139 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 251 | __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ |
140 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | 252 | _PAGE_ACCESSED | _PAGE_HW_SHARED | \ |
253 | _PAGE_FLAGS_HARD | \ | ||
254 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
255 | _PAGE_EXT_KERN_WRITE | \ | ||
256 | _PAGE_EXT_KERN_EXEC)) | ||
257 | |||
258 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
259 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
260 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \ | ||
261 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
262 | _PAGE_EXT_KERN_EXEC)) | ||
263 | |||
264 | #define PAGE_KERNEL_PCC(slot, type) \ | ||
265 | __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \ | ||
266 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ | ||
267 | _PAGE_EXT(_PAGE_EXT_KERN_READ | \ | ||
268 | _PAGE_EXT_KERN_WRITE | \ | ||
269 | _PAGE_EXT_KERN_EXEC) | \ | ||
270 | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ | ||
271 | (type)) | ||
272 | |||
273 | #elif defined(CONFIG_MMU) /* SH-X TLB */ | ||
274 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \ | ||
275 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
276 | |||
277 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \ | ||
278 | _PAGE_CACHABLE | _PAGE_ACCESSED | \ | ||
279 | _PAGE_FLAGS_HARD) | ||
280 | |||
281 | #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ | ||
282 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
283 | |||
284 | #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_CACHABLE | \ | ||
285 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD) | ||
286 | |||
287 | #define PAGE_EXECREAD PAGE_READONLY | ||
288 | #define PAGE_RWX PAGE_SHARED | ||
289 | #define PAGE_WRITEONLY PAGE_SHARED | ||
290 | |||
291 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | \ | ||
292 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
293 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | ||
294 | |||
295 | #define PAGE_KERNEL_NOCACHE \ | ||
296 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ | ||
297 | _PAGE_ACCESSED | _PAGE_HW_SHARED | \ | ||
298 | _PAGE_FLAGS_HARD) | ||
299 | |||
300 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \ | ||
301 | _PAGE_DIRTY | _PAGE_ACCESSED | \ | ||
302 | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD) | ||
303 | |||
141 | #define PAGE_KERNEL_PCC(slot, type) \ | 304 | #define PAGE_KERNEL_PCC(slot, type) \ |
142 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | (type)) | 305 | __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | \ |
306 | _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \ | ||
307 | (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \ | ||
308 | (type)) | ||
143 | #else /* no mmu */ | 309 | #else /* no mmu */ |
144 | #define PAGE_NONE __pgprot(0) | 310 | #define PAGE_NONE __pgprot(0) |
145 | #define PAGE_SHARED __pgprot(0) | 311 | #define PAGE_SHARED __pgprot(0) |
146 | #define PAGE_COPY __pgprot(0) | 312 | #define PAGE_COPY __pgprot(0) |
313 | #define PAGE_EXECREAD __pgprot(0) | ||
314 | #define PAGE_RWX __pgprot(0) | ||
147 | #define PAGE_READONLY __pgprot(0) | 315 | #define PAGE_READONLY __pgprot(0) |
316 | #define PAGE_WRITEONLY __pgprot(0) | ||
148 | #define PAGE_KERNEL __pgprot(0) | 317 | #define PAGE_KERNEL __pgprot(0) |
149 | #define PAGE_KERNEL_NOCACHE __pgprot(0) | 318 | #define PAGE_KERNEL_NOCACHE __pgprot(0) |
150 | #define PAGE_KERNEL_RO __pgprot(0) | 319 | #define PAGE_KERNEL_RO __pgprot(0) |
@@ -154,27 +323,32 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
154 | #endif /* __ASSEMBLY__ */ | 323 | #endif /* __ASSEMBLY__ */ |
155 | 324 | ||
156 | /* | 325 | /* |
157 | * As i386 and MIPS, SuperH can't do page protection for execute, and | 326 | * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page |
158 | * considers that the same as a read. Also, write permissions imply | 327 | * protection for execute, and considers it the same as a read. Also, write |
159 | * read permissions. This is the closest we can get.. | 328 | * permission implies read permission. This is the closest we can get.. |
329 | * | ||
330 | * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme, | ||
331 | * not only supporting separate execute, read, and write bits, but having | ||
332 | * completely separate permission bits for user and kernel space. | ||
160 | */ | 333 | */ |
334 | /*xwr*/ | ||
161 | #define __P000 PAGE_NONE | 335 | #define __P000 PAGE_NONE |
162 | #define __P001 PAGE_READONLY | 336 | #define __P001 PAGE_READONLY |
163 | #define __P010 PAGE_COPY | 337 | #define __P010 PAGE_COPY |
164 | #define __P011 PAGE_COPY | 338 | #define __P011 PAGE_COPY |
165 | #define __P100 PAGE_READONLY | 339 | #define __P100 PAGE_EXECREAD |
166 | #define __P101 PAGE_READONLY | 340 | #define __P101 PAGE_EXECREAD |
167 | #define __P110 PAGE_COPY | 341 | #define __P110 PAGE_COPY |
168 | #define __P111 PAGE_COPY | 342 | #define __P111 PAGE_COPY |
169 | 343 | ||
170 | #define __S000 PAGE_NONE | 344 | #define __S000 PAGE_NONE |
171 | #define __S001 PAGE_READONLY | 345 | #define __S001 PAGE_READONLY |
172 | #define __S010 PAGE_SHARED | 346 | #define __S010 PAGE_WRITEONLY |
173 | #define __S011 PAGE_SHARED | 347 | #define __S011 PAGE_SHARED |
174 | #define __S100 PAGE_READONLY | 348 | #define __S100 PAGE_EXECREAD |
175 | #define __S101 PAGE_READONLY | 349 | #define __S101 PAGE_EXECREAD |
176 | #define __S110 PAGE_SHARED | 350 | #define __S110 PAGE_RWX |
177 | #define __S111 PAGE_SHARED | 351 | #define __S111 PAGE_RWX |
178 | 352 | ||
179 | #ifndef __ASSEMBLY__ | 353 | #ifndef __ASSEMBLY__ |
180 | 354 | ||
@@ -183,7 +357,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
183 | * within a page table are directly modified. Thus, the following | 357 | * within a page table are directly modified. Thus, the following |
184 | * hook is made available. | 358 | * hook is made available. |
185 | */ | 359 | */ |
360 | #ifdef CONFIG_X2TLB | ||
361 | static inline void set_pte(pte_t *ptep, pte_t pte) | ||
362 | { | ||
363 | ptep->pte_high = pte.pte_high; | ||
364 | smp_wmb(); | ||
365 | ptep->pte_low = pte.pte_low; | ||
366 | } | ||
367 | #else | ||
186 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | 368 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) |
369 | #endif | ||
370 | |||
187 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 371 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
188 | 372 | ||
189 | /* | 373 | /* |
@@ -192,18 +376,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
192 | */ | 376 | */ |
193 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) | 377 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) |
194 | 378 | ||
195 | #define pte_pfn(x) ((unsigned long)(((x).pte >> PAGE_SHIFT))) | 379 | #define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT))) |
196 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 380 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
197 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 381 | #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
198 | 382 | ||
199 | #define pte_none(x) (!pte_val(x)) | 383 | #define pte_none(x) (!pte_val(x)) |
200 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 384 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
201 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 385 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) |
202 | 386 | ||
203 | #define pmd_none(x) (!pmd_val(x)) | 387 | #define pmd_none(x) (!pmd_val(x)) |
204 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 388 | #define pmd_present(x) (pmd_val(x)) |
205 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | 389 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) |
206 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) | 390 | #define pmd_bad(x) (pmd_val(x) & ~PAGE_MASK) |
207 | 391 | ||
208 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | 392 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) |
209 | #define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK) | 393 | #define pte_page(x) phys_to_page(pte_val(x)&PTE_PHYS_MASK) |
@@ -212,28 +396,52 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
212 | * The following only work if pte_present() is true. | 396 | * The following only work if pte_present() is true. |
213 | * Undefined behaviour if not.. | 397 | * Undefined behaviour if not.. |
214 | */ | 398 | */ |
215 | static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | 399 | #define pte_not_present(pte) (!(pte_val(pte) & _PAGE_PRESENT)) |
216 | static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_USER; } | 400 | #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) |
217 | static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; } | 401 | #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) |
218 | static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; } | 402 | #define pte_file(pte) (pte_val(pte) & _PAGE_FILE) |
219 | static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } | 403 | |
220 | static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_RW; } | 404 | #ifdef CONFIG_X2TLB |
221 | static inline int pte_not_present(pte_t pte){ return !(pte_val(pte) & _PAGE_PRESENT); } | 405 | #define pte_read(pte) ((pte).pte_high & _PAGE_EXT_USER_READ) |
222 | 406 | #define pte_exec(pte) ((pte).pte_high & _PAGE_EXT_USER_EXEC) | |
223 | static inline pte_t pte_rdprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } | 407 | #define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE) |
224 | static inline pte_t pte_exprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; } | 408 | #else |
225 | static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; } | 409 | #define pte_read(pte) (pte_val(pte) & _PAGE_USER) |
226 | static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; } | 410 | #define pte_exec(pte) (pte_val(pte) & _PAGE_USER) |
227 | static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; } | 411 | #define pte_write(pte) (pte_val(pte) & _PAGE_RW) |
228 | static inline pte_t pte_mkread(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } | ||
229 | static inline pte_t pte_mkexec(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; } | ||
230 | static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; } | ||
231 | static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; } | ||
232 | static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; } | ||
233 | #ifdef CONFIG_HUGETLB_PAGE | ||
234 | static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; } | ||
235 | #endif | 412 | #endif |
236 | 413 | ||
414 | #define PTE_BIT_FUNC(h,fn,op) \ | ||
415 | static inline pte_t pte_##fn(pte_t pte) { pte.pte_##h op; return pte; } | ||
416 | |||
417 | #ifdef CONFIG_X2TLB | ||
418 | /* | ||
419 | * We cheat a bit in the SH-X2 TLB case. As the permission bits are | ||
420 | * individually toggled (and user permissions are entirely decoupled from | ||
421 | * kernel permissions), we attempt to couple them a bit more sanely here. | ||
422 | */ | ||
423 | PTE_BIT_FUNC(high, rdprotect, &= ~_PAGE_EXT_USER_READ); | ||
424 | PTE_BIT_FUNC(high, mkread, |= _PAGE_EXT_USER_READ | _PAGE_EXT_KERN_READ); | ||
425 | PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE); | ||
426 | PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE); | ||
427 | PTE_BIT_FUNC(high, exprotect, &= ~_PAGE_EXT_USER_EXEC); | ||
428 | PTE_BIT_FUNC(high, mkexec, |= _PAGE_EXT_USER_EXEC | _PAGE_EXT_KERN_EXEC); | ||
429 | PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE); | ||
430 | #else | ||
431 | PTE_BIT_FUNC(low, rdprotect, &= ~_PAGE_USER); | ||
432 | PTE_BIT_FUNC(low, mkread, |= _PAGE_USER); | ||
433 | PTE_BIT_FUNC(low, wrprotect, &= ~_PAGE_RW); | ||
434 | PTE_BIT_FUNC(low, mkwrite, |= _PAGE_RW); | ||
435 | PTE_BIT_FUNC(low, exprotect, &= ~_PAGE_USER); | ||
436 | PTE_BIT_FUNC(low, mkexec, |= _PAGE_USER); | ||
437 | PTE_BIT_FUNC(low, mkhuge, |= _PAGE_SZHUGE); | ||
438 | #endif | ||
439 | |||
440 | PTE_BIT_FUNC(low, mkclean, &= ~_PAGE_DIRTY); | ||
441 | PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY); | ||
442 | PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED); | ||
443 | PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED); | ||
444 | |||
237 | /* | 445 | /* |
238 | * Macro and implementation to make a page protection as uncachable. | 446 | * Macro and implementation to make a page protection as uncachable. |
239 | */ | 447 | */ |
@@ -258,13 +466,14 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot) | |||
258 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | 466 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) |
259 | 467 | ||
260 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 468 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
261 | { set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; } | 469 | { |
262 | 470 | set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | | |
263 | #define pmd_page_vaddr(pmd) \ | 471 | pgprot_val(newprot))); |
264 | ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) | 472 | return pte; |
473 | } | ||
265 | 474 | ||
266 | #define pmd_page(pmd) \ | 475 | #define pmd_page_vaddr(pmd) pmd_val(pmd) |
267 | (phys_to_page(pmd_val(pmd))) | 476 | #define pmd_page(pmd) (virt_to_page(pmd_val(pmd))) |
268 | 477 | ||
269 | /* to find an entry in a page-table-directory. */ | 478 | /* to find an entry in a page-table-directory. */ |
270 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | 479 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) |
@@ -283,8 +492,15 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
283 | #define pte_unmap(pte) do { } while (0) | 492 | #define pte_unmap(pte) do { } while (0) |
284 | #define pte_unmap_nested(pte) do { } while (0) | 493 | #define pte_unmap_nested(pte) do { } while (0) |
285 | 494 | ||
495 | #ifdef CONFIG_X2TLB | ||
496 | #define pte_ERROR(e) \ | ||
497 | printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \ | ||
498 | &(e), (e).pte_high, (e).pte_low) | ||
499 | #else | ||
286 | #define pte_ERROR(e) \ | 500 | #define pte_ERROR(e) \ |
287 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) | 501 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) |
502 | #endif | ||
503 | |||
288 | #define pgd_ERROR(e) \ | 504 | #define pgd_ERROR(e) \ |
289 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) | 505 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) |
290 | 506 | ||
@@ -337,6 +553,9 @@ extern unsigned int kobjsize(const void *objp); | |||
337 | extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 553 | extern pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
338 | #endif | 554 | #endif |
339 | 555 | ||
556 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | ||
557 | extern void paging_init(void); | ||
558 | |||
340 | #include <asm-generic/pgtable.h> | 559 | #include <asm-generic/pgtable.h> |
341 | 560 | ||
342 | #endif /* !__ASSEMBLY__ */ | 561 | #endif /* !__ASSEMBLY__ */ |
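
For reference, the ordering constraint behind the new X2TLB set_pte() above can be shown in a self-contained sketch (not part of the patch): pte_high carries the extended EPR/ESZ bits while the valid (V) bit lives in pte_low, so the high word has to be published before the word that makes the entry visible. The pte_t field layout and the smp_wmb() stand-in below are assumptions made only for this example; in the kernel the real SMP write barrier is used.

#include <stdint.h>

/* Stand-in for the kernel's smp_wmb(); a plain compiler barrier here. */
#define smp_wmb()	__asm__ __volatile__("" ::: "memory")

/* 64-bit PTE split as assumed by the X2TLB code above. */
typedef struct {
	uint32_t pte_low;	/* V bit, legacy flags, PFN */
	uint32_t pte_high;	/* EPR/ESZ extended bits */
} x2_pte_t;

static inline void x2_set_pte(x2_pte_t *ptep, x2_pte_t pte)
{
	ptep->pte_high = pte.pte_high;	/* extended bits first ...          */
	smp_wmb();			/* ... ordered before ...           */
	ptep->pte_low = pte.pte_low;	/* ... the word holding _PAGE_PRESENT */
}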
diff --git a/include/asm-sh/processor.h b/include/asm-sh/processor.h index 45bb74e35d32..6f1dd7ca1b1d 100644 --- a/include/asm-sh/processor.h +++ b/include/asm-sh/processor.h | |||
@@ -36,7 +36,10 @@ | |||
36 | */ | 36 | */ |
37 | enum cpu_type { | 37 | enum cpu_type { |
38 | /* SH-2 types */ | 38 | /* SH-2 types */ |
39 | CPU_SH7604, | 39 | CPU_SH7604, CPU_SH7619, |
40 | |||
41 | /* SH-2A types */ | ||
42 | CPU_SH7206, | ||
40 | 43 | ||
41 | /* SH-3 types */ | 44 | /* SH-3 types */ |
42 | CPU_SH7705, CPU_SH7706, CPU_SH7707, | 45 | CPU_SH7705, CPU_SH7706, CPU_SH7707, |
@@ -47,7 +50,10 @@ enum cpu_type { | |||
47 | /* SH-4 types */ | 50 | /* SH-4 types */ |
48 | CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, | 51 | CPU_SH7750, CPU_SH7750S, CPU_SH7750R, CPU_SH7751, CPU_SH7751R, |
49 | CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, | 52 | CPU_SH7760, CPU_ST40RA, CPU_ST40GX1, CPU_SH4_202, CPU_SH4_501, |
53 | |||
54 | /* SH-4A types */ | ||
50 | CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, | 55 | CPU_SH73180, CPU_SH7343, CPU_SH7770, CPU_SH7780, CPU_SH7781, |
56 | CPU_SH7785, | ||
51 | 57 | ||
52 | /* Unknown subtype */ | 58 | /* Unknown subtype */ |
53 | CPU_SH_NONE | 59 | CPU_SH_NONE |
@@ -130,12 +136,11 @@ union sh_fpu_union { | |||
130 | }; | 136 | }; |
131 | 137 | ||
132 | struct thread_struct { | 138 | struct thread_struct { |
139 | /* Saved registers when thread is descheduled */ | ||
133 | unsigned long sp; | 140 | unsigned long sp; |
134 | unsigned long pc; | 141 | unsigned long pc; |
135 | 142 | ||
136 | unsigned long trap_no, error_code; | 143 | /* Hardware debugging registers */ |
137 | unsigned long address; | ||
138 | /* Hardware debugging registers may come here */ | ||
139 | unsigned long ubc_pc; | 144 | unsigned long ubc_pc; |
140 | 145 | ||
141 | /* floating point info */ | 146 | /* floating point info */ |
@@ -150,12 +155,7 @@ typedef struct { | |||
150 | extern int ubc_usercnt; | 155 | extern int ubc_usercnt; |
151 | 156 | ||
152 | #define INIT_THREAD { \ | 157 | #define INIT_THREAD { \ |
153 | sizeof(init_stack) + (long) &init_stack, /* sp */ \ | 158 | .sp = sizeof(init_stack) + (long) &init_stack, \ |
154 | 0, /* pc */ \ | ||
155 | 0, 0, \ | ||
156 | 0, \ | ||
157 | 0, \ | ||
158 | {{{0,}},} /* fpu state */ \ | ||
159 | } | 159 | } |
160 | 160 | ||
161 | /* | 161 | /* |
@@ -259,8 +259,8 @@ void show_trace(struct task_struct *tsk, unsigned long *sp, | |||
259 | struct pt_regs *regs); | 259 | struct pt_regs *regs); |
260 | extern unsigned long get_wchan(struct task_struct *p); | 260 | extern unsigned long get_wchan(struct task_struct *p); |
261 | 261 | ||
262 | #define KSTK_EIP(tsk) ((tsk)->thread.pc) | 262 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) |
263 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) | 263 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[15]) |
264 | 264 | ||
265 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") | 265 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") |
266 | #define cpu_relax() barrier() | 266 | #define cpu_relax() barrier() |
diff --git a/include/asm-sh/push-switch.h b/include/asm-sh/push-switch.h new file mode 100644 index 000000000000..dfc6bad567f0 --- /dev/null +++ b/include/asm-sh/push-switch.h | |||
@@ -0,0 +1,28 @@ | |||
1 | #ifndef __ASM_SH_PUSH_SWITCH_H | ||
2 | #define __ASM_SH_PUSH_SWITCH_H | ||
3 | |||
4 | #include <linux/timer.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/workqueue.h> | ||
7 | |||
8 | struct push_switch { | ||
9 | /* switch state */ | ||
10 | unsigned int state:1; | ||
11 | /* debounce timer */ | ||
12 | struct timer_list debounce; | ||
13 | /* workqueue */ | ||
14 | struct work_struct work; | ||
15 | }; | ||
16 | |||
17 | struct push_switch_platform_info { | ||
18 | /* IRQ handler */ | ||
19 | irqreturn_t (*irq_handler)(int irq, void *data); | ||
20 | /* Special IRQ flags */ | ||
21 | unsigned int irq_flags; | ||
22 | /* Bit location of switch */ | ||
23 | unsigned int bit; | ||
24 | /* Symbolic switch name */ | ||
25 | const char *name; | ||
26 | }; | ||
27 | |||
28 | #endif /* __ASM_SH_PUSH_SWITCH_H */ | ||
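
A hypothetical board-file use of the new push_switch_platform_info, sketched only from the fields declared above. The handler, IRQ flag, bit position, and name are invented for illustration, and how the generic push-switch driver consumes this platform data is not shown here.

#include <linux/interrupt.h>
#include <asm/push-switch.h>

/* Hypothetical IRQ handler; the generic driver is expected to debounce
 * via the timer/work members of struct push_switch. */
static irqreturn_t example_switch_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Hypothetical platform data for one push switch on an example board. */
static struct push_switch_platform_info example_switch_info = {
	.irq_handler	= example_switch_irq,
	.irq_flags	= IRQF_SHARED,	/* assumed; board specific */
	.bit		= 4,		/* hypothetical bit position */
	.name		= "example-sw",
};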
diff --git a/include/asm-sh/rwsem.h b/include/asm-sh/rwsem.h index 9d2aea5e8488..4931ba817d73 100644 --- a/include/asm-sh/rwsem.h +++ b/include/asm-sh/rwsem.h | |||
@@ -25,11 +25,21 @@ struct rw_semaphore { | |||
25 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 25 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
26 | spinlock_t wait_lock; | 26 | spinlock_t wait_lock; |
27 | struct list_head wait_list; | 27 | struct list_head wait_list; |
28 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
29 | struct lockdep_map dep_map; | ||
30 | #endif | ||
28 | }; | 31 | }; |
29 | 32 | ||
33 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
34 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
35 | #else | ||
36 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
37 | #endif | ||
38 | |||
30 | #define __RWSEM_INITIALIZER(name) \ | 39 | #define __RWSEM_INITIALIZER(name) \ |
31 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ | 40 | { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ |
32 | LIST_HEAD_INIT((name).wait_list) } | 41 | LIST_HEAD_INIT((name).wait_list) \ |
42 | __RWSEM_DEP_MAP_INIT(name) } | ||
33 | 43 | ||
34 | #define DECLARE_RWSEM(name) \ | 44 | #define DECLARE_RWSEM(name) \ |
35 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | 45 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
@@ -39,6 +49,16 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | |||
39 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | 49 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); |
40 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | 50 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); |
41 | 51 | ||
52 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
53 | struct lock_class_key *key); | ||
54 | |||
55 | #define init_rwsem(sem) \ | ||
56 | do { \ | ||
57 | static struct lock_class_key __key; \ | ||
58 | \ | ||
59 | __init_rwsem((sem), #sem, &__key); \ | ||
60 | } while (0) | ||
61 | |||
42 | static inline void init_rwsem(struct rw_semaphore *sem) | 62 | static inline void init_rwsem(struct rw_semaphore *sem) |
43 | { | 63 | { |
44 | sem->count = RWSEM_UNLOCKED_VALUE; | 64 | sem->count = RWSEM_UNLOCKED_VALUE; |
@@ -141,6 +161,11 @@ static inline void __downgrade_write(struct rw_semaphore *sem) | |||
141 | rwsem_downgrade_wake(sem); | 161 | rwsem_downgrade_wake(sem); |
142 | } | 162 | } |
143 | 163 | ||
164 | static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) | ||
165 | { | ||
166 | __down_write(sem); | ||
167 | } | ||
168 | |||
144 | /* | 169 | /* |
145 | * implement exchange and add functionality | 170 | * implement exchange and add functionality |
146 | */ | 171 | */ |
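
The point of the new init_rwsem() macro above is that every initialisation site gets its own static lock_class_key, which is how lockdep tells otherwise identical semaphores apart. A minimal caller-side sketch (not part of the patch; the structure and function names are illustrative):

#include <linux/rwsem.h>

struct example_ctx {
	struct rw_semaphore sem;
};

static void example_ctx_init(struct example_ctx *ctx)
{
	/*
	 * With the macro above this expands roughly to:
	 *	static struct lock_class_key __key;
	 *	__init_rwsem(&ctx->sem, "&ctx->sem", &__key);
	 * so this init site forms its own lock class for lockdep.
	 */
	init_rwsem(&ctx->sem);
}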
diff --git a/include/asm-sh/se7206.h b/include/asm-sh/se7206.h new file mode 100644 index 000000000000..698eb80389ab --- /dev/null +++ b/include/asm-sh/se7206.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_SH_SE7206_H | ||
2 | #define __ASM_SH_SE7206_H | ||
3 | |||
4 | #define PA_SMSC 0x30000000 | ||
5 | #define PA_MRSHPC 0x34000000 | ||
6 | #define PA_LED 0x31400000 | ||
7 | |||
8 | void init_se7206_IRQ(void); | ||
9 | |||
10 | #define __IO_PREFIX se7206 | ||
11 | #include <asm/io_generic.h> | ||
12 | |||
13 | #endif /* __ASM_SH_SE7206_H */ | ||
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index 3340126f4e0f..b1e42e7f998b 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2002 Paul Mundt | 6 | * Copyright (C) 2002 Paul Mundt |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/irqflags.h> | ||
9 | #include <asm/types.h> | 10 | #include <asm/types.h> |
10 | 11 | ||
11 | /* | 12 | /* |
@@ -131,103 +132,6 @@ static inline unsigned long tas(volatile int *m) | |||
131 | 132 | ||
132 | #define set_mb(var, value) do { xchg(&var, value); } while (0) | 133 | #define set_mb(var, value) do { xchg(&var, value); } while (0) |
133 | 134 | ||
134 | /* Interrupt Control */ | ||
135 | #ifdef CONFIG_CPU_HAS_SR_RB | ||
136 | static inline void local_irq_enable(void) | ||
137 | { | ||
138 | unsigned long __dummy0, __dummy1; | ||
139 | |||
140 | __asm__ __volatile__("stc sr, %0\n\t" | ||
141 | "and %1, %0\n\t" | ||
142 | "stc r6_bank, %1\n\t" | ||
143 | "or %1, %0\n\t" | ||
144 | "ldc %0, sr" | ||
145 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
146 | : "1" (~0x000000f0) | ||
147 | : "memory"); | ||
148 | } | ||
149 | #else | ||
150 | static inline void local_irq_enable(void) | ||
151 | { | ||
152 | unsigned long __dummy0, __dummy1; | ||
153 | |||
154 | __asm__ __volatile__ ( | ||
155 | "stc sr, %0\n\t" | ||
156 | "and %1, %0\n\t" | ||
157 | "ldc %0, sr\n\t" | ||
158 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
159 | : "1" (~0x000000f0) | ||
160 | : "memory"); | ||
161 | } | ||
162 | #endif | ||
163 | |||
164 | static inline void local_irq_disable(void) | ||
165 | { | ||
166 | unsigned long __dummy; | ||
167 | __asm__ __volatile__("stc sr, %0\n\t" | ||
168 | "or #0xf0, %0\n\t" | ||
169 | "ldc %0, sr" | ||
170 | : "=&z" (__dummy) | ||
171 | : /* no inputs */ | ||
172 | : "memory"); | ||
173 | } | ||
174 | |||
175 | static inline void set_bl_bit(void) | ||
176 | { | ||
177 | unsigned long __dummy0, __dummy1; | ||
178 | |||
179 | __asm__ __volatile__ ("stc sr, %0\n\t" | ||
180 | "or %2, %0\n\t" | ||
181 | "and %3, %0\n\t" | ||
182 | "ldc %0, sr" | ||
183 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
184 | : "r" (0x10000000), "r" (0xffffff0f) | ||
185 | : "memory"); | ||
186 | } | ||
187 | |||
188 | static inline void clear_bl_bit(void) | ||
189 | { | ||
190 | unsigned long __dummy0, __dummy1; | ||
191 | |||
192 | __asm__ __volatile__ ("stc sr, %0\n\t" | ||
193 | "and %2, %0\n\t" | ||
194 | "ldc %0, sr" | ||
195 | : "=&r" (__dummy0), "=r" (__dummy1) | ||
196 | : "1" (~0x10000000) | ||
197 | : "memory"); | ||
198 | } | ||
199 | |||
200 | #define local_save_flags(x) \ | ||
201 | __asm__("stc sr, %0; and #0xf0, %0" : "=&z" (x) :/**/: "memory" ) | ||
202 | |||
203 | #define irqs_disabled() \ | ||
204 | ({ \ | ||
205 | unsigned long flags; \ | ||
206 | local_save_flags(flags); \ | ||
207 | (flags != 0); \ | ||
208 | }) | ||
209 | |||
210 | static inline unsigned long local_irq_save(void) | ||
211 | { | ||
212 | unsigned long flags, __dummy; | ||
213 | |||
214 | __asm__ __volatile__("stc sr, %1\n\t" | ||
215 | "mov %1, %0\n\t" | ||
216 | "or #0xf0, %0\n\t" | ||
217 | "ldc %0, sr\n\t" | ||
218 | "mov %1, %0\n\t" | ||
219 | "and #0xf0, %0" | ||
220 | : "=&z" (flags), "=&r" (__dummy) | ||
221 | :/**/ | ||
222 | : "memory" ); | ||
223 | return flags; | ||
224 | } | ||
225 | |||
226 | #define local_irq_restore(x) do { \ | ||
227 | if ((x & 0x000000f0) != 0x000000f0) \ | ||
228 | local_irq_enable(); \ | ||
229 | } while (0) | ||
230 | |||
231 | /* | 135 | /* |
232 | * Jump to P2 area. | 136 | * Jump to P2 area. |
233 | * When handling TLB or caches, we need to do it from P2 area. | 137 | * When handling TLB or caches, we need to do it from P2 area. |
@@ -264,9 +168,6 @@ do { \ | |||
264 | : "=&r" (__dummy)); \ | 168 | : "=&r" (__dummy)); \ |
265 | } while (0) | 169 | } while (0) |
266 | 170 | ||
267 | /* For spinlocks etc */ | ||
268 | #define local_irq_save(x) x = local_irq_save() | ||
269 | |||
270 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) | 171 | static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) |
271 | { | 172 | { |
272 | unsigned long flags, retval; | 173 | unsigned long flags, retval; |
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index 3ebc3f9039eb..0c01dc550819 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h | |||
@@ -90,13 +90,7 @@ static inline struct thread_info *current_thread_info(void) | |||
90 | #endif | 90 | #endif |
91 | #define free_thread_info(ti) kfree(ti) | 91 | #define free_thread_info(ti) kfree(ti) |
92 | 92 | ||
93 | #else /* !__ASSEMBLY__ */ | 93 | #endif /* __ASSEMBLY__ */ |
94 | |||
95 | /* how to get the thread information struct from ASM */ | ||
96 | #define GET_THREAD_INFO(reg) \ | ||
97 | stc r7_bank, reg | ||
98 | |||
99 | #endif | ||
100 | 94 | ||
101 | /* | 95 | /* |
102 | * thread information flags | 96 | * thread information flags |
diff --git a/include/asm-sh/timer.h b/include/asm-sh/timer.h index 5df842bcf7b6..17b5e76a4c31 100644 --- a/include/asm-sh/timer.h +++ b/include/asm-sh/timer.h | |||
@@ -18,11 +18,32 @@ struct sys_timer { | |||
18 | 18 | ||
19 | struct sys_device dev; | 19 | struct sys_device dev; |
20 | struct sys_timer_ops *ops; | 20 | struct sys_timer_ops *ops; |
21 | |||
22 | #ifdef CONFIG_NO_IDLE_HZ | ||
23 | struct dyn_tick_timer *dyn_tick; | ||
24 | #endif | ||
21 | }; | 25 | }; |
22 | 26 | ||
27 | #ifdef CONFIG_NO_IDLE_HZ | ||
28 | #define DYN_TICK_ENABLED (1 << 1) | ||
29 | |||
30 | struct dyn_tick_timer { | ||
31 | spinlock_t lock; | ||
32 | unsigned int state; /* Current state */ | ||
33 | int (*enable)(void); /* Enables dynamic tick */ | ||
34 | int (*disable)(void); /* Disables dynamic tick */ | ||
35 | void (*reprogram)(unsigned long); /* Reprograms the timer */ | ||
36 | int (*handler)(int, void *); | ||
37 | }; | ||
38 | |||
39 | void timer_dyn_reprogram(void); | ||
40 | #else | ||
41 | #define timer_dyn_reprogram() do { } while (0) | ||
42 | #endif | ||
43 | |||
23 | #define TICK_SIZE (tick_nsec / 1000) | 44 | #define TICK_SIZE (tick_nsec / 1000) |
24 | 45 | ||
25 | extern struct sys_timer tmu_timer; | 46 | extern struct sys_timer tmu_timer, cmt_timer, mtu2_timer; |
26 | extern struct sys_timer *sys_timer; | 47 | extern struct sys_timer *sys_timer; |
27 | 48 | ||
28 | #ifndef CONFIG_GENERIC_TIME | 49 | #ifndef CONFIG_GENERIC_TIME |
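
A rough sketch of how a timer driver might fill in the new dyn_tick_timer hooks. This is not part of the patch: all bodies are stubs, the names are invented, it only applies with CONFIG_NO_IDLE_HZ set, and the wiring of sys_timer->dyn_tick into the tick code is assumed rather than shown.

#include <linux/spinlock.h>
#include <asm/timer.h>

static int example_dyn_tick_enable(void)  { return 0; }	/* stub */
static int example_dyn_tick_disable(void) { return 0; }	/* stub */
static void example_dyn_tick_reprogram(unsigned long delta) { }	/* stub */
static int example_dyn_tick_handler(int irq, void *dev_id) { return 0; }

static struct dyn_tick_timer example_dyn_tick = {
	.lock		= SPIN_LOCK_UNLOCKED,
	.enable		= example_dyn_tick_enable,
	.disable	= example_dyn_tick_disable,
	.reprogram	= example_dyn_tick_reprogram,
	.handler	= example_dyn_tick_handler,
};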
diff --git a/include/asm-sh/titan.h b/include/asm-sh/titan.h index 270a4f4bc8a9..03f3583c8918 100644 --- a/include/asm-sh/titan.h +++ b/include/asm-sh/titan.h | |||
@@ -1,9 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Platform definitions for Titan | 2 | * Platform definitions for Titan |
3 | */ | 3 | */ |
4 | 4 | #ifndef _ASM_SH_TITAN_H | |
5 | #ifndef _ASM_SH_TITAN_TITAN_H | 5 | #define _ASM_SH_TITAN_H |
6 | #define _ASM_SH_TITAN_TITAN_H | ||
7 | 6 | ||
8 | #define __IO_PREFIX titan | 7 | #define __IO_PREFIX titan |
9 | #include <asm/io_generic.h> | 8 | #include <asm/io_generic.h> |
@@ -15,29 +14,4 @@ | |||
15 | #define TITAN_IRQ_MPCIB 11 /* mPCI B */ | 14 | #define TITAN_IRQ_MPCIB 11 /* mPCI B */ |
16 | #define TITAN_IRQ_USB 11 /* USB */ | 15 | #define TITAN_IRQ_USB 11 /* USB */ |
17 | 16 | ||
18 | /* | 17 | #endif /* __ASM_SH_TITAN_H */ |
19 | * The external interrupt lines, these take up ints 0 - 15 inclusive | ||
20 | * depending on the priority for the interrupt. In fact the priority | ||
21 | * is the interrupt :-) | ||
22 | */ | ||
23 | #define IRL0_IRQ 0 | ||
24 | #define IRL0_IPR_ADDR INTC_IPRD | ||
25 | #define IRL0_IPR_POS 3 | ||
26 | #define IRL0_PRIORITY 8 | ||
27 | |||
28 | #define IRL1_IRQ 1 | ||
29 | #define IRL1_IPR_ADDR INTC_IPRD | ||
30 | #define IRL1_IPR_POS 2 | ||
31 | #define IRL1_PRIORITY 8 | ||
32 | |||
33 | #define IRL2_IRQ 2 | ||
34 | #define IRL2_IPR_ADDR INTC_IPRD | ||
35 | #define IRL2_IPR_POS 1 | ||
36 | #define IRL2_PRIORITY 8 | ||
37 | |||
38 | #define IRL3_IRQ 3 | ||
39 | #define IRL3_IPR_ADDR INTC_IPRD | ||
40 | #define IRL3_IPR_POS 0 | ||
41 | #define IRL3_PRIORITY 8 | ||
42 | |||
43 | #endif | ||
diff --git a/include/asm-sh/unistd.h b/include/asm-sh/unistd.h index 1c2abde122cd..0cae1d248761 100644 --- a/include/asm-sh/unistd.h +++ b/include/asm-sh/unistd.h | |||
@@ -349,12 +349,30 @@ do { \ | |||
349 | return (type) (res); \ | 349 | return (type) (res); \ |
350 | } while (0) | 350 | } while (0) |
351 | 351 | ||
352 | #if defined(__sh2__) || defined(__SH2E__) || defined(__SH2A__) | ||
353 | #define SYSCALL_ARG0 "trapa #0x20" | ||
354 | #define SYSCALL_ARG1 "trapa #0x21" | ||
355 | #define SYSCALL_ARG2 "trapa #0x22" | ||
356 | #define SYSCALL_ARG3 "trapa #0x23" | ||
357 | #define SYSCALL_ARG4 "trapa #0x24" | ||
358 | #define SYSCALL_ARG5 "trapa #0x25" | ||
359 | #define SYSCALL_ARG6 "trapa #0x26" | ||
360 | #else | ||
361 | #define SYSCALL_ARG0 "trapa #0x10" | ||
362 | #define SYSCALL_ARG1 "trapa #0x11" | ||
363 | #define SYSCALL_ARG2 "trapa #0x12" | ||
364 | #define SYSCALL_ARG3 "trapa #0x13" | ||
365 | #define SYSCALL_ARG4 "trapa #0x14" | ||
366 | #define SYSCALL_ARG5 "trapa #0x15" | ||
367 | #define SYSCALL_ARG6 "trapa #0x16" | ||
368 | #endif | ||
369 | |||
352 | /* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ | 370 | /* XXX - _foo needs to be __foo, while __NR_bar could be _NR_bar. */ |
353 | #define _syscall0(type,name) \ | 371 | #define _syscall0(type,name) \ |
354 | type name(void) \ | 372 | type name(void) \ |
355 | { \ | 373 | { \ |
356 | register long __sc0 __asm__ ("r3") = __NR_##name; \ | 374 | register long __sc0 __asm__ ("r3") = __NR_##name; \ |
357 | __asm__ __volatile__ ("trapa #0x10" \ | 375 | __asm__ __volatile__ (SYSCALL_ARG0 \ |
358 | : "=z" (__sc0) \ | 376 | : "=z" (__sc0) \ |
359 | : "0" (__sc0) \ | 377 | : "0" (__sc0) \ |
360 | : "memory" ); \ | 378 | : "memory" ); \ |
@@ -366,7 +384,7 @@ type name(type1 arg1) \ | |||
366 | { \ | 384 | { \ |
367 | register long __sc0 __asm__ ("r3") = __NR_##name; \ | 385 | register long __sc0 __asm__ ("r3") = __NR_##name; \ |
368 | register long __sc4 __asm__ ("r4") = (long) arg1; \ | 386 | register long __sc4 __asm__ ("r4") = (long) arg1; \ |
369 | __asm__ __volatile__ ("trapa #0x11" \ | 387 | __asm__ __volatile__ (SYSCALL_ARG1 \ |
370 | : "=z" (__sc0) \ | 388 | : "=z" (__sc0) \ |
371 | : "0" (__sc0), "r" (__sc4) \ | 389 | : "0" (__sc0), "r" (__sc4) \ |
372 | : "memory"); \ | 390 | : "memory"); \ |
@@ -379,7 +397,7 @@ type name(type1 arg1,type2 arg2) \ | |||
379 | register long __sc0 __asm__ ("r3") = __NR_##name; \ | 397 | register long __sc0 __asm__ ("r3") = __NR_##name; \ |
380 | register long __sc4 __asm__ ("r4") = (long) arg1; \ | 398 | register long __sc4 __asm__ ("r4") = (long) arg1; \ |
381 | register long __sc5 __asm__ ("r5") = (long) arg2; \ | 399 | register long __sc5 __asm__ ("r5") = (long) arg2; \ |
382 | __asm__ __volatile__ ("trapa #0x12" \ | 400 | __asm__ __volatile__ (SYSCALL_ARG2 \ |
383 | : "=z" (__sc0) \ | 401 | : "=z" (__sc0) \ |
384 | : "0" (__sc0), "r" (__sc4), "r" (__sc5) \ | 402 | : "0" (__sc0), "r" (__sc4), "r" (__sc5) \ |
385 | : "memory"); \ | 403 | : "memory"); \ |
@@ -393,7 +411,7 @@ register long __sc0 __asm__ ("r3") = __NR_##name; \ | |||
393 | register long __sc4 __asm__ ("r4") = (long) arg1; \ | 411 | register long __sc4 __asm__ ("r4") = (long) arg1; \ |
394 | register long __sc5 __asm__ ("r5") = (long) arg2; \ | 412 | register long __sc5 __asm__ ("r5") = (long) arg2; \ |
395 | register long __sc6 __asm__ ("r6") = (long) arg3; \ | 413 | register long __sc6 __asm__ ("r6") = (long) arg3; \ |
396 | __asm__ __volatile__ ("trapa #0x13" \ | 414 | __asm__ __volatile__ (SYSCALL_ARG3 \ |
397 | : "=z" (__sc0) \ | 415 | : "=z" (__sc0) \ |
398 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \ | 416 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6) \ |
399 | : "memory"); \ | 417 | : "memory"); \ |
@@ -408,7 +426,7 @@ register long __sc4 __asm__ ("r4") = (long) arg1; \ | |||
408 | register long __sc5 __asm__ ("r5") = (long) arg2; \ | 426 | register long __sc5 __asm__ ("r5") = (long) arg2; \ |
409 | register long __sc6 __asm__ ("r6") = (long) arg3; \ | 427 | register long __sc6 __asm__ ("r6") = (long) arg3; \ |
410 | register long __sc7 __asm__ ("r7") = (long) arg4; \ | 428 | register long __sc7 __asm__ ("r7") = (long) arg4; \ |
411 | __asm__ __volatile__ ("trapa #0x14" \ | 429 | __asm__ __volatile__ (SYSCALL_ARG4 \ |
412 | : "=z" (__sc0) \ | 430 | : "=z" (__sc0) \ |
413 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \ | 431 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), \ |
414 | "r" (__sc7) \ | 432 | "r" (__sc7) \ |
@@ -425,7 +443,7 @@ register long __sc5 __asm__ ("r5") = (long) arg2; \ | |||
425 | register long __sc6 __asm__ ("r6") = (long) arg3; \ | 443 | register long __sc6 __asm__ ("r6") = (long) arg3; \ |
426 | register long __sc7 __asm__ ("r7") = (long) arg4; \ | 444 | register long __sc7 __asm__ ("r7") = (long) arg4; \ |
427 | register long __sc0 __asm__ ("r0") = (long) arg5; \ | 445 | register long __sc0 __asm__ ("r0") = (long) arg5; \ |
428 | __asm__ __volatile__ ("trapa #0x15" \ | 446 | __asm__ __volatile__ (SYSCALL_ARG5 \ |
429 | : "=z" (__sc0) \ | 447 | : "=z" (__sc0) \ |
430 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ | 448 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ |
431 | "r" (__sc3) \ | 449 | "r" (__sc3) \ |
@@ -443,7 +461,7 @@ register long __sc6 __asm__ ("r6") = (long) arg3; \ | |||
443 | register long __sc7 __asm__ ("r7") = (long) arg4; \ | 461 | register long __sc7 __asm__ ("r7") = (long) arg4; \ |
444 | register long __sc0 __asm__ ("r0") = (long) arg5; \ | 462 | register long __sc0 __asm__ ("r0") = (long) arg5; \ |
445 | register long __sc1 __asm__ ("r1") = (long) arg6; \ | 463 | register long __sc1 __asm__ ("r1") = (long) arg6; \ |
446 | __asm__ __volatile__ ("trapa #0x16" \ | 464 | __asm__ __volatile__ (SYSCALL_ARG6 \ |
447 | : "=z" (__sc0) \ | 465 | : "=z" (__sc0) \ |
448 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ | 466 | : "0" (__sc0), "r" (__sc4), "r" (__sc5), "r" (__sc6), "r" (__sc7), \ |
449 | "r" (__sc3), "r" (__sc1) \ | 467 | "r" (__sc3), "r" (__sc1) \ |
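
With the SYSCALL_ARGn selection above, the unchanged _syscallN() wrappers emit the SH-2 trap vectors (0x20 plus the argument count) when built with __sh2__/__SH2E__/__SH2A__, and the traditional 0x10-based vectors otherwise. A rough usage sketch, not part of the patch, assuming the usual errno plumbing that __syscall_return() relies on:

#include <asm/unistd.h>
#include <errno.h>

/* Defines a raw close(2) stub: on SH-2 builds this issues "trapa #0x21",
 * on SH-3/SH-4 builds it issues "trapa #0x11". */
static _syscall1(int, close, int, fd)

int example(void)
{
	return close(42);
}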
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 007e88d6d43f..93849f7abc24 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h | |||
@@ -21,7 +21,7 @@ | |||
21 | * on us. We need to use _exactly_ the address the user gave us, | 21 | * on us. We need to use _exactly_ the address the user gave us, |
22 | * not some alias that contains the same information. | 22 | * not some alias that contains the same information. |
23 | */ | 23 | */ |
24 | typedef struct { volatile int counter; } atomic_t; | 24 | typedef struct { int counter; } atomic_t; |
25 | 25 | ||
26 | #define ATOMIC_INIT(i) { (i) } | 26 | #define ATOMIC_INIT(i) { (i) } |
27 | 27 | ||
diff --git a/include/asm-x86_64/spinlock_types.h b/include/asm-x86_64/spinlock_types.h index 59efe849f351..4da9345c1500 100644 --- a/include/asm-x86_64/spinlock_types.h +++ b/include/asm-x86_64/spinlock_types.h | |||
@@ -6,13 +6,13 @@ | |||
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | typedef struct { | 8 | typedef struct { |
9 | volatile unsigned int slock; | 9 | unsigned int slock; |
10 | } raw_spinlock_t; | 10 | } raw_spinlock_t; |
11 | 11 | ||
12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 12 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } |
13 | 13 | ||
14 | typedef struct { | 14 | typedef struct { |
15 | volatile unsigned int lock; | 15 | unsigned int lock; |
16 | } raw_rwlock_t; | 16 | } raw_rwlock_t; |
17 | 17 | ||
18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } | 18 | #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } |
diff --git a/include/linux/aio.h b/include/linux/aio.h index 0d71c0041f13..9e350fd44d77 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -194,7 +194,7 @@ struct kioctx { | |||
194 | 194 | ||
195 | struct aio_ring_info ring_info; | 195 | struct aio_ring_info ring_info; |
196 | 196 | ||
197 | struct work_struct wq; | 197 | struct delayed_work wq; |
198 | }; | 198 | }; |
199 | 199 | ||
200 | /* prototypes */ | 200 | /* prototypes */ |
diff --git a/include/linux/connector.h b/include/linux/connector.h index 4c02119c6ab9..3ea1cd58de97 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -133,7 +133,7 @@ struct cn_callback_data { | |||
133 | struct cn_callback_entry { | 133 | struct cn_callback_entry { |
134 | struct list_head callback_entry; | 134 | struct list_head callback_entry; |
135 | struct cn_callback *cb; | 135 | struct cn_callback *cb; |
136 | struct work_struct work; | 136 | struct delayed_work work; |
137 | struct cn_queue_dev *pdev; | 137 | struct cn_queue_dev *pdev; |
138 | 138 | ||
139 | struct cn_callback_id id; | 139 | struct cn_callback_id id; |
@@ -170,7 +170,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev); | |||
170 | 170 | ||
171 | int cn_cb_equal(struct cb_id *, struct cb_id *); | 171 | int cn_cb_equal(struct cb_id *, struct cb_id *); |
172 | 172 | ||
173 | void cn_queue_wrapper(void *data); | 173 | void cn_queue_wrapper(struct work_struct *work); |
174 | 174 | ||
175 | extern int cn_already_initialized; | 175 | extern int cn_already_initialized; |
176 | 176 | ||
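
The cn_queue_wrapper() prototype change above (a void *data argument becoming struct work_struct *work) follows the same pattern as the other work_struct/delayed_work conversions in this series: the handler recovers its context with container_of() instead of receiving an opaque pointer. A generic sketch of that pattern, using an invented structure rather than the real cn_callback_entry:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_dev {
	int pending;
	struct delayed_work work;	/* was: struct work_struct work */
};

/* New-style handler: takes the work_struct embedded in the delayed_work. */
static void example_wrapper(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, work.work);

	dev->pending = 0;
}

static void example_start(struct example_dev *dev)
{
	dev->pending = 1;
	INIT_DELAYED_WORK(&dev->work, example_wrapper);
	schedule_delayed_work(&dev->work, msecs_to_jiffies(10));
}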
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index c115e9e840b4..1fb02e17f6f6 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
@@ -461,7 +461,7 @@ struct i2o_driver { | |||
461 | int (*reply) (struct i2o_controller *, u32, struct i2o_message *); | 461 | int (*reply) (struct i2o_controller *, u32, struct i2o_message *); |
462 | 462 | ||
463 | /* Event handler */ | 463 | /* Event handler */ |
464 | void (*event) (struct i2o_event *); | 464 | work_func_t event; |
465 | 465 | ||
466 | struct workqueue_struct *event_queue; /* Event queue */ | 466 | struct workqueue_struct *event_queue; /* Event queue */ |
467 | 467 | ||
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h index efe0ee4cc80b..06c58c423fe1 100644 --- a/include/linux/kbd_kern.h +++ b/include/linux/kbd_kern.h | |||
@@ -158,7 +158,7 @@ static inline void con_schedule_flip(struct tty_struct *t) | |||
158 | if (t->buf.tail != NULL) | 158 | if (t->buf.tail != NULL) |
159 | t->buf.tail->commit = t->buf.tail->used; | 159 | t->buf.tail->commit = t->buf.tail->used; |
160 | spin_unlock_irqrestore(&t->buf.lock, flags); | 160 | spin_unlock_irqrestore(&t->buf.lock, flags); |
161 | schedule_work(&t->buf.work); | 161 | schedule_delayed_work(&t->buf.work, 0); |
162 | } | 162 | } |
163 | 163 | ||
164 | #endif | 164 | #endif |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 202283b5df96..ab2754830322 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -575,8 +575,9 @@ struct ata_port { | |||
575 | struct ata_host *host; | 575 | struct ata_host *host; |
576 | struct device *dev; | 576 | struct device *dev; |
577 | 577 | ||
578 | struct work_struct port_task; | 578 | void *port_task_data; |
579 | struct work_struct hotplug_task; | 579 | struct delayed_work port_task; |
580 | struct delayed_work hotplug_task; | ||
580 | struct work_struct scsi_rescan_task; | 581 | struct work_struct scsi_rescan_task; |
581 | 582 | ||
582 | unsigned int hsm_task_state; | 583 | unsigned int hsm_task_state; |
@@ -755,7 +756,7 @@ extern void ata_host_resume(struct ata_host *host); | |||
755 | extern int ata_ratelimit(void); | 756 | extern int ata_ratelimit(void); |
756 | extern int ata_busy_sleep(struct ata_port *ap, | 757 | extern int ata_busy_sleep(struct ata_port *ap, |
757 | unsigned long timeout_pat, unsigned long timeout); | 758 | unsigned long timeout_pat, unsigned long timeout); |
758 | extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), | 759 | extern void ata_port_queue_task(struct ata_port *ap, work_func_t fn, |
759 | void *data, unsigned long delay); | 760 | void *data, unsigned long delay); |
760 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 761 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, |
761 | unsigned long interval_msec, | 762 | unsigned long interval_msec, |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 528e7d3fecb1..c15ae1986b98 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -110,7 +110,7 @@ struct mmc_host { | |||
110 | struct mmc_card *card_busy; /* the MMC card claiming host */ | 110 | struct mmc_card *card_busy; /* the MMC card claiming host */ |
111 | struct mmc_card *card_selected; /* the selected MMC card */ | 111 | struct mmc_card *card_selected; /* the selected MMC card */ |
112 | 112 | ||
113 | struct work_struct detect; | 113 | struct delayed_work detect; |
114 | 114 | ||
115 | unsigned long private[0] ____cacheline_aligned; | 115 | unsigned long private[0] ____cacheline_aligned; |
116 | }; | 116 | }; |
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index b089d9506283..a503052138bd 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -127,10 +127,10 @@ struct ncp_server { | |||
127 | } unexpected_packet; | 127 | } unexpected_packet; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | extern void ncp_tcp_rcv_proc(void *server); | 130 | extern void ncp_tcp_rcv_proc(struct work_struct *work); |
131 | extern void ncp_tcp_tx_proc(void *server); | 131 | extern void ncp_tcp_tx_proc(struct work_struct *work); |
132 | extern void ncpdgram_rcv_proc(void *server); | 132 | extern void ncpdgram_rcv_proc(struct work_struct *work); |
133 | extern void ncpdgram_timeout_proc(void *server); | 133 | extern void ncpdgram_timeout_proc(struct work_struct *work); |
134 | extern void ncpdgram_timeout_call(unsigned long server); | 134 | extern void ncpdgram_timeout_call(unsigned long server); |
135 | extern void ncp_tcp_data_ready(struct sock* sk, int len); | 135 | extern void ncp_tcp_data_ready(struct sock* sk, int len); |
136 | extern void ncp_tcp_write_space(struct sock* sk); | 136 | extern void ncp_tcp_write_space(struct sock* sk); |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 2cc9867b1626..29930b71a9aa 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -32,7 +32,7 @@ struct netpoll_info { | |||
32 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ | 32 | struct netpoll *rx_np; /* netpoll that registered an rx_hook */ |
33 | struct sk_buff_head arp_tx; /* list of arp requests to reply to */ | 33 | struct sk_buff_head arp_tx; /* list of arp requests to reply to */ |
34 | struct sk_buff_head txq; | 34 | struct sk_buff_head txq; |
35 | struct work_struct tx_work; | 35 | struct delayed_work tx_work; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | void netpoll_poll(struct netpoll *np); | 38 | void netpoll_poll(struct netpoll *np); |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 7ccfc7ef0a83..95796e6924f1 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -51,7 +51,7 @@ struct nfs_client { | |||
51 | 51 | ||
52 | unsigned long cl_lease_time; | 52 | unsigned long cl_lease_time; |
53 | unsigned long cl_last_renewal; | 53 | unsigned long cl_last_renewal; |
54 | struct work_struct cl_renewd; | 54 | struct delayed_work cl_renewd; |
55 | 55 | ||
56 | struct rpc_wait_queue cl_rpcwaitq; | 56 | struct rpc_wait_queue cl_rpcwaitq; |
57 | 57 | ||
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 62a7169aed8b..3a28742d86f9 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h | |||
@@ -249,7 +249,8 @@ struct reiserfs_journal { | |||
249 | int j_errno; | 249 | int j_errno; |
250 | 250 | ||
251 | /* when flushing ordered buffers, throttle new ordered writers */ | 251 | /* when flushing ordered buffers, throttle new ordered writers */ |
252 | struct work_struct j_work; | 252 | struct delayed_work j_work; |
253 | struct super_block *j_work_sb; | ||
253 | atomic_t j_async_throttle; | 254 | atomic_t j_async_throttle; |
254 | }; | 255 | }; |
255 | 256 | ||
diff --git a/include/linux/relay.h b/include/linux/relay.h index 24accb483849..0e3d91b76996 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
@@ -38,7 +38,7 @@ struct rchan_buf | |||
38 | size_t subbufs_consumed; /* count of sub-buffers consumed */ | 38 | size_t subbufs_consumed; /* count of sub-buffers consumed */ |
39 | struct rchan *chan; /* associated channel */ | 39 | struct rchan *chan; /* associated channel */ |
40 | wait_queue_head_t read_wait; /* reader wait queue */ | 40 | wait_queue_head_t read_wait; /* reader wait queue */ |
41 | struct work_struct wake_readers; /* reader wake-up work struct */ | 41 | struct delayed_work wake_readers; /* reader wake-up work struct */ |
42 | struct dentry *dentry; /* channel file dentry */ | 42 | struct dentry *dentry; /* channel file dentry */ |
43 | struct kref kref; /* channel buffer refcount */ | 43 | struct kref kref; /* channel buffer refcount */ |
44 | struct page **page_array; /* array of current buffer pages */ | 44 | struct page **page_array; /* array of current buffer pages */ |
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h index a2eb9b4a9de3..4a68125b6de6 100644 --- a/include/linux/sunrpc/rpc_pipe_fs.h +++ b/include/linux/sunrpc/rpc_pipe_fs.h | |||
@@ -30,7 +30,7 @@ struct rpc_inode { | |||
30 | #define RPC_PIPE_WAIT_FOR_OPEN 1 | 30 | #define RPC_PIPE_WAIT_FOR_OPEN 1 |
31 | int flags; | 31 | int flags; |
32 | struct rpc_pipe_ops *ops; | 32 | struct rpc_pipe_ops *ops; |
33 | struct work_struct queue_timeout; | 33 | struct delayed_work queue_timeout; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static inline struct rpc_inode * | 36 | static inline struct rpc_inode * |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 65321f911c1e..f717f0898238 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -53,7 +53,7 @@ struct tty_buffer { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct tty_bufhead { | 55 | struct tty_bufhead { |
56 | struct work_struct work; | 56 | struct delayed_work work; |
57 | struct semaphore pty_sem; | 57 | struct semaphore pty_sem; |
58 | spinlock_t lock; | 58 | spinlock_t lock; |
59 | struct tty_buffer *head; /* Queue head */ | 59 | struct tty_buffer *head; /* Queue head */ |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 0cd73edeef13..aab5b1b72021 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -388,7 +388,7 @@ struct usb_device { | |||
388 | 388 | ||
389 | int pm_usage_cnt; /* usage counter for autosuspend */ | 389 | int pm_usage_cnt; /* usage counter for autosuspend */ |
390 | #ifdef CONFIG_PM | 390 | #ifdef CONFIG_PM |
391 | struct work_struct autosuspend; /* for delayed autosuspends */ | 391 | struct delayed_work autosuspend; /* for delayed autosuspends */ |
392 | struct mutex pm_mutex; /* protects PM operations */ | 392 | struct mutex pm_mutex; /* protects PM operations */ |
393 | 393 | ||
394 | unsigned auto_pm:1; /* autosuspend/resume in progress */ | 394 | unsigned auto_pm:1; /* autosuspend/resume in progress */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9bca3539a1e5..4a3ea83c6d16 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -11,12 +11,23 @@ | |||
11 | 11 | ||
12 | struct workqueue_struct; | 12 | struct workqueue_struct; |
13 | 13 | ||
14 | struct work_struct; | ||
15 | typedef void (*work_func_t)(struct work_struct *work); | ||
16 | |||
14 | struct work_struct { | 17 | struct work_struct { |
15 | unsigned long pending; | 18 | /* the first word is the work queue pointer and the flags rolled into |
19 | * one */ | ||
20 | unsigned long management; | ||
21 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ | ||
22 | #define WORK_STRUCT_NOAUTOREL 1 /* F if work item automatically released on exec */ | ||
23 | #define WORK_STRUCT_FLAG_MASK (3UL) | ||
24 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) | ||
16 | struct list_head entry; | 25 | struct list_head entry; |
17 | void (*func)(void *); | 26 | work_func_t func; |
18 | void *data; | 27 | }; |
19 | void *wq_data; | 28 | |
29 | struct delayed_work { | ||
30 | struct work_struct work; | ||
20 | struct timer_list timer; | 31 | struct timer_list timer; |
21 | }; | 32 | }; |
22 | 33 | ||
@@ -24,36 +35,117 @@ struct execute_work { | |||
24 | struct work_struct work; | 35 | struct work_struct work; |
25 | }; | 36 | }; |
26 | 37 | ||
27 | #define __WORK_INITIALIZER(n, f, d) { \ | 38 | #define __WORK_INITIALIZER(n, f) { \ |
39 | .management = 0, \ | ||
40 | .entry = { &(n).entry, &(n).entry }, \ | ||
41 | .func = (f), \ | ||
42 | } | ||
43 | |||
44 | #define __WORK_INITIALIZER_NAR(n, f) { \ | ||
45 | .management = (1 << WORK_STRUCT_NOAUTOREL), \ | ||
28 | .entry = { &(n).entry, &(n).entry }, \ | 46 | .entry = { &(n).entry, &(n).entry }, \ |
29 | .func = (f), \ | 47 | .func = (f), \ |
30 | .data = (d), \ | 48 | } |
49 | |||
50 | #define __DELAYED_WORK_INITIALIZER(n, f) { \ | ||
51 | .work = __WORK_INITIALIZER((n).work, (f)), \ | ||
52 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | ||
53 | } | ||
54 | |||
55 | #define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \ | ||
56 | .work = __WORK_INITIALIZER_NAR((n).work, (f)), \ | ||
31 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ | 57 | .timer = TIMER_INITIALIZER(NULL, 0, 0), \ |
32 | } | 58 | } |
33 | 59 | ||
34 | #define DECLARE_WORK(n, f, d) \ | 60 | #define DECLARE_WORK(n, f) \ |
35 | struct work_struct n = __WORK_INITIALIZER(n, f, d) | 61 | struct work_struct n = __WORK_INITIALIZER(n, f) |
62 | |||
63 | #define DECLARE_WORK_NAR(n, f) \ | ||
64 | struct work_struct n = __WORK_INITIALIZER_NAR(n, f) | ||
65 | |||
66 | #define DECLARE_DELAYED_WORK(n, f) \ | ||
67 | struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f) | ||
68 | |||
69 | #define DECLARE_DELAYED_WORK_NAR(n, f) \ | ||
70 | struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f) | ||
36 | 71 | ||
37 | /* | 72 | /* |
38 | * initialize a work-struct's func and data pointers: | 73 | * initialize a work item's function pointer |
39 | */ | 74 | */ |
40 | #define PREPARE_WORK(_work, _func, _data) \ | 75 | #define PREPARE_WORK(_work, _func) \ |
41 | do { \ | 76 | do { \ |
42 | (_work)->func = _func; \ | 77 | (_work)->func = (_func); \ |
43 | (_work)->data = _data; \ | ||
44 | } while (0) | 78 | } while (0) |
45 | 79 | ||
80 | #define PREPARE_DELAYED_WORK(_work, _func) \ | ||
81 | PREPARE_WORK(&(_work)->work, (_func)) | ||
82 | |||
46 | /* | 83 | /* |
47 | * initialize all of a work-struct: | 84 | * initialize all of a work item in one go |
48 | */ | 85 | */ |
49 | #define INIT_WORK(_work, _func, _data) \ | 86 | #define INIT_WORK(_work, _func) \ |
50 | do { \ | 87 | do { \ |
88 | (_work)->management = 0; \ | ||
51 | INIT_LIST_HEAD(&(_work)->entry); \ | 89 | INIT_LIST_HEAD(&(_work)->entry); \ |
52 | (_work)->pending = 0; \ | 90 | PREPARE_WORK((_work), (_func)); \ |
53 | PREPARE_WORK((_work), (_func), (_data)); \ | 91 | } while (0) |
92 | |||
93 | #define INIT_WORK_NAR(_work, _func) \ | ||
94 | do { \ | ||
95 | (_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \ | ||
96 | INIT_LIST_HEAD(&(_work)->entry); \ | ||
97 | PREPARE_WORK((_work), (_func)); \ | ||
98 | } while (0) | ||
99 | |||
100 | #define INIT_DELAYED_WORK(_work, _func) \ | ||
101 | do { \ | ||
102 | INIT_WORK(&(_work)->work, (_func)); \ | ||
103 | init_timer(&(_work)->timer); \ | ||
104 | } while (0) | ||
105 | |||
106 | #define INIT_DELAYED_WORK_NAR(_work, _func) \ | ||
107 | do { \ | ||
108 | INIT_WORK_NAR(&(_work)->work, (_func)); \ | ||
54 | init_timer(&(_work)->timer); \ | 109 | init_timer(&(_work)->timer); \ |
55 | } while (0) | 110 | } while (0) |
56 | 111 | ||
112 | /** | ||
113 | * work_pending - Find out whether a work item is currently pending | ||
114 | * @work: The work item in question | ||
115 | */ | ||
116 | #define work_pending(work) \ | ||
117 | test_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
118 | |||
119 | /** | ||
120 | * delayed_work_pending - Find out whether a delayable work item is currently | ||
121 | * pending | ||
122 | * @work: The work item in question | ||
123 | */ | ||
124 | #define delayed_work_pending(work) \ | ||
125 | test_bit(WORK_STRUCT_PENDING, &(work)->work.management) | ||
126 | |||
127 | /** | ||
128 | * work_release - Release a work item under execution | ||
129 | * @work: The work item to release | ||
130 | * | ||
131 | * This is used to release a work item that has been initialised with automatic | ||
132 | * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work | ||
133 | * function the opportunity to grab auxiliary data from the container of the | ||
134 | * work_struct before clearing the pending bit as the work_struct may be | ||
135 | * subject to deallocation the moment the pending bit is cleared. | ||
136 | * | ||
137 | * In such a case, this should be called in the work function after it has | ||
138 | * fetched any data it may require from the container of the work_struct. | ||
139 | * After this function has been called, the work_struct may be scheduled for | ||
140 | * further execution or it may be deallocated unless other precautions are | ||
141 | * taken. | ||
142 | * | ||
143 | * This should also be used to release a delayed work item. | ||
144 | */ | ||
145 | #define work_release(work) \ | ||
146 | clear_bit(WORK_STRUCT_PENDING, &(work)->management) | ||
147 | |||
148 | |||
57 | extern struct workqueue_struct *__create_workqueue(const char *name, | 149 | extern struct workqueue_struct *__create_workqueue(const char *name, |
58 | int singlethread); | 150 | int singlethread); |
59 | #define create_workqueue(name) __create_workqueue((name), 0) | 151 | #define create_workqueue(name) __create_workqueue((name), 0) |
@@ -62,39 +154,38 @@ extern struct workqueue_struct *__create_workqueue(const char *name, | |||
62 | extern void destroy_workqueue(struct workqueue_struct *wq); | 154 | extern void destroy_workqueue(struct workqueue_struct *wq); |
63 | 155 | ||
64 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); | 156 | extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work)); |
65 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct work_struct *work, unsigned long delay)); | 157 | extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay)); |
66 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 158 | extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
67 | struct work_struct *work, unsigned long delay); | 159 | struct delayed_work *work, unsigned long delay); |
68 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); | 160 | extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq)); |
69 | 161 | ||
70 | extern int FASTCALL(schedule_work(struct work_struct *work)); | 162 | extern int FASTCALL(schedule_work(struct work_struct *work)); |
71 | extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay)); | 163 | extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay)); |
72 | 164 | ||
73 | extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay); | 165 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay); |
74 | extern int schedule_on_each_cpu(void (*func)(void *info), void *info); | 166 | extern int schedule_on_each_cpu(work_func_t func); |
75 | extern void flush_scheduled_work(void); | 167 | extern void flush_scheduled_work(void); |
76 | extern int current_is_keventd(void); | 168 | extern int current_is_keventd(void); |
77 | extern int keventd_up(void); | 169 | extern int keventd_up(void); |
78 | 170 | ||
79 | extern void init_workqueues(void); | 171 | extern void init_workqueues(void); |
80 | void cancel_rearming_delayed_work(struct work_struct *work); | 172 | void cancel_rearming_delayed_work(struct delayed_work *work); |
81 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, | 173 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *, |
82 | struct work_struct *); | 174 | struct delayed_work *); |
83 | int execute_in_process_context(void (*fn)(void *), void *, | 175 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
84 | struct execute_work *); | ||
85 | 176 | ||
86 | /* | 177 | /* |
87 | * Kill off a pending schedule_delayed_work(). Note that the work callback | 178 | * Kill off a pending schedule_delayed_work(). Note that the work callback |
88 | * function may still be running on return from cancel_delayed_work(). Run | 179 | * function may still be running on return from cancel_delayed_work(). Run |
89 | * flush_scheduled_work() to wait on it. | 180 | * flush_scheduled_work() to wait on it. |
90 | */ | 181 | */ |
91 | static inline int cancel_delayed_work(struct work_struct *work) | 182 | static inline int cancel_delayed_work(struct delayed_work *work) |
92 | { | 183 | { |
93 | int ret; | 184 | int ret; |
94 | 185 | ||
95 | ret = del_timer_sync(&work->timer); | 186 | ret = del_timer_sync(&work->timer); |
96 | if (ret) | 187 | if (ret) |
97 | clear_bit(0, &work->pending); | 188 | clear_bit(WORK_STRUCT_PENDING, &work->work.management); |
98 | return ret; | 189 | return ret; |
99 | } | 190 | } |
100 | 191 | ||
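
The workqueue.h changes above split the timer-backed case out into struct delayed_work and drop the void *data cookie: a handler now receives the work_struct itself and recovers its context with container_of(). A minimal sketch of the new usage follows; struct my_device and the my_* functions are hypothetical and not part of this patch.

	struct my_device {
		spinlock_t		lock;
		struct work_struct	reset_work;	/* plain work item */
		struct delayed_work	poll_work;	/* timer-backed work item */
	};

	/* handlers take the work_struct, not a void *data argument */
	static void my_reset_fn(struct work_struct *work)
	{
		struct my_device *dev =
			container_of(work, struct my_device, reset_work);
		/* ... use dev ... */
	}

	static void my_poll_fn(struct work_struct *work)
	{
		/* delayed handlers reach their container via the embedded .work */
		struct my_device *dev =
			container_of(work, struct my_device, poll_work.work);
		/* ... use dev ... */
	}

	static void my_device_init(struct my_device *dev)
	{
		INIT_WORK(&dev->reset_work, my_reset_fn);
		INIT_DELAYED_WORK(&dev->poll_work, my_poll_fn);

		schedule_work(&dev->reset_work);
		schedule_delayed_work(&dev->poll_work, HZ);
	}
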
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h index 617b672b1132..89119277553d 100644 --- a/include/net/ieee80211softmac.h +++ b/include/net/ieee80211softmac.h | |||
@@ -108,8 +108,8 @@ struct ieee80211softmac_assoc_info { | |||
108 | /* Scan retries remaining */ | 108 | /* Scan retries remaining */ |
109 | int scan_retry; | 109 | int scan_retry; |
110 | 110 | ||
111 | struct work_struct work; | 111 | struct delayed_work work; |
112 | struct work_struct timeout; | 112 | struct delayed_work timeout; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | struct ieee80211softmac_bss_info { | 115 | struct ieee80211softmac_bss_info { |
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 5f48748fe017..f7be1ac73601 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h | |||
@@ -84,7 +84,7 @@ struct inet_timewait_death_row { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | extern void inet_twdr_hangman(unsigned long data); | 86 | extern void inet_twdr_hangman(unsigned long data); |
87 | extern void inet_twdr_twkill_work(void *data); | 87 | extern void inet_twdr_twkill_work(struct work_struct *work); |
88 | extern void inet_twdr_twcal_tick(unsigned long data); | 88 | extern void inet_twdr_twcal_tick(unsigned long data); |
89 | 89 | ||
90 | #if (BITS_PER_LONG == 64) | 90 | #if (BITS_PER_LONG == 64) |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f8cbe40f52c0..c089f93ba591 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -1030,7 +1030,7 @@ void sctp_inq_init(struct sctp_inq *); | |||
1030 | void sctp_inq_free(struct sctp_inq *); | 1030 | void sctp_inq_free(struct sctp_inq *); |
1031 | void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); | 1031 | void sctp_inq_push(struct sctp_inq *, struct sctp_chunk *packet); |
1032 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); | 1032 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *); |
1033 | void sctp_inq_set_th_handler(struct sctp_inq *, void (*)(void *), void *); | 1033 | void sctp_inq_set_th_handler(struct sctp_inq *, work_func_t); |
1034 | 1034 | ||
1035 | /* This is the structure we use to hold outbound chunks. You push | 1035 | /* This is the structure we use to hold outbound chunks. You push |
1036 | * chunks in and they automatically pop out the other end as bundled | 1036 | * chunks in and they automatically pop out the other end as bundled |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 44b2f82a6eec..9233ed5de664 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -201,9 +201,14 @@ struct domain_device { | |||
201 | void *lldd_dev; | 201 | void *lldd_dev; |
202 | }; | 202 | }; |
203 | 203 | ||
204 | struct sas_discovery_event { | ||
205 | struct work_struct work; | ||
206 | struct asd_sas_port *port; | ||
207 | }; | ||
208 | |||
204 | struct sas_discovery { | 209 | struct sas_discovery { |
205 | spinlock_t disc_event_lock; | 210 | spinlock_t disc_event_lock; |
206 | struct work_struct disc_work[DISC_NUM_EVENTS]; | 211 | struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; |
207 | unsigned long pending; | 212 | unsigned long pending; |
208 | u8 fanout_sas_addr[8]; | 213 | u8 fanout_sas_addr[8]; |
209 | u8 eeds_a[8]; | 214 | u8 eeds_a[8]; |
@@ -249,14 +254,19 @@ struct asd_sas_port { | |||
249 | void *lldd_port; /* not touched by the sas class code */ | 254 | void *lldd_port; /* not touched by the sas class code */ |
250 | }; | 255 | }; |
251 | 256 | ||
257 | struct asd_sas_event { | ||
258 | struct work_struct work; | ||
259 | struct asd_sas_phy *phy; | ||
260 | }; | ||
261 | |||
252 | /* The phy pretty much is controlled by the LLDD. | 262 | /* The phy pretty much is controlled by the LLDD. |
253 | * The class only reads those fields. | 263 | * The class only reads those fields. |
254 | */ | 264 | */ |
255 | struct asd_sas_phy { | 265 | struct asd_sas_phy { |
256 | /* private: */ | 266 | /* private: */ |
257 | /* protected by ha->event_lock */ | 267 | /* protected by ha->event_lock */ |
258 | struct work_struct port_events[PORT_NUM_EVENTS]; | 268 | struct asd_sas_event port_events[PORT_NUM_EVENTS]; |
259 | struct work_struct phy_events[PHY_NUM_EVENTS]; | 269 | struct asd_sas_event phy_events[PHY_NUM_EVENTS]; |
260 | 270 | ||
261 | unsigned long port_events_pending; | 271 | unsigned long port_events_pending; |
262 | unsigned long phy_events_pending; | 272 | unsigned long phy_events_pending; |
@@ -308,10 +318,15 @@ struct scsi_core { | |||
308 | int queue_thread_kill; | 318 | int queue_thread_kill; |
309 | }; | 319 | }; |
310 | 320 | ||
321 | struct sas_ha_event { | ||
322 | struct work_struct work; | ||
323 | struct sas_ha_struct *ha; | ||
324 | }; | ||
325 | |||
311 | struct sas_ha_struct { | 326 | struct sas_ha_struct { |
312 | /* private: */ | 327 | /* private: */ |
313 | spinlock_t event_lock; | 328 | spinlock_t event_lock; |
314 | struct work_struct ha_events[HA_NUM_EVENTS]; | 329 | struct sas_ha_event ha_events[HA_NUM_EVENTS]; |
315 | unsigned long pending; | 330 | unsigned long pending; |
316 | 331 | ||
317 | struct scsi_core core; | 332 | struct scsi_core core; |
@@ -631,6 +646,6 @@ void sas_unregister_dev(struct domain_device *); | |||
631 | 646 | ||
632 | void sas_init_dev(struct domain_device *); | 647 | void sas_init_dev(struct domain_device *); |
633 | 648 | ||
634 | void sas_task_abort(struct sas_task *task); | 649 | void sas_task_abort(struct work_struct *); |
635 | 650 | ||
636 | #endif /* _SASLIB_H_ */ | 651 | #endif /* _SASLIB_H_ */ |
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index fd352323378b..798f7c7ee426 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h | |||
@@ -206,9 +206,9 @@ struct fc_rport { /* aka fc_starget_attrs */ | |||
206 | u8 flags; | 206 | u8 flags; |
207 | struct list_head peers; | 207 | struct list_head peers; |
208 | struct device dev; | 208 | struct device dev; |
209 | struct work_struct dev_loss_work; | 209 | struct delayed_work dev_loss_work; |
210 | struct work_struct scan_work; | 210 | struct work_struct scan_work; |
211 | struct work_struct fail_io_work; | 211 | struct delayed_work fail_io_work; |
212 | struct work_struct stgt_delete_work; | 212 | struct work_struct stgt_delete_work; |
213 | struct work_struct rport_delete_work; | 213 | struct work_struct rport_delete_work; |
214 | } __attribute__((aligned(sizeof(unsigned long)))); | 214 | } __attribute__((aligned(sizeof(unsigned long)))); |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 4b95c89c95c9..d5c218ddc527 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
@@ -176,7 +176,7 @@ struct iscsi_cls_session { | |||
176 | 176 | ||
177 | /* recovery fields */ | 177 | /* recovery fields */ |
178 | int recovery_tmo; | 178 | int recovery_tmo; |
179 | struct work_struct recovery_work; | 179 | struct delayed_work recovery_work; |
180 | 180 | ||
181 | int target_id; | 181 | int target_id; |
182 | 182 | ||
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h index 4c43521cc493..33720397a904 100644 --- a/include/sound/ac97_codec.h +++ b/include/sound/ac97_codec.h | |||
@@ -511,7 +511,7 @@ struct snd_ac97 { | |||
511 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 511 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
512 | unsigned int power_up; /* power states */ | 512 | unsigned int power_up; /* power states */ |
513 | struct workqueue_struct *power_workq; | 513 | struct workqueue_struct *power_workq; |
514 | struct work_struct power_work; | 514 | struct delayed_work power_work; |
515 | #endif | 515 | #endif |
516 | struct device dev; | 516 | struct device dev; |
517 | }; | 517 | }; |
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h index 11702aa0bea9..2ee061625fd0 100644 --- a/include/sound/ak4114.h +++ b/include/sound/ak4114.h | |||
@@ -182,7 +182,7 @@ struct ak4114 { | |||
182 | unsigned char rcs0; | 182 | unsigned char rcs0; |
183 | unsigned char rcs1; | 183 | unsigned char rcs1; |
184 | struct workqueue_struct *workqueue; | 184 | struct workqueue_struct *workqueue; |
185 | struct work_struct work; | 185 | struct delayed_work work; |
186 | void *change_callback_private; | 186 | void *change_callback_private; |
187 | void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); | 187 | void (*change_callback)(struct ak4114 *ak4114, unsigned char c0, unsigned char c1); |
188 | }; | 188 | }; |
diff --git a/ipc/util.c b/ipc/util.c index cd8bb14a431f..a9b7a227b8d4 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -514,6 +514,11 @@ void ipc_rcu_getref(void *ptr) | |||
514 | container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; | 514 | container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; |
515 | } | 515 | } |
516 | 516 | ||
517 | static void ipc_do_vfree(struct work_struct *work) | ||
518 | { | ||
519 | vfree(container_of(work, struct ipc_rcu_sched, work)); | ||
520 | } | ||
521 | |||
517 | /** | 522 | /** |
518 | * ipc_schedule_free - free ipc + rcu space | 523 | * ipc_schedule_free - free ipc + rcu space |
519 | * @head: RCU callback structure for queued work | 524 | * @head: RCU callback structure for queued work |
@@ -528,7 +533,7 @@ static void ipc_schedule_free(struct rcu_head *head) | |||
528 | struct ipc_rcu_sched *sched = | 533 | struct ipc_rcu_sched *sched = |
529 | container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); | 534 | container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); |
530 | 535 | ||
531 | INIT_WORK(&sched->work, vfree, sched); | 536 | INIT_WORK(&sched->work, ipc_do_vfree); |
532 | schedule_work(&sched->work); | 537 | schedule_work(&sched->work); |
533 | } | 538 | } |
534 | 539 | ||
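
With the data pointer gone, ipc can no longer hand vfree() straight to INIT_WORK; the new ipc_do_vfree() trampoline recovers its argument with container_of() and calls vfree() itself. The same wrapper pattern in isolation, sketched with a hypothetical my_deferred_free structure (here the buffer is separate from the work item, unlike ipc where the work_struct sits inside the allocation being freed):

	struct my_deferred_free {
		void			*buf;	/* what we really want to free */
		struct work_struct	work;
	};

	static void my_do_free(struct work_struct *work)
	{
		struct my_deferred_free *df =
			container_of(work, struct my_deferred_free, work);

		vfree(df->buf);
		kfree(df);
	}

	/* callable from contexts where vfree() itself is not allowed */
	static void my_schedule_free(struct my_deferred_free *df)
	{
		INIT_WORK(&df->work, my_do_free);
		schedule_work(&df->work);
	}
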
diff --git a/kernel/kmod.c b/kernel/kmod.c index 2b76dee28496..8d2bea09a4ec 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -114,6 +114,7 @@ EXPORT_SYMBOL(request_module); | |||
114 | #endif /* CONFIG_KMOD */ | 114 | #endif /* CONFIG_KMOD */ |
115 | 115 | ||
116 | struct subprocess_info { | 116 | struct subprocess_info { |
117 | struct work_struct work; | ||
117 | struct completion *complete; | 118 | struct completion *complete; |
118 | char *path; | 119 | char *path; |
119 | char **argv; | 120 | char **argv; |
@@ -221,9 +222,10 @@ static int wait_for_helper(void *data) | |||
221 | } | 222 | } |
222 | 223 | ||
223 | /* This is run by khelper thread */ | 224 | /* This is run by khelper thread */ |
224 | static void __call_usermodehelper(void *data) | 225 | static void __call_usermodehelper(struct work_struct *work) |
225 | { | 226 | { |
226 | struct subprocess_info *sub_info = data; | 227 | struct subprocess_info *sub_info = |
228 | container_of(work, struct subprocess_info, work); | ||
227 | pid_t pid; | 229 | pid_t pid; |
228 | int wait = sub_info->wait; | 230 | int wait = sub_info->wait; |
229 | 231 | ||
@@ -264,6 +266,8 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
264 | { | 266 | { |
265 | DECLARE_COMPLETION_ONSTACK(done); | 267 | DECLARE_COMPLETION_ONSTACK(done); |
266 | struct subprocess_info sub_info = { | 268 | struct subprocess_info sub_info = { |
269 | .work = __WORK_INITIALIZER(sub_info.work, | ||
270 | __call_usermodehelper), | ||
267 | .complete = &done, | 271 | .complete = &done, |
268 | .path = path, | 272 | .path = path, |
269 | .argv = argv, | 273 | .argv = argv, |
@@ -272,7 +276,6 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
272 | .wait = wait, | 276 | .wait = wait, |
273 | .retval = 0, | 277 | .retval = 0, |
274 | }; | 278 | }; |
275 | DECLARE_WORK(work, __call_usermodehelper, &sub_info); | ||
276 | 279 | ||
277 | if (!khelper_wq) | 280 | if (!khelper_wq) |
278 | return -EBUSY; | 281 | return -EBUSY; |
@@ -280,7 +283,7 @@ int call_usermodehelper_keys(char *path, char **argv, char **envp, | |||
280 | if (path[0] == '\0') | 283 | if (path[0] == '\0') |
281 | return 0; | 284 | return 0; |
282 | 285 | ||
283 | queue_work(khelper_wq, &work); | 286 | queue_work(khelper_wq, &sub_info.work); |
284 | wait_for_completion(&done); | 287 | wait_for_completion(&done); |
285 | return sub_info.retval; | 288 | return sub_info.retval; |
286 | } | 289 | } |
@@ -291,6 +294,8 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
291 | { | 294 | { |
292 | DECLARE_COMPLETION(done); | 295 | DECLARE_COMPLETION(done); |
293 | struct subprocess_info sub_info = { | 296 | struct subprocess_info sub_info = { |
297 | .work = __WORK_INITIALIZER(sub_info.work, | ||
298 | __call_usermodehelper), | ||
294 | .complete = &done, | 299 | .complete = &done, |
295 | .path = path, | 300 | .path = path, |
296 | .argv = argv, | 301 | .argv = argv, |
@@ -298,7 +303,6 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
298 | .retval = 0, | 303 | .retval = 0, |
299 | }; | 304 | }; |
300 | struct file *f; | 305 | struct file *f; |
301 | DECLARE_WORK(work, __call_usermodehelper, &sub_info); | ||
302 | 306 | ||
303 | if (!khelper_wq) | 307 | if (!khelper_wq) |
304 | return -EBUSY; | 308 | return -EBUSY; |
@@ -318,7 +322,7 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp, | |||
318 | } | 322 | } |
319 | sub_info.stdin = f; | 323 | sub_info.stdin = f; |
320 | 324 | ||
321 | queue_work(khelper_wq, &work); | 325 | queue_work(khelper_wq, &sub_info.work); |
322 | wait_for_completion(&done); | 326 | wait_for_completion(&done); |
323 | return sub_info.retval; | 327 | return sub_info.retval; |
324 | } | 328 | } |
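
An on-stack DECLARE_WORK can no longer carry its context either, which is why kmod now embeds the work_struct in subprocess_info and seeds it with __WORK_INITIALIZER. The same submit-and-wait pattern in a stand-alone sketch; struct my_request and my_request_fn are hypothetical:

	struct my_request {
		struct work_struct	work;	/* must outlive the queued work */
		struct completion	*complete;
		int			result;
	};

	static void my_request_fn(struct work_struct *work)
	{
		struct my_request *req =
			container_of(work, struct my_request, work);

		req->result = 0;		/* do the real work here */
		complete(req->complete);
	}

	static int my_submit_and_wait(struct workqueue_struct *wq)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct my_request req = {
			.work		= __WORK_INITIALIZER(req.work, my_request_fn),
			.complete	= &done,
		};

		queue_work(wq, &req.work);
		wait_for_completion(&done);
		return req.result;
	}
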
diff --git a/kernel/kthread.c b/kernel/kthread.c index 4f9c60ef95e8..1db8c72d0d38 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -31,6 +31,8 @@ struct kthread_create_info | |||
31 | /* Result passed back to kthread_create() from keventd. */ | 31 | /* Result passed back to kthread_create() from keventd. */ |
32 | struct task_struct *result; | 32 | struct task_struct *result; |
33 | struct completion done; | 33 | struct completion done; |
34 | |||
35 | struct work_struct work; | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct kthread_stop_info | 38 | struct kthread_stop_info |
@@ -111,9 +113,10 @@ static int kthread(void *_create) | |||
111 | } | 113 | } |
112 | 114 | ||
113 | /* We are keventd: create a thread. */ | 115 | /* We are keventd: create a thread. */ |
114 | static void keventd_create_kthread(void *_create) | 116 | static void keventd_create_kthread(struct work_struct *work) |
115 | { | 117 | { |
116 | struct kthread_create_info *create = _create; | 118 | struct kthread_create_info *create = |
119 | container_of(work, struct kthread_create_info, work); | ||
117 | int pid; | 120 | int pid; |
118 | 121 | ||
119 | /* We want our own signal handler (we take no signals by default). */ | 122 | /* We want our own signal handler (we take no signals by default). */ |
@@ -154,20 +157,20 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
154 | ...) | 157 | ...) |
155 | { | 158 | { |
156 | struct kthread_create_info create; | 159 | struct kthread_create_info create; |
157 | DECLARE_WORK(work, keventd_create_kthread, &create); | ||
158 | 160 | ||
159 | create.threadfn = threadfn; | 161 | create.threadfn = threadfn; |
160 | create.data = data; | 162 | create.data = data; |
161 | init_completion(&create.started); | 163 | init_completion(&create.started); |
162 | init_completion(&create.done); | 164 | init_completion(&create.done); |
165 | INIT_WORK(&create.work, keventd_create_kthread); | ||
163 | 166 | ||
164 | /* | 167 | /* |
165 | * The workqueue needs to start up first: | 168 | * The workqueue needs to start up first: |
166 | */ | 169 | */ |
167 | if (!helper_wq) | 170 | if (!helper_wq) |
168 | work.func(work.data); | 171 | create.work.func(&create.work); |
169 | else { | 172 | else { |
170 | queue_work(helper_wq, &work); | 173 | queue_work(helper_wq, &create.work); |
171 | wait_for_completion(&create.done); | 174 | wait_for_completion(&create.done); |
172 | } | 175 | } |
173 | if (!IS_ERR(create.result)) { | 176 | if (!IS_ERR(create.result)) { |
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c index f1f900ac3164..678ec736076b 100644 --- a/kernel/power/poweroff.c +++ b/kernel/power/poweroff.c | |||
@@ -16,12 +16,12 @@ | |||
16 | * callback we use. | 16 | * callback we use. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | static void do_poweroff(void *dummy) | 19 | static void do_poweroff(struct work_struct *dummy) |
20 | { | 20 | { |
21 | kernel_power_off(); | 21 | kernel_power_off(); |
22 | } | 22 | } |
23 | 23 | ||
24 | static DECLARE_WORK(poweroff_work, do_poweroff, NULL); | 24 | static DECLARE_WORK(poweroff_work, do_poweroff); |
25 | 25 | ||
26 | static void handle_poweroff(int key, struct tty_struct *tty) | 26 | static void handle_poweroff(int key, struct tty_struct *tty) |
27 | { | 27 | { |
diff --git a/kernel/relay.c b/kernel/relay.c index f04bbdb56ac2..2b92e8ece85b 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -308,9 +308,10 @@ static struct rchan_callbacks default_channel_callbacks = { | |||
308 | * reason waking is deferred is that calling directly from write | 308 | * reason waking is deferred is that calling directly from write |
309 | * causes problems if you're writing from say the scheduler. | 309 | * causes problems if you're writing from say the scheduler. |
310 | */ | 310 | */ |
311 | static void wakeup_readers(void *private) | 311 | static void wakeup_readers(struct work_struct *work) |
312 | { | 312 | { |
313 | struct rchan_buf *buf = private; | 313 | struct rchan_buf *buf = |
314 | container_of(work, struct rchan_buf, wake_readers.work); | ||
314 | wake_up_interruptible(&buf->read_wait); | 315 | wake_up_interruptible(&buf->read_wait); |
315 | } | 316 | } |
316 | 317 | ||
@@ -328,7 +329,7 @@ static inline void __relay_reset(struct rchan_buf *buf, unsigned int init) | |||
328 | if (init) { | 329 | if (init) { |
329 | init_waitqueue_head(&buf->read_wait); | 330 | init_waitqueue_head(&buf->read_wait); |
330 | kref_init(&buf->kref); | 331 | kref_init(&buf->kref); |
331 | INIT_WORK(&buf->wake_readers, NULL, NULL); | 332 | INIT_DELAYED_WORK(&buf->wake_readers, NULL); |
332 | } else { | 333 | } else { |
333 | cancel_delayed_work(&buf->wake_readers); | 334 | cancel_delayed_work(&buf->wake_readers); |
334 | flush_scheduled_work(); | 335 | flush_scheduled_work(); |
@@ -549,7 +550,8 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length) | |||
549 | buf->padding[old_subbuf]; | 550 | buf->padding[old_subbuf]; |
550 | smp_mb(); | 551 | smp_mb(); |
551 | if (waitqueue_active(&buf->read_wait)) { | 552 | if (waitqueue_active(&buf->read_wait)) { |
552 | PREPARE_WORK(&buf->wake_readers, wakeup_readers, buf); | 553 | PREPARE_DELAYED_WORK(&buf->wake_readers, |
554 | wakeup_readers); | ||
553 | schedule_delayed_work(&buf->wake_readers, 1); | 555 | schedule_delayed_work(&buf->wake_readers, 1); |
554 | } | 556 | } |
555 | } | 557 | } |
diff --git a/kernel/sys.c b/kernel/sys.c index 98489d82801b..c87b461de38d 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -880,7 +880,7 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user | |||
880 | return 0; | 880 | return 0; |
881 | } | 881 | } |
882 | 882 | ||
883 | static void deferred_cad(void *dummy) | 883 | static void deferred_cad(struct work_struct *dummy) |
884 | { | 884 | { |
885 | kernel_restart(NULL); | 885 | kernel_restart(NULL); |
886 | } | 886 | } |
@@ -892,7 +892,7 @@ static void deferred_cad(void *dummy) | |||
892 | */ | 892 | */ |
893 | void ctrl_alt_del(void) | 893 | void ctrl_alt_del(void) |
894 | { | 894 | { |
895 | static DECLARE_WORK(cad_work, deferred_cad, NULL); | 895 | static DECLARE_WORK(cad_work, deferred_cad); |
896 | 896 | ||
897 | if (C_A_D) | 897 | if (C_A_D) |
898 | schedule_work(&cad_work); | 898 | schedule_work(&cad_work); |
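
Statically declared work items lose the data argument as well: DECLARE_WORK now takes only a name and a handler, and handlers that never needed context simply ignore their work_struct argument, as do_poweroff() and deferred_cad() do above. A minimal sketch under the new form, with hypothetical names:

	static void my_emergency_sync(struct work_struct *unused)
	{
		/* context-free handler: the argument is ignored */
		emergency_sync();
	}

	static DECLARE_WORK(my_sync_work, my_emergency_sync);

	void my_trigger_sync(void)
	{
		/* safe from atomic context; runs later from keventd */
		schedule_work(&my_sync_work);
	}
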
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 17c2f03d2c27..8d1e7cb8a51a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -80,6 +80,29 @@ static inline int is_single_threaded(struct workqueue_struct *wq) | |||
80 | return list_empty(&wq->list); | 80 | return list_empty(&wq->list); |
81 | } | 81 | } |
82 | 82 | ||
83 | static inline void set_wq_data(struct work_struct *work, void *wq) | ||
84 | { | ||
85 | unsigned long new, old, res; | ||
86 | |||
87 | /* assume the pending flag is already set and that the task has already | ||
88 | * been queued on this workqueue */ | ||
89 | new = (unsigned long) wq | (1UL << WORK_STRUCT_PENDING); | ||
90 | res = work->management; | ||
91 | if (res != new) { | ||
92 | do { | ||
93 | old = res; | ||
94 | new = (unsigned long) wq; | ||
95 | new |= (old & WORK_STRUCT_FLAG_MASK); | ||
96 | res = cmpxchg(&work->management, old, new); | ||
97 | } while (res != old); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | static inline void *get_wq_data(struct work_struct *work) | ||
102 | { | ||
103 | return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK); | ||
104 | } | ||
105 | |||
83 | /* Preempt must be disabled. */ | 106 | /* Preempt must be disabled. */ |
84 | static void __queue_work(struct cpu_workqueue_struct *cwq, | 107 | static void __queue_work(struct cpu_workqueue_struct *cwq, |
85 | struct work_struct *work) | 108 | struct work_struct *work) |
@@ -87,7 +110,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
87 | unsigned long flags; | 110 | unsigned long flags; |
88 | 111 | ||
89 | spin_lock_irqsave(&cwq->lock, flags); | 112 | spin_lock_irqsave(&cwq->lock, flags); |
90 | work->wq_data = cwq; | 113 | set_wq_data(work, cwq); |
91 | list_add_tail(&work->entry, &cwq->worklist); | 114 | list_add_tail(&work->entry, &cwq->worklist); |
92 | cwq->insert_sequence++; | 115 | cwq->insert_sequence++; |
93 | wake_up(&cwq->more_work); | 116 | wake_up(&cwq->more_work); |
@@ -108,7 +131,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
108 | { | 131 | { |
109 | int ret = 0, cpu = get_cpu(); | 132 | int ret = 0, cpu = get_cpu(); |
110 | 133 | ||
111 | if (!test_and_set_bit(0, &work->pending)) { | 134 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
112 | if (unlikely(is_single_threaded(wq))) | 135 | if (unlikely(is_single_threaded(wq))) |
113 | cpu = singlethread_cpu; | 136 | cpu = singlethread_cpu; |
114 | BUG_ON(!list_empty(&work->entry)); | 137 | BUG_ON(!list_empty(&work->entry)); |
@@ -122,38 +145,42 @@ EXPORT_SYMBOL_GPL(queue_work); | |||
122 | 145 | ||
123 | static void delayed_work_timer_fn(unsigned long __data) | 146 | static void delayed_work_timer_fn(unsigned long __data) |
124 | { | 147 | { |
125 | struct work_struct *work = (struct work_struct *)__data; | 148 | struct delayed_work *dwork = (struct delayed_work *)__data; |
126 | struct workqueue_struct *wq = work->wq_data; | 149 | struct workqueue_struct *wq = get_wq_data(&dwork->work); |
127 | int cpu = smp_processor_id(); | 150 | int cpu = smp_processor_id(); |
128 | 151 | ||
129 | if (unlikely(is_single_threaded(wq))) | 152 | if (unlikely(is_single_threaded(wq))) |
130 | cpu = singlethread_cpu; | 153 | cpu = singlethread_cpu; |
131 | 154 | ||
132 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 155 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), &dwork->work); |
133 | } | 156 | } |
134 | 157 | ||
135 | /** | 158 | /** |
136 | * queue_delayed_work - queue work on a workqueue after delay | 159 | * queue_delayed_work - queue work on a workqueue after delay |
137 | * @wq: workqueue to use | 160 | * @wq: workqueue to use |
138 | * @work: work to queue | 161 | * @work: delayable work to queue |
139 | * @delay: number of jiffies to wait before queueing | 162 | * @delay: number of jiffies to wait before queueing |
140 | * | 163 | * |
141 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 164 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
142 | */ | 165 | */ |
143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, | 166 | int fastcall queue_delayed_work(struct workqueue_struct *wq, |
144 | struct work_struct *work, unsigned long delay) | 167 | struct delayed_work *dwork, unsigned long delay) |
145 | { | 168 | { |
146 | int ret = 0; | 169 | int ret = 0; |
147 | struct timer_list *timer = &work->timer; | 170 | struct timer_list *timer = &dwork->timer; |
171 | struct work_struct *work = &dwork->work; | ||
172 | |||
173 | if (delay == 0) | ||
174 | return queue_work(wq, work); | ||
148 | 175 | ||
149 | if (!test_and_set_bit(0, &work->pending)) { | 176 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
150 | BUG_ON(timer_pending(timer)); | 177 | BUG_ON(timer_pending(timer)); |
151 | BUG_ON(!list_empty(&work->entry)); | 178 | BUG_ON(!list_empty(&work->entry)); |
152 | 179 | ||
153 | /* This stores wq for the moment, for the timer_fn */ | 180 | /* This stores wq for the moment, for the timer_fn */ |
154 | work->wq_data = wq; | 181 | set_wq_data(work, wq); |
155 | timer->expires = jiffies + delay; | 182 | timer->expires = jiffies + delay; |
156 | timer->data = (unsigned long)work; | 183 | timer->data = (unsigned long)dwork; |
157 | timer->function = delayed_work_timer_fn; | 184 | timer->function = delayed_work_timer_fn; |
158 | add_timer(timer); | 185 | add_timer(timer); |
159 | ret = 1; | 186 | ret = 1; |
@@ -172,19 +199,20 @@ EXPORT_SYMBOL_GPL(queue_delayed_work); | |||
172 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 199 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
173 | */ | 200 | */ |
174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 201 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
175 | struct work_struct *work, unsigned long delay) | 202 | struct delayed_work *dwork, unsigned long delay) |
176 | { | 203 | { |
177 | int ret = 0; | 204 | int ret = 0; |
178 | struct timer_list *timer = &work->timer; | 205 | struct timer_list *timer = &dwork->timer; |
206 | struct work_struct *work = &dwork->work; | ||
179 | 207 | ||
180 | if (!test_and_set_bit(0, &work->pending)) { | 208 | if (!test_and_set_bit(WORK_STRUCT_PENDING, &work->management)) { |
181 | BUG_ON(timer_pending(timer)); | 209 | BUG_ON(timer_pending(timer)); |
182 | BUG_ON(!list_empty(&work->entry)); | 210 | BUG_ON(!list_empty(&work->entry)); |
183 | 211 | ||
184 | /* This stores wq for the moment, for the timer_fn */ | 212 | /* This stores wq for the moment, for the timer_fn */ |
185 | work->wq_data = wq; | 213 | set_wq_data(work, wq); |
186 | timer->expires = jiffies + delay; | 214 | timer->expires = jiffies + delay; |
187 | timer->data = (unsigned long)work; | 215 | timer->data = (unsigned long)dwork; |
188 | timer->function = delayed_work_timer_fn; | 216 | timer->function = delayed_work_timer_fn; |
189 | add_timer_on(timer, cpu); | 217 | add_timer_on(timer, cpu); |
190 | ret = 1; | 218 | ret = 1; |
@@ -212,15 +240,15 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
212 | while (!list_empty(&cwq->worklist)) { | 240 | while (!list_empty(&cwq->worklist)) { |
213 | struct work_struct *work = list_entry(cwq->worklist.next, | 241 | struct work_struct *work = list_entry(cwq->worklist.next, |
214 | struct work_struct, entry); | 242 | struct work_struct, entry); |
215 | void (*f) (void *) = work->func; | 243 | work_func_t f = work->func; |
216 | void *data = work->data; | ||
217 | 244 | ||
218 | list_del_init(cwq->worklist.next); | 245 | list_del_init(cwq->worklist.next); |
219 | spin_unlock_irqrestore(&cwq->lock, flags); | 246 | spin_unlock_irqrestore(&cwq->lock, flags); |
220 | 247 | ||
221 | BUG_ON(work->wq_data != cwq); | 248 | BUG_ON(get_wq_data(work) != cwq); |
222 | clear_bit(0, &work->pending); | 249 | if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management)) |
223 | f(data); | 250 | work_release(work); |
251 | f(work); | ||
224 | 252 | ||
225 | spin_lock_irqsave(&cwq->lock, flags); | 253 | spin_lock_irqsave(&cwq->lock, flags); |
226 | cwq->remove_sequence++; | 254 | cwq->remove_sequence++; |
@@ -468,38 +496,37 @@ EXPORT_SYMBOL(schedule_work); | |||
468 | 496 | ||
469 | /** | 497 | /** |
470 | * schedule_delayed_work - put work task in global workqueue after delay | 498 | * schedule_delayed_work - put work task in global workqueue after delay |
471 | * @work: job to be done | 499 | * @dwork: job to be done |
472 | * @delay: number of jiffies to wait | 500 | * @delay: number of jiffies to wait or 0 for immediate execution |
473 | * | 501 | * |
474 | * After waiting for a given time this puts a job in the kernel-global | 502 | * After waiting for a given time this puts a job in the kernel-global |
475 | * workqueue. | 503 | * workqueue. |
476 | */ | 504 | */ |
477 | int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay) | 505 | int fastcall schedule_delayed_work(struct delayed_work *dwork, unsigned long delay) |
478 | { | 506 | { |
479 | return queue_delayed_work(keventd_wq, work, delay); | 507 | return queue_delayed_work(keventd_wq, dwork, delay); |
480 | } | 508 | } |
481 | EXPORT_SYMBOL(schedule_delayed_work); | 509 | EXPORT_SYMBOL(schedule_delayed_work); |
482 | 510 | ||
483 | /** | 511 | /** |
484 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay | 512 | * schedule_delayed_work_on - queue work in global workqueue on CPU after delay |
485 | * @cpu: cpu to use | 513 | * @cpu: cpu to use |
486 | * @work: job to be done | 514 | * @dwork: job to be done |
487 | * @delay: number of jiffies to wait | 515 | * @delay: number of jiffies to wait |
488 | * | 516 | * |
489 | * After waiting for a given time this puts a job in the kernel-global | 517 | * After waiting for a given time this puts a job in the kernel-global |
490 | * workqueue on the specified CPU. | 518 | * workqueue on the specified CPU. |
491 | */ | 519 | */ |
492 | int schedule_delayed_work_on(int cpu, | 520 | int schedule_delayed_work_on(int cpu, |
493 | struct work_struct *work, unsigned long delay) | 521 | struct delayed_work *dwork, unsigned long delay) |
494 | { | 522 | { |
495 | return queue_delayed_work_on(cpu, keventd_wq, work, delay); | 523 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); |
496 | } | 524 | } |
497 | EXPORT_SYMBOL(schedule_delayed_work_on); | 525 | EXPORT_SYMBOL(schedule_delayed_work_on); |
498 | 526 | ||
499 | /** | 527 | /** |
500 | * schedule_on_each_cpu - call a function on each online CPU from keventd | 528 | * schedule_on_each_cpu - call a function on each online CPU from keventd |
501 | * @func: the function to call | 529 | * @func: the function to call |
502 | * @info: a pointer to pass to func() | ||
503 | * | 530 | * |
504 | * Returns zero on success. | 531 | * Returns zero on success. |
505 | * Returns -ve errno on failure. | 532 | * Returns -ve errno on failure. |
@@ -508,7 +535,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on); | |||
508 | * | 535 | * |
509 | * schedule_on_each_cpu() is very slow. | 536 | * schedule_on_each_cpu() is very slow. |
510 | */ | 537 | */ |
511 | int schedule_on_each_cpu(void (*func)(void *info), void *info) | 538 | int schedule_on_each_cpu(work_func_t func) |
512 | { | 539 | { |
513 | int cpu; | 540 | int cpu; |
514 | struct work_struct *works; | 541 | struct work_struct *works; |
@@ -519,7 +546,7 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info) | |||
519 | 546 | ||
520 | mutex_lock(&workqueue_mutex); | 547 | mutex_lock(&workqueue_mutex); |
521 | for_each_online_cpu(cpu) { | 548 | for_each_online_cpu(cpu) { |
522 | INIT_WORK(per_cpu_ptr(works, cpu), func, info); | 549 | INIT_WORK(per_cpu_ptr(works, cpu), func); |
523 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), | 550 | __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), |
524 | per_cpu_ptr(works, cpu)); | 551 | per_cpu_ptr(works, cpu)); |
525 | } | 552 | } |
@@ -539,12 +566,12 @@ EXPORT_SYMBOL(flush_scheduled_work); | |||
539 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed | 566 | * cancel_rearming_delayed_workqueue - reliably kill off a delayed |
540 | * work whose handler rearms the delayed work. | 567 | * work whose handler rearms the delayed work. |
541 | * @wq: the controlling workqueue structure | 568 | * @wq: the controlling workqueue structure |
542 | * @work: the delayed work struct | 569 | * @dwork: the delayed work struct |
543 | */ | 570 | */ |
544 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | 571 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, |
545 | struct work_struct *work) | 572 | struct delayed_work *dwork) |
546 | { | 573 | { |
547 | while (!cancel_delayed_work(work)) | 574 | while (!cancel_delayed_work(dwork)) |
548 | flush_workqueue(wq); | 575 | flush_workqueue(wq); |
549 | } | 576 | } |
550 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | 577 | EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); |
@@ -552,18 +579,17 @@ EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); | |||
552 | /** | 579 | /** |
553 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd | 580 | * cancel_rearming_delayed_work - reliably kill off a delayed keventd |
554 | * work whose handler rearms the delayed work. | 581 | * work whose handler rearms the delayed work. |
555 | * @work: the delayed work struct | 582 | * @dwork: the delayed work struct |
556 | */ | 583 | */ |
557 | void cancel_rearming_delayed_work(struct work_struct *work) | 584 | void cancel_rearming_delayed_work(struct delayed_work *dwork) |
558 | { | 585 | { |
559 | cancel_rearming_delayed_workqueue(keventd_wq, work); | 586 | cancel_rearming_delayed_workqueue(keventd_wq, dwork); |
560 | } | 587 | } |
561 | EXPORT_SYMBOL(cancel_rearming_delayed_work); | 588 | EXPORT_SYMBOL(cancel_rearming_delayed_work); |
562 | 589 | ||
563 | /** | 590 | /** |
564 | * execute_in_process_context - reliably execute the routine with user context | 591 | * execute_in_process_context - reliably execute the routine with user context |
565 | * @fn: the function to execute | 592 | * @fn: the function to execute |
566 | * @data: data to pass to the function | ||
567 | * @ew: guaranteed storage for the execute work structure (must | 593 | * @ew: guaranteed storage for the execute work structure (must |
568 | * be available when the work executes) | 594 | * be available when the work executes) |
569 | * | 595 | * |
@@ -573,15 +599,14 @@ EXPORT_SYMBOL(cancel_rearming_delayed_work); | |||
573 | * Returns: 0 - function was executed | 599 | * Returns: 0 - function was executed |
574 | * 1 - function was scheduled for execution | 600 | * 1 - function was scheduled for execution |
575 | */ | 601 | */ |
576 | int execute_in_process_context(void (*fn)(void *data), void *data, | 602 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) |
577 | struct execute_work *ew) | ||
578 | { | 603 | { |
579 | if (!in_interrupt()) { | 604 | if (!in_interrupt()) { |
580 | fn(data); | 605 | fn(&ew->work); |
581 | return 0; | 606 | return 0; |
582 | } | 607 | } |
583 | 608 | ||
584 | INIT_WORK(&ew->work, fn, data); | 609 | INIT_WORK(&ew->work, fn); |
585 | schedule_work(&ew->work); | 610 | schedule_work(&ew->work); |
586 | 611 | ||
587 | return 1; | 612 | return 1; |
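
The new set_wq_data()/get_wq_data() helpers above pack the cpu_workqueue_struct pointer and the two flag bits into the single 'management' word; this works because the pointer is at least 4-byte aligned, leaving bits 0 and 1 free for WORK_STRUCT_PENDING and WORK_STRUCT_NOAUTOREL. A stand-alone, user-space illustration of that packing (not kernel code, just a sketch of the bit layout):

	#include <assert.h>

	#define WORK_STRUCT_PENDING		0
	#define WORK_STRUCT_NOAUTOREL		1
	#define WORK_STRUCT_FLAG_MASK		(3UL)
	#define WORK_STRUCT_WQ_DATA_MASK	(~WORK_STRUCT_FLAG_MASK)

	int main(void)
	{
		/* stands in for a cpu_workqueue_struct; alignment keeps bits 0-1 clear */
		static unsigned long cwq __attribute__((aligned(4)));
		unsigned long management = 0;

		/* queue_work(): set the pending bit, then fold the queue pointer in */
		management |= 1UL << WORK_STRUCT_PENDING;
		management = (unsigned long)&cwq |
			     (management & WORK_STRUCT_FLAG_MASK);

		/* get_wq_data(): mask the flag bits off to recover the pointer */
		assert((void *)(management & WORK_STRUCT_WQ_DATA_MASK) == (void *)&cwq);
		assert(management & (1UL << WORK_STRUCT_PENDING));
		return 0;
	}
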
diff --git a/mm/nommu.c b/mm/nommu.c index 8bdde9508f3b..6a2a8aada401 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -497,15 +497,17 @@ static int validate_mmap_request(struct file *file, | |||
497 | (flags & MAP_TYPE) != MAP_SHARED) | 497 | (flags & MAP_TYPE) != MAP_SHARED) |
498 | return -EINVAL; | 498 | return -EINVAL; |
499 | 499 | ||
500 | if (PAGE_ALIGN(len) == 0) | 500 | if (!len) |
501 | return addr; | ||
502 | |||
503 | if (len > TASK_SIZE) | ||
504 | return -EINVAL; | 501 | return -EINVAL; |
505 | 502 | ||
503 | /* Careful about overflows.. */ | ||
504 | len = PAGE_ALIGN(len); | ||
505 | if (!len || len > TASK_SIZE) | ||
506 | return -ENOMEM; | ||
507 | |||
506 | /* offset overflow? */ | 508 | /* offset overflow? */ |
507 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) | 509 | if ((pgoff + (len >> PAGE_SHIFT)) < pgoff) |
508 | return -EINVAL; | 510 | return -EOVERFLOW; |
509 | 511 | ||
510 | if (file) { | 512 | if (file) { |
511 | /* validate file mapping requests */ | 513 | /* validate file mapping requests */ |
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | 313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, |
314 | int node); | 314 | int node); |
315 | static int enable_cpucache(struct kmem_cache *cachep); | 315 | static int enable_cpucache(struct kmem_cache *cachep); |
316 | static void cache_reap(void *unused); | 316 | static void cache_reap(struct work_struct *unused); |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * This function must be completely optimized away if a constant is passed to | 319 | * This function must be completely optimized away if a constant is passed to |
@@ -753,7 +753,7 @@ int slab_is_available(void) | |||
753 | return g_cpucache_up == FULL; | 753 | return g_cpucache_up == FULL; |
754 | } | 754 | } |
755 | 755 | ||
756 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 756 | static DEFINE_PER_CPU(struct delayed_work, reap_work); |
757 | 757 | ||
758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) | 758 | static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) |
759 | { | 759 | { |
@@ -916,16 +916,16 @@ static void next_reap_node(void) | |||
916 | */ | 916 | */ |
917 | static void __devinit start_cpu_timer(int cpu) | 917 | static void __devinit start_cpu_timer(int cpu) |
918 | { | 918 | { |
919 | struct work_struct *reap_work = &per_cpu(reap_work, cpu); | 919 | struct delayed_work *reap_work = &per_cpu(reap_work, cpu); |
920 | 920 | ||
921 | /* | 921 | /* |
922 | * When this gets called from do_initcalls via cpucache_init(), | 922 | * When this gets called from do_initcalls via cpucache_init(), |
923 | * init_workqueues() has already run, so keventd will be setup | 923 | * init_workqueues() has already run, so keventd will be setup |
924 | * at that time. | 924 | * at that time. |
925 | */ | 925 | */ |
926 | if (keventd_up() && reap_work->func == NULL) { | 926 | if (keventd_up() && reap_work->work.func == NULL) { |
927 | init_reap_node(cpu); | 927 | init_reap_node(cpu); |
928 | INIT_WORK(reap_work, cache_reap, NULL); | 928 | INIT_DELAYED_WORK(reap_work, cache_reap); |
929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); | 929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); |
930 | } | 930 | } |
931 | } | 931 | } |
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try | 3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try |
3816 | * again on the next iteration. | 3816 | * again on the next iteration. |
3817 | */ | 3817 | */ |
3818 | static void cache_reap(void *unused) | 3818 | static void cache_reap(struct work_struct *unused) |
3819 | { | 3819 | { |
3820 | struct kmem_cache *searchp; | 3820 | struct kmem_cache *searchp; |
3821 | struct kmem_list3 *l3; | 3821 | struct kmem_list3 *l3; |
@@ -216,7 +216,7 @@ void lru_add_drain(void) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | #ifdef CONFIG_NUMA | 218 | #ifdef CONFIG_NUMA |
219 | static void lru_add_drain_per_cpu(void *dummy) | 219 | static void lru_add_drain_per_cpu(struct work_struct *dummy) |
220 | { | 220 | { |
221 | lru_add_drain(); | 221 | lru_add_drain(); |
222 | } | 222 | } |
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy) | |||
226 | */ | 226 | */ |
227 | int lru_add_drain_all(void) | 227 | int lru_add_drain_all(void) |
228 | { | 228 | { |
229 | return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL); | 229 | return schedule_on_each_cpu(lru_add_drain_per_cpu); |
230 | } | 230 | } |
231 | 231 | ||
232 | #else | 232 | #else |
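
schedule_on_each_cpu() drops its info argument along with everything else, which is why lru_add_drain_all() above now passes only the handler; any per-CPU context has to come from per-CPU data or from the container of the per-CPU work item. A minimal caller under the new signature, with hypothetical names:

	static void my_drain_cpu(struct work_struct *unused)
	{
		/* runs once on every online CPU from keventd */
		printk(KERN_DEBUG "draining caches on cpu %d\n", smp_processor_id());
	}

	int my_drain_all_cpus(void)
	{
		/* blocks until the handler has run everywhere; may sleep */
		return schedule_on_each_cpu(my_drain_cpu);
	}
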
diff --git a/net/atm/lec.c b/net/atm/lec.c index 5946ec63724f..3fc0abeeaf34 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -1454,7 +1454,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, | |||
1454 | 1454 | ||
1455 | #define LEC_ARP_REFRESH_INTERVAL (3*HZ) | 1455 | #define LEC_ARP_REFRESH_INTERVAL (3*HZ) |
1456 | 1456 | ||
1457 | static void lec_arp_check_expire(void *data); | 1457 | static void lec_arp_check_expire(struct work_struct *work); |
1458 | static void lec_arp_expire_arp(unsigned long data); | 1458 | static void lec_arp_expire_arp(unsigned long data); |
1459 | 1459 | ||
1460 | /* | 1460 | /* |
@@ -1477,7 +1477,7 @@ static void lec_arp_init(struct lec_priv *priv) | |||
1477 | INIT_HLIST_HEAD(&priv->lec_no_forward); | 1477 | INIT_HLIST_HEAD(&priv->lec_no_forward); |
1478 | INIT_HLIST_HEAD(&priv->mcast_fwds); | 1478 | INIT_HLIST_HEAD(&priv->mcast_fwds); |
1479 | spin_lock_init(&priv->lec_arp_lock); | 1479 | spin_lock_init(&priv->lec_arp_lock); |
1480 | INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); | 1480 | INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); |
1481 | schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); | 1481 | schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); |
1482 | } | 1482 | } |
1483 | 1483 | ||
@@ -1875,10 +1875,11 @@ static void lec_arp_expire_vcc(unsigned long data) | |||
1875 | * to ESI_FORWARD_DIRECT. This causes the flush period to end | 1875 | * to ESI_FORWARD_DIRECT. This causes the flush period to end |
1876 | * regardless of the progress of the flush protocol. | 1876 | * regardless of the progress of the flush protocol. |
1877 | */ | 1877 | */ |
1878 | static void lec_arp_check_expire(void *data) | 1878 | static void lec_arp_check_expire(struct work_struct *work) |
1879 | { | 1879 | { |
1880 | unsigned long flags; | 1880 | unsigned long flags; |
1881 | struct lec_priv *priv = data; | 1881 | struct lec_priv *priv = |
1882 | container_of(work, struct lec_priv, lec_arp_work.work); | ||
1882 | struct hlist_node *node, *next; | 1883 | struct hlist_node *node, *next; |
1883 | struct lec_arp_table *entry; | 1884 | struct lec_arp_table *entry; |
1884 | unsigned long now; | 1885 | unsigned long now; |
diff --git a/net/atm/lec.h b/net/atm/lec.h index 24cc95f86741..99136babd535 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h | |||
@@ -92,7 +92,7 @@ struct lec_priv { | |||
92 | spinlock_t lec_arp_lock; | 92 | spinlock_t lec_arp_lock; |
93 | struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ | 93 | struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ |
94 | struct atm_vcc *lecd; | 94 | struct atm_vcc *lecd; |
95 | struct work_struct lec_arp_work; /* C10 */ | 95 | struct delayed_work lec_arp_work; /* C10 */ |
96 | unsigned int maximum_unknown_frame_count; | 96 | unsigned int maximum_unknown_frame_count; |
97 | /* | 97 | /* |
98 | * Within the period of time defined by this variable, the client will send | 98 | * Within the period of time defined by this variable, the client will send |
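
Where a work item is queued with a timeout, as lec_arp_work is here, the field type changes from struct work_struct to struct delayed_work. A delayed_work wraps its work_struct in a member named work, which is why handlers throughout this diff use container_of(work, ..., <field>.work). A sketch of that shape, assuming the post-change API; bar_priv, bar_expire and bar_init are made-up names for illustration:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct bar_priv {
	struct delayed_work expire_work;	/* was: struct work_struct */
	unsigned long last_seen;
};

static void bar_expire(struct work_struct *work)
{
	/* note the extra ".work": delayed_work embeds the work_struct */
	struct bar_priv *priv =
		container_of(work, struct bar_priv, expire_work.work);

	priv->last_seen = jiffies;
	/* re-arm, as lec_arp_check_expire() does with its refresh interval */
	schedule_delayed_work(&priv->expire_work, 3 * HZ);
}

static void bar_init(struct bar_priv *priv)
{
	INIT_DELAYED_WORK(&priv->expire_work, bar_expire);
	schedule_delayed_work(&priv->expire_work, 3 * HZ);
}
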
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 3eeeb7a86e75..d4c935692ccf 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c | |||
@@ -237,9 +237,9 @@ static void bt_release(struct device *dev) | |||
237 | kfree(data); | 237 | kfree(data); |
238 | } | 238 | } |
239 | 239 | ||
240 | static void add_conn(void *data) | 240 | static void add_conn(struct work_struct *work) |
241 | { | 241 | { |
242 | struct hci_conn *conn = data; | 242 | struct hci_conn *conn = container_of(work, struct hci_conn, work); |
243 | int i; | 243 | int i; |
244 | 244 | ||
245 | if (device_register(&conn->dev) < 0) { | 245 | if (device_register(&conn->dev) < 0) { |
@@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn) | |||
272 | 272 | ||
273 | dev_set_drvdata(&conn->dev, conn); | 273 | dev_set_drvdata(&conn->dev, conn); |
274 | 274 | ||
275 | INIT_WORK(&conn->work, add_conn, (void *) conn); | 275 | INIT_WORK(&conn->work, add_conn); |
276 | 276 | ||
277 | schedule_work(&conn->work); | 277 | schedule_work(&conn->work); |
278 | } | 278 | } |
279 | 279 | ||
280 | static void del_conn(void *data) | 280 | static void del_conn(struct work_struct *work) |
281 | { | 281 | { |
282 | struct hci_conn *conn = data; | 282 | struct hci_conn *conn = container_of(work, struct hci_conn, work); |
283 | device_del(&conn->dev); | 283 | device_del(&conn->dev); |
284 | } | 284 | } |
285 | 285 | ||
@@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn) | |||
287 | { | 287 | { |
288 | BT_DBG("conn %p", conn); | 288 | BT_DBG("conn %p", conn); |
289 | 289 | ||
290 | INIT_WORK(&conn->work, del_conn, (void *) conn); | 290 | INIT_WORK(&conn->work, del_conn); |
291 | 291 | ||
292 | schedule_work(&conn->work); | 292 | schedule_work(&conn->work); |
293 | } | 293 | } |
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f753c40c11d2..55bb2634c088 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev) | |||
77 | * Called from work queue to allow for calling functions that | 77 | * Called from work queue to allow for calling functions that |
78 | * might sleep (such as speed check), and to debounce. | 78 | * might sleep (such as speed check), and to debounce. |
79 | */ | 79 | */ |
80 | static void port_carrier_check(void *arg) | 80 | static void port_carrier_check(struct work_struct *work) |
81 | { | 81 | { |
82 | struct net_device *dev = arg; | ||
83 | struct net_bridge_port *p; | 82 | struct net_bridge_port *p; |
83 | struct net_device *dev; | ||
84 | struct net_bridge *br; | 84 | struct net_bridge *br; |
85 | 85 | ||
86 | dev = container_of(work, struct net_bridge_port, | ||
87 | carrier_check.work)->dev; | ||
88 | work_release(work); | ||
89 | |||
86 | rtnl_lock(); | 90 | rtnl_lock(); |
87 | p = dev->br_port; | 91 | p = dev->br_port; |
88 | if (!p) | 92 | if (!p) |
@@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, | |||
276 | p->port_no = index; | 280 | p->port_no = index; |
277 | br_init_port(p); | 281 | br_init_port(p); |
278 | p->state = BR_STATE_DISABLED; | 282 | p->state = BR_STATE_DISABLED; |
279 | INIT_WORK(&p->carrier_check, port_carrier_check, dev); | 283 | INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check); |
280 | br_stp_port_timer_init(p); | 284 | br_stp_port_timer_init(p); |
281 | 285 | ||
282 | kobject_init(&p->kobj); | 286 | kobject_init(&p->kobj); |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 74258d86f256..3a534e94c7f3 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -82,7 +82,7 @@ struct net_bridge_port | |||
82 | struct timer_list hold_timer; | 82 | struct timer_list hold_timer; |
83 | struct timer_list message_age_timer; | 83 | struct timer_list message_age_timer; |
84 | struct kobject kobj; | 84 | struct kobject kobj; |
85 | struct work_struct carrier_check; | 85 | struct delayed_work carrier_check; |
86 | struct rcu_head rcu; | 86 | struct rcu_head rcu; |
87 | }; | 87 | }; |
88 | 88 | ||
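
The bridge port uses the _NAR ("non-auto-release") variant, which, as used in the br_if.c hunk above, leaves the item's pending state for the handler to clear itself via work_release() before it goes on to sleep or requeue. The sketch below only mirrors the shape shown there and assumes INIT_DELAYED_WORK_NAR() and work_release() behave as they are used in that hunk; qux_port, qux_carrier_check and qux_port_init are hypothetical names:

#include <linux/workqueue.h>

struct qux_port {
	struct delayed_work carrier_check;
	int up;
};

static void qux_carrier_check(struct work_struct *work)
{
	struct qux_port *p =
		container_of(work, struct qux_port, carrier_check.work);

	/* NAR item: release the pending state ourselves, as br_if.c does */
	work_release(work);

	/* ... sleepable carrier/speed checks, then update p->up ... */
	p->up = 1;
}

static void qux_port_init(struct qux_port *p)
{
	p->up = 0;
	INIT_DELAYED_WORK_NAR(&p->carrier_check, qux_carrier_check);
	schedule_delayed_work(&p->carrier_check, 0);
}
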
diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 4b36114744c5..549a2ce951b0 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c | |||
@@ -34,8 +34,8 @@ enum lw_bits { | |||
34 | static unsigned long linkwatch_flags; | 34 | static unsigned long linkwatch_flags; |
35 | static unsigned long linkwatch_nextevent; | 35 | static unsigned long linkwatch_nextevent; |
36 | 36 | ||
37 | static void linkwatch_event(void *dummy); | 37 | static void linkwatch_event(struct work_struct *dummy); |
38 | static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); | 38 | static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); |
39 | 39 | ||
40 | static LIST_HEAD(lweventlist); | 40 | static LIST_HEAD(lweventlist); |
41 | static DEFINE_SPINLOCK(lweventlist_lock); | 41 | static DEFINE_SPINLOCK(lweventlist_lock); |
@@ -127,7 +127,7 @@ void linkwatch_run_queue(void) | |||
127 | } | 127 | } |
128 | 128 | ||
129 | 129 | ||
130 | static void linkwatch_event(void *dummy) | 130 | static void linkwatch_event(struct work_struct *dummy) |
131 | { | 131 | { |
132 | /* Limit the number of linkwatch events to one | 132 | /* Limit the number of linkwatch events to one |
133 | * per second so that a runaway driver does not | 133 | * per second so that a runaway driver does not |
@@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev) | |||
171 | unsigned long delay = linkwatch_nextevent - jiffies; | 171 | unsigned long delay = linkwatch_nextevent - jiffies; |
172 | 172 | ||
173 | /* If we wrap around we'll delay it by at most HZ. */ | 173 | /* If we wrap around we'll delay it by at most HZ. */ |
174 | if (!delay || delay > HZ) | 174 | if (delay > HZ) |
175 | schedule_work(&linkwatch_work); | 175 | delay = 0; |
176 | else | 176 | schedule_delayed_work(&linkwatch_work, delay); |
177 | schedule_delayed_work(&linkwatch_work, delay); | ||
178 | } | 177 | } |
179 | } | 178 | } |
180 | } | 179 | } |
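
Because linkwatch_work is file-scope, the conversion uses DECLARE_DELAYED_WORK() and then collapses the old schedule_work()/schedule_delayed_work() branch into one call by clamping the delay to zero. A reduced sketch of that idiom, with illustrative names (event_work, fire_event, EVENT_INTERVAL) rather than the ones in link_watch.c:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define EVENT_INTERVAL	HZ		/* at most one run per second */

static void rate_limited_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(event_work, rate_limited_event);

static unsigned long next_event;

static void rate_limited_event(struct work_struct *dummy)
{
	next_event = jiffies + EVENT_INTERVAL;
	/* ... process the queued events ... */
}

static void fire_event(void)
{
	unsigned long delay = next_event - jiffies;

	/* if the computed delay is nonsense (wrap-around), run at once */
	if (delay > EVENT_INTERVAL)
		delay = 0;
	schedule_delayed_work(&event_work, delay);
}
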
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 3c58846fcaa5..b3c559b9ac35 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -50,9 +50,10 @@ static atomic_t trapped; | |||
50 | static void zap_completion_queue(void); | 50 | static void zap_completion_queue(void); |
51 | static void arp_reply(struct sk_buff *skb); | 51 | static void arp_reply(struct sk_buff *skb); |
52 | 52 | ||
53 | static void queue_process(void *p) | 53 | static void queue_process(struct work_struct *work) |
54 | { | 54 | { |
55 | struct netpoll_info *npinfo = p; | 55 | struct netpoll_info *npinfo = |
56 | container_of(work, struct netpoll_info, tx_work.work); | ||
56 | struct sk_buff *skb; | 57 | struct sk_buff *skb; |
57 | 58 | ||
58 | while ((skb = skb_dequeue(&npinfo->txq))) { | 59 | while ((skb = skb_dequeue(&npinfo->txq))) { |
@@ -72,8 +73,6 @@ static void queue_process(void *p) | |||
72 | schedule_delayed_work(&npinfo->tx_work, HZ/10); | 73 | schedule_delayed_work(&npinfo->tx_work, HZ/10); |
73 | return; | 74 | return; |
74 | } | 75 | } |
75 | |||
76 | netif_tx_unlock_bh(dev); | ||
77 | } | 76 | } |
78 | } | 77 | } |
79 | 78 | ||
@@ -263,7 +262,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | |||
263 | 262 | ||
264 | if (status != NETDEV_TX_OK) { | 263 | if (status != NETDEV_TX_OK) { |
265 | skb_queue_tail(&npinfo->txq, skb); | 264 | skb_queue_tail(&npinfo->txq, skb); |
266 | schedule_work(&npinfo->tx_work); | 265 | schedule_delayed_work(&npinfo->tx_work,0); |
267 | } | 266 | } |
268 | } | 267 | } |
269 | 268 | ||
@@ -628,7 +627,7 @@ int netpoll_setup(struct netpoll *np) | |||
628 | spin_lock_init(&npinfo->rx_lock); | 627 | spin_lock_init(&npinfo->rx_lock); |
629 | skb_queue_head_init(&npinfo->arp_tx); | 628 | skb_queue_head_init(&npinfo->arp_tx); |
630 | skb_queue_head_init(&npinfo->txq); | 629 | skb_queue_head_init(&npinfo->txq); |
631 | INIT_WORK(&npinfo->tx_work, queue_process, npinfo); | 630 | INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); |
632 | 631 | ||
633 | atomic_set(&npinfo->refcnt, 1); | 632 | atomic_set(&npinfo->refcnt, 1); |
634 | } else { | 633 | } else { |
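
Two smaller consequences show up in netpoll.c: an immediate kick of a delayed_work is written schedule_delayed_work(&npinfo->tx_work, 0), and a handler that hits a busy device requeues itself with a short delay instead of spinning. A sketch of the requeue pattern, assuming the post-change API; txq_ctx and try_to_send() are hypothetical, the latter standing in for whatever actually transmits the frame:

#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct txq_ctx {
	struct sk_buff_head txq;
	struct delayed_work tx_work;
};

static int try_to_send(struct sk_buff *skb);	/* hypothetical driver hook */

static void txq_process(struct work_struct *work)
{
	struct txq_ctx *ctx =
		container_of(work, struct txq_ctx, tx_work.work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ctx->txq))) {
		if (!try_to_send(skb)) {
			/* device busy: put it back and retry in ~100ms */
			skb_queue_head(&ctx->txq, skb);
			schedule_delayed_work(&ctx->tx_work, HZ / 10);
			return;
		}
	}
}
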
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 7b52f2a03eef..4c9e26775f72 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -32,8 +32,7 @@ struct inet_timewait_death_row dccp_death_row = { | |||
32 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 32 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
33 | (unsigned long)&dccp_death_row), | 33 | (unsigned long)&dccp_death_row), |
34 | .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, | 34 | .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, |
35 | inet_twdr_twkill_work, | 35 | inet_twdr_twkill_work), |
36 | &dccp_death_row), | ||
37 | /* Short-time timewait calendar */ | 36 | /* Short-time timewait calendar */ |
38 | 37 | ||
39 | .twcal_hand = -1, | 38 | .twcal_hand = -1, |
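
Static initializers change in the same way: __WORK_INITIALIZER() now takes only the work item and its handler, since there is no longer a data pointer to store. A small sketch of the two-argument form used for twkill_work above; struct reaper and reaper_fn are illustrative names:

#include <linux/workqueue.h>

struct reaper {
	int slots;
	struct work_struct kill_work;
};

static void reaper_fn(struct work_struct *work);

static struct reaper my_reaper = {
	.slots     = 8,
	/* old form: __WORK_INITIALIZER(my_reaper.kill_work, reaper_fn, &my_reaper) */
	.kill_work = __WORK_INITIALIZER(my_reaper.kill_work, reaper_fn),
};

static void reaper_fn(struct work_struct *work)
{
	struct reaper *r = container_of(work, struct reaper, kill_work);

	r->slots = 0;
}
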
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index cf51c87a971d..08386c102954 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c | |||
@@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft | |||
58 | } | 58 | } |
59 | 59 | ||
60 | void | 60 | void |
61 | ieee80211softmac_assoc_timeout(void *d) | 61 | ieee80211softmac_assoc_timeout(struct work_struct *work) |
62 | { | 62 | { |
63 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; | 63 | struct ieee80211softmac_device *mac = |
64 | container_of(work, struct ieee80211softmac_device, | ||
65 | associnfo.timeout.work); | ||
64 | struct ieee80211softmac_network *n; | 66 | struct ieee80211softmac_network *n; |
65 | 67 | ||
66 | mutex_lock(&mac->associnfo.mutex); | 68 | mutex_lock(&mac->associnfo.mutex); |
@@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void | |||
186 | 188 | ||
187 | /* This function is called to handle userspace requests (asynchronously) */ | 189 | /* This function is called to handle userspace requests (asynchronously) */ |
188 | void | 190 | void |
189 | ieee80211softmac_assoc_work(void *d) | 191 | ieee80211softmac_assoc_work(struct work_struct *work) |
190 | { | 192 | { |
191 | struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; | 193 | struct ieee80211softmac_device *mac = |
194 | container_of(work, struct ieee80211softmac_device, | ||
195 | associnfo.work.work); | ||
192 | struct ieee80211softmac_network *found = NULL; | 196 | struct ieee80211softmac_network *found = NULL; |
193 | struct ieee80211_network *net = NULL, *best = NULL; | 197 | struct ieee80211_network *net = NULL, *best = NULL; |
194 | int bssvalid; | 198 | int bssvalid; |
@@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, | |||
412 | network->authenticated = 0; | 416 | network->authenticated = 0; |
413 | /* we don't want to do this more than once ... */ | 417 | /* we don't want to do this more than once ... */ |
414 | network->auth_desynced_once = 1; | 418 | network->auth_desynced_once = 1; |
415 | schedule_work(&mac->associnfo.work); | 419 | schedule_delayed_work(&mac->associnfo.work, 0); |
416 | break; | 420 | break; |
417 | } | 421 | } |
418 | default: | 422 | default: |
@@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
446 | ieee80211softmac_disassoc(mac); | 450 | ieee80211softmac_disassoc(mac); |
447 | 451 | ||
448 | /* try to reassociate */ | 452 | /* try to reassociate */ |
449 | schedule_work(&mac->associnfo.work); | 453 | schedule_delayed_work(&mac->associnfo.work, 0); |
450 | 454 | ||
451 | return 0; | 455 | return 0; |
452 | } | 456 | } |
@@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, | |||
466 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); | 470 | dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); |
467 | return 0; | 471 | return 0; |
468 | } | 472 | } |
469 | schedule_work(&mac->associnfo.work); | 473 | schedule_delayed_work(&mac->associnfo.work, 0); |
470 | 474 | ||
471 | return 0; | 475 | return 0; |
472 | } | 476 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 0612015f1c78..6012705aa4f8 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include "ieee80211softmac_priv.h" | 27 | #include "ieee80211softmac_priv.h" |
28 | 28 | ||
29 | static void ieee80211softmac_auth_queue(void *data); | 29 | static void ieee80211softmac_auth_queue(struct work_struct *work); |
30 | 30 | ||
31 | /* Queues an auth request to the desired AP */ | 31 | /* Queues an auth request to the desired AP */ |
32 | int | 32 | int |
@@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, | |||
54 | auth->mac = mac; | 54 | auth->mac = mac; |
55 | auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; | 55 | auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; |
56 | auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; | 56 | auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; |
57 | INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); | 57 | INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); |
58 | 58 | ||
59 | /* Lock (for list) */ | 59 | /* Lock (for list) */ |
60 | spin_lock_irqsave(&mac->lock, flags); | 60 | spin_lock_irqsave(&mac->lock, flags); |
61 | 61 | ||
62 | /* add to list */ | 62 | /* add to list */ |
63 | list_add_tail(&auth->list, &mac->auth_queue); | 63 | list_add_tail(&auth->list, &mac->auth_queue); |
64 | schedule_work(&auth->work); | 64 | schedule_delayed_work(&auth->work, 0); |
65 | spin_unlock_irqrestore(&mac->lock, flags); | 65 | spin_unlock_irqrestore(&mac->lock, flags); |
66 | 66 | ||
67 | return 0; | 67 | return 0; |
@@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, | |||
70 | 70 | ||
71 | /* Sends an auth request to the desired AP and handles timeouts */ | 71 | /* Sends an auth request to the desired AP and handles timeouts */ |
72 | static void | 72 | static void |
73 | ieee80211softmac_auth_queue(void *data) | 73 | ieee80211softmac_auth_queue(struct work_struct *work) |
74 | { | 74 | { |
75 | struct ieee80211softmac_device *mac; | 75 | struct ieee80211softmac_device *mac; |
76 | struct ieee80211softmac_auth_queue_item *auth; | 76 | struct ieee80211softmac_auth_queue_item *auth; |
77 | struct ieee80211softmac_network *net; | 77 | struct ieee80211softmac_network *net; |
78 | unsigned long flags; | 78 | unsigned long flags; |
79 | 79 | ||
80 | auth = (struct ieee80211softmac_auth_queue_item *)data; | 80 | auth = container_of(work, struct ieee80211softmac_auth_queue_item, |
81 | work.work); | ||
81 | net = auth->net; | 82 | net = auth->net; |
82 | mac = auth->mac; | 83 | mac = auth->mac; |
83 | 84 | ||
@@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data) | |||
118 | 119 | ||
119 | /* Sends a response to an auth challenge (for shared key auth). */ | 120 | /* Sends a response to an auth challenge (for shared key auth). */ |
120 | static void | 121 | static void |
121 | ieee80211softmac_auth_challenge_response(void *_aq) | 122 | ieee80211softmac_auth_challenge_response(struct work_struct *work) |
122 | { | 123 | { |
123 | struct ieee80211softmac_auth_queue_item *aq = _aq; | 124 | struct ieee80211softmac_auth_queue_item *aq = |
125 | container_of(work, struct ieee80211softmac_auth_queue_item, | ||
126 | work.work); | ||
124 | 127 | ||
125 | /* Send our response */ | 128 | /* Send our response */ |
126 | ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); | 129 | ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); |
@@ -234,8 +237,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) | |||
234 | * we have obviously already sent the initial auth | 237 | * we have obviously already sent the initial auth |
235 | * request. */ | 238 | * request. */ |
236 | cancel_delayed_work(&aq->work); | 239 | cancel_delayed_work(&aq->work); |
237 | INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); | 240 | INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); |
238 | schedule_work(&aq->work); | 241 | schedule_delayed_work(&aq->work, 0); |
239 | spin_unlock_irqrestore(&mac->lock, flags); | 242 | spin_unlock_irqrestore(&mac->lock, flags); |
240 | return 0; | 243 | return 0; |
241 | case IEEE80211SOFTMAC_AUTH_SHARED_PASS: | 244 | case IEEE80211SOFTMAC_AUTH_SHARED_PASS: |
@@ -398,6 +401,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de | |||
398 | ieee80211softmac_deauth_from_net(mac, net); | 401 | ieee80211softmac_deauth_from_net(mac, net); |
399 | 402 | ||
400 | /* let's try to re-associate */ | 403 | /* let's try to re-associate */ |
401 | schedule_work(&mac->associnfo.work); | 404 | schedule_delayed_work(&mac->associnfo.work, 0); |
402 | return 0; | 405 | return 0; |
403 | } | 406 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c index f34fa2ef666b..b9015656cfb3 100644 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/net/ieee80211/softmac/ieee80211softmac_event.c | |||
@@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { | |||
73 | 73 | ||
74 | 74 | ||
75 | static void | 75 | static void |
76 | ieee80211softmac_notify_callback(void *d) | 76 | ieee80211softmac_notify_callback(struct work_struct *work) |
77 | { | 77 | { |
78 | struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; | 78 | struct ieee80211softmac_event *pevent = |
79 | kfree(d); | 79 | container_of(work, struct ieee80211softmac_event, work.work); |
80 | struct ieee80211softmac_event event = *pevent; | ||
81 | kfree(pevent); | ||
80 | 82 | ||
81 | event.fun(event.mac->dev, event.event_type, event.context); | 83 | event.fun(event.mac->dev, event.event_type, event.context); |
82 | } | 84 | } |
@@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, | |||
99 | return -ENOMEM; | 101 | return -ENOMEM; |
100 | 102 | ||
101 | eventptr->event_type = event; | 103 | eventptr->event_type = event; |
102 | INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); | 104 | INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); |
103 | eventptr->fun = fun; | 105 | eventptr->fun = fun; |
104 | eventptr->context = context; | 106 | eventptr->context = context; |
105 | eventptr->mac = mac; | 107 | eventptr->mac = mac; |
@@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve | |||
170 | /* User may have subscribed to ANY event, so | 172 | /* User may have subscribed to ANY event, so |
171 | * we tell them which event triggered it. */ | 173 | * we tell them which event triggered it. */ |
172 | eventptr->event_type = event; | 174 | eventptr->event_type = event; |
173 | schedule_work(&eventptr->work); | 175 | schedule_delayed_work(&eventptr->work, 0); |
174 | } | 176 | } |
175 | } | 177 | } |
176 | } | 178 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 33aff4f4a471..256207b71dc9 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c | |||
@@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) | |||
58 | INIT_LIST_HEAD(&softmac->events); | 58 | INIT_LIST_HEAD(&softmac->events); |
59 | 59 | ||
60 | mutex_init(&softmac->associnfo.mutex); | 60 | mutex_init(&softmac->associnfo.mutex); |
61 | INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); | 61 | INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); |
62 | INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); | 62 | INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); |
63 | softmac->start_scan = ieee80211softmac_start_scan_implementation; | 63 | softmac->start_scan = ieee80211softmac_start_scan_implementation; |
64 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; | 64 | softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; |
65 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; | 65 | softmac->stop_scan = ieee80211softmac_stop_scan_implementation; |
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h index 0642e090b8a7..c0dbe070e548 100644 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/net/ieee80211/softmac/ieee80211softmac_priv.h | |||
@@ -78,7 +78,7 @@ | |||
78 | /* private definitions and prototypes */ | 78 | /* private definitions and prototypes */ |
79 | 79 | ||
80 | /*** prototypes from _scan.c */ | 80 | /*** prototypes from _scan.c */ |
81 | void ieee80211softmac_scan(void *sm); | 81 | void ieee80211softmac_scan(struct work_struct *work); |
82 | /* for internal use if scanning is needed */ | 82 | /* for internal use if scanning is needed */ |
83 | int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); | 83 | int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); |
84 | void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); | 84 | void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); |
@@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au | |||
149 | int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); | 149 | int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); |
150 | 150 | ||
151 | /*** prototypes from _assoc.c */ | 151 | /*** prototypes from _assoc.c */ |
152 | void ieee80211softmac_assoc_work(void *d); | 152 | void ieee80211softmac_assoc_work(struct work_struct *work); |
153 | int ieee80211softmac_handle_assoc_response(struct net_device * dev, | 153 | int ieee80211softmac_handle_assoc_response(struct net_device * dev, |
154 | struct ieee80211_assoc_response * resp, | 154 | struct ieee80211_assoc_response * resp, |
155 | struct ieee80211_network * network); | 155 | struct ieee80211_network * network); |
@@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev, | |||
157 | struct ieee80211_disassoc * disassoc); | 157 | struct ieee80211_disassoc * disassoc); |
158 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, | 158 | int ieee80211softmac_handle_reassoc_req(struct net_device * dev, |
159 | struct ieee80211_reassoc_request * reassoc); | 159 | struct ieee80211_reassoc_request * reassoc); |
160 | void ieee80211softmac_assoc_timeout(void *d); | 160 | void ieee80211softmac_assoc_timeout(struct work_struct *work); |
161 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); | 161 | void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); |
162 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); | 162 | void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); |
163 | 163 | ||
@@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item { | |||
207 | struct ieee80211softmac_device *mac; /* SoftMAC device */ | 207 | struct ieee80211softmac_device *mac; /* SoftMAC device */ |
208 | u8 retry; /* Retry limit */ | 208 | u8 retry; /* Retry limit */ |
209 | u8 state; /* Auth State */ | 209 | u8 state; /* Auth State */ |
210 | struct work_struct work; /* Work queue */ | 210 | struct delayed_work work; /* Work queue */ |
211 | }; | 211 | }; |
212 | 212 | ||
213 | /* scanning information */ | 213 | /* scanning information */ |
@@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo { | |||
219 | stop:1; | 219 | stop:1; |
220 | u8 skip_flags; | 220 | u8 skip_flags; |
221 | struct completion finished; | 221 | struct completion finished; |
222 | struct work_struct softmac_scan; | 222 | struct delayed_work softmac_scan; |
223 | struct ieee80211softmac_device *mac; | ||
223 | }; | 224 | }; |
224 | 225 | ||
225 | /* private event struct */ | 226 | /* private event struct */ |
@@ -227,7 +228,7 @@ struct ieee80211softmac_event { | |||
227 | struct list_head list; | 228 | struct list_head list; |
228 | int event_type; | 229 | int event_type; |
229 | void *event_context; | 230 | void *event_context; |
230 | struct work_struct work; | 231 | struct delayed_work work; |
231 | notify_function_ptr fun; | 232 | notify_function_ptr fun; |
232 | void *context; | 233 | void *context; |
233 | struct ieee80211softmac_device *mac; | 234 | struct ieee80211softmac_device *mac; |
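
ieee80211softmac_scaninfo grows a mac back-pointer because the scan handler used to get the softmac device as its void * argument; with the new prototype it can only reach the scaninfo through container_of(), so anything else it needs must be stored in that structure. A generic sketch of the back-pointer idiom; scanner, scan_state, scan_fn and scan_state_init are illustrative names:

#include <linux/workqueue.h>

struct scanner;				/* hypothetical owning object */

struct scan_state {
	struct delayed_work scan_work;
	struct scanner *owner;		/* back-pointer filled in at init time */
};

static void scan_fn(struct work_struct *work)
{
	struct scan_state *st =
		container_of(work, struct scan_state, scan_work.work);
	struct scanner *owner = st->owner;

	/* ... drive the scan through owner ... */
	(void)owner;
}

static void scan_state_init(struct scan_state *st, struct scanner *owner)
{
	st->owner = owner;
	INIT_DELAYED_WORK(&st->scan_work, scan_fn);
}
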
diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index 5507feab32de..0c85d6c24cdb 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c | |||
@@ -90,12 +90,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm) | |||
90 | 90 | ||
91 | 91 | ||
92 | /* internal scanning implementation follows */ | 92 | /* internal scanning implementation follows */ |
93 | void ieee80211softmac_scan(void *d) | 93 | void ieee80211softmac_scan(struct work_struct *work) |
94 | { | 94 | { |
95 | int invalid_channel; | 95 | int invalid_channel; |
96 | u8 current_channel_idx; | 96 | u8 current_channel_idx; |
97 | struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; | 97 | struct ieee80211softmac_scaninfo *si = |
98 | struct ieee80211softmac_scaninfo *si = sm->scaninfo; | 98 | container_of(work, struct ieee80211softmac_scaninfo, |
99 | softmac_scan.work); | ||
100 | struct ieee80211softmac_device *sm = si->mac; | ||
99 | unsigned long flags; | 101 | unsigned long flags; |
100 | 102 | ||
101 | while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { | 103 | while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { |
@@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802 | |||
146 | struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); | 148 | struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); |
147 | if (unlikely(!info)) | 149 | if (unlikely(!info)) |
148 | return NULL; | 150 | return NULL; |
149 | INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); | 151 | INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); |
152 | info->mac = mac; | ||
150 | init_completion(&info->finished); | 153 | init_completion(&info->finished); |
151 | return info; | 154 | return info; |
152 | } | 155 | } |
@@ -187,7 +190,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) | |||
187 | sm->scaninfo->started = 1; | 190 | sm->scaninfo->started = 1; |
188 | sm->scaninfo->stop = 0; | 191 | sm->scaninfo->stop = 0; |
189 | INIT_COMPLETION(sm->scaninfo->finished); | 192 | INIT_COMPLETION(sm->scaninfo->finished); |
190 | schedule_work(&sm->scaninfo->softmac_scan); | 193 | schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); |
191 | spin_unlock_irqrestore(&sm->lock, flags); | 194 | spin_unlock_irqrestore(&sm->lock, flags); |
192 | return 0; | 195 | return 0; |
193 | } | 196 | } |
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 23068a830f7d..2ffaebd21c53 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c | |||
@@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, | |||
122 | 122 | ||
123 | sm->associnfo.associating = 1; | 123 | sm->associnfo.associating = 1; |
124 | /* queue lower level code to do work (if necessary) */ | 124 | /* queue lower level code to do work (if necessary) */ |
125 | schedule_work(&sm->associnfo.work); | 125 | schedule_delayed_work(&sm->associnfo.work, 0); |
126 | out: | 126 | out: |
127 | mutex_unlock(&sm->associnfo.mutex); | 127 | mutex_unlock(&sm->associnfo.mutex); |
128 | 128 | ||
@@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, | |||
356 | /* force reassociation */ | 356 | /* force reassociation */ |
357 | mac->associnfo.bssvalid = 0; | 357 | mac->associnfo.bssvalid = 0; |
358 | if (mac->associnfo.associated) | 358 | if (mac->associnfo.associated) |
359 | schedule_work(&mac->associnfo.work); | 359 | schedule_delayed_work(&mac->associnfo.work, 0); |
360 | } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { | 360 | } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { |
361 | /* the bssid we have is no longer fixed */ | 361 | /* the bssid we have is no longer fixed */ |
362 | mac->associnfo.bssfixed = 0; | 362 | mac->associnfo.bssfixed = 0; |
@@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, | |||
373 | /* tell the other code that this bssid should be used no matter what */ | 373 | /* tell the other code that this bssid should be used no matter what */ |
374 | mac->associnfo.bssfixed = 1; | 374 | mac->associnfo.bssfixed = 1; |
375 | /* queue associate if new bssid or (old one again and not associated) */ | 375 | /* queue associate if new bssid or (old one again and not associated) */ |
376 | schedule_work(&mac->associnfo.work); | 376 | schedule_delayed_work(&mac->associnfo.work, 0); |
377 | } | 377 | } |
378 | 378 | ||
379 | out: | 379 | out: |
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index cdd805344c61..8c74f9168b7d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman); | |||
197 | 197 | ||
198 | extern void twkill_slots_invalid(void); | 198 | extern void twkill_slots_invalid(void); |
199 | 199 | ||
200 | void inet_twdr_twkill_work(void *data) | 200 | void inet_twdr_twkill_work(struct work_struct *work) |
201 | { | 201 | { |
202 | struct inet_timewait_death_row *twdr = data; | 202 | struct inet_timewait_death_row *twdr = |
203 | container_of(work, struct inet_timewait_death_row, twkill_work); | ||
203 | int i; | 204 | int i; |
204 | 205 | ||
205 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) | 206 | if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) |
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index f261616e4602..9b933381ebbe 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -221,10 +221,10 @@ static void update_defense_level(void) | |||
221 | * Timer for checking the defense | 221 | * Timer for checking the defense |
222 | */ | 222 | */ |
223 | #define DEFENSE_TIMER_PERIOD 1*HZ | 223 | #define DEFENSE_TIMER_PERIOD 1*HZ |
224 | static void defense_work_handler(void *data); | 224 | static void defense_work_handler(struct work_struct *work); |
225 | static DECLARE_WORK(defense_work, defense_work_handler, NULL); | 225 | static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); |
226 | 226 | ||
227 | static void defense_work_handler(void *data) | 227 | static void defense_work_handler(struct work_struct *work) |
228 | { | 228 | { |
229 | update_defense_level(); | 229 | update_defense_level(); |
230 | if (atomic_read(&ip_vs_dropentry)) | 230 | if (atomic_read(&ip_vs_dropentry)) |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 6dddf59c1fb9..4a3889dd1943 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = { | |||
45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, | 45 | .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, |
46 | (unsigned long)&tcp_death_row), | 46 | (unsigned long)&tcp_death_row), |
47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, | 47 | .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, |
48 | inet_twdr_twkill_work, | 48 | inet_twdr_twkill_work), |
49 | &tcp_death_row), | ||
50 | /* Short-time timewait calendar */ | 49 | /* Short-time timewait calendar */ |
51 | 50 | ||
52 | .twcal_hand = -1, | 51 | .twcal_hand = -1, |
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index d50a02030ad7..262bda808d96 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c | |||
@@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty); | |||
61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); | 61 | static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); |
62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); | 62 | static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); |
63 | static void ircomm_tty_hangup(struct tty_struct *tty); | 63 | static void ircomm_tty_hangup(struct tty_struct *tty); |
64 | static void ircomm_tty_do_softint(void *private_); | 64 | static void ircomm_tty_do_softint(struct work_struct *work); |
65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); | 65 | static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); |
66 | static void ircomm_tty_stop(struct tty_struct *tty); | 66 | static void ircomm_tty_stop(struct tty_struct *tty); |
67 | 67 | ||
@@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
389 | self->flow = FLOW_STOP; | 389 | self->flow = FLOW_STOP; |
390 | 390 | ||
391 | self->line = line; | 391 | self->line = line; |
392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); | 392 | INIT_WORK(&self->tqueue, ircomm_tty_do_softint); |
393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; | 393 | self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; |
394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; | 394 | self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; |
395 | self->close_delay = 5*HZ/10; | 395 | self->close_delay = 5*HZ/10; |
@@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty) | |||
594 | } | 594 | } |
595 | 595 | ||
596 | /* | 596 | /* |
597 | * Function ircomm_tty_do_softint (private_) | 597 | * Function ircomm_tty_do_softint (work) |
598 | * | 598 | * |
599 | * We use this routine to give the write wakeup to the user at at a | 599 | * We use this routine to give the write wakeup to the user at at a |
600 | * safe time (as fast as possible after write have completed). This | 600 | * safe time (as fast as possible after write have completed). This |
601 | * can be compared to the Tx interrupt. | 601 | * can be compared to the Tx interrupt. |
602 | */ | 602 | */ |
603 | static void ircomm_tty_do_softint(void *private_) | 603 | static void ircomm_tty_do_softint(struct work_struct *work) |
604 | { | 604 | { |
605 | struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; | 605 | struct ircomm_tty_cb *self = |
606 | container_of(work, struct ircomm_tty_cb, tqueue); | ||
606 | struct tty_struct *tty; | 607 | struct tty_struct *tty; |
607 | unsigned long flags; | 608 | unsigned long flags; |
608 | struct sk_buff *skb, *ctrl_skb; | 609 | struct sk_buff *skb, *ctrl_skb; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 39471d3b31b9..ad0057db0f91 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal functions. */ | 63 | /* Forward declarations for internal functions. */ |
64 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc); | 64 | static void sctp_assoc_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | 66 | ||
67 | /* 1st Level Abstractions. */ | 67 | /* 1st Level Abstractions. */ |
@@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
269 | 269 | ||
270 | /* Create an input queue. */ | 270 | /* Create an input queue. */ |
271 | sctp_inq_init(&asoc->base.inqueue); | 271 | sctp_inq_init(&asoc->base.inqueue); |
272 | sctp_inq_set_th_handler(&asoc->base.inqueue, | 272 | sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); |
273 | (void (*)(void *))sctp_assoc_bh_rcv, | ||
274 | asoc); | ||
275 | 273 | ||
276 | /* Create an output queue. */ | 274 | /* Create an output queue. */ |
277 | sctp_outq_init(asoc, &asoc->outqueue); | 275 | sctp_outq_init(asoc, &asoc->outqueue); |
@@ -946,8 +944,11 @@ out: | |||
946 | } | 944 | } |
947 | 945 | ||
948 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ | 946 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ |
949 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc) | 947 | static void sctp_assoc_bh_rcv(struct work_struct *work) |
950 | { | 948 | { |
949 | struct sctp_association *asoc = | ||
950 | container_of(work, struct sctp_association, | ||
951 | base.inqueue.immediate); | ||
951 | struct sctp_endpoint *ep; | 952 | struct sctp_endpoint *ep; |
952 | struct sctp_chunk *chunk; | 953 | struct sctp_chunk *chunk; |
953 | struct sock *sk; | 954 | struct sock *sk; |
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 33a42e90c32f..129756908da4 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #include <net/sctp/sm.h> | 61 | #include <net/sctp/sm.h> |
62 | 62 | ||
63 | /* Forward declarations for internal helpers. */ | 63 | /* Forward declarations for internal helpers. */ |
64 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); | 64 | static void sctp_endpoint_bh_rcv(struct work_struct *work); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * Initialize the base fields of the endpoint structure. | 67 | * Initialize the base fields of the endpoint structure. |
@@ -89,8 +89,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | |||
89 | sctp_inq_init(&ep->base.inqueue); | 89 | sctp_inq_init(&ep->base.inqueue); |
90 | 90 | ||
91 | /* Set its top-half handler */ | 91 | /* Set its top-half handler */ |
92 | sctp_inq_set_th_handler(&ep->base.inqueue, | 92 | sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); |
93 | (void (*)(void *))sctp_endpoint_bh_rcv, ep); | ||
94 | 93 | ||
95 | /* Initialize the bind addr area */ | 94 | /* Initialize the bind addr area */ |
96 | sctp_bind_addr_init(&ep->base.bind_addr, 0); | 95 | sctp_bind_addr_init(&ep->base.bind_addr, 0); |
@@ -318,8 +317,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, | |||
318 | /* Do delayed input processing. This is scheduled by sctp_rcv(). | 317 | /* Do delayed input processing. This is scheduled by sctp_rcv(). |
319 | * This may be called on BH or task time. | 318 | * This may be called on BH or task time. |
320 | */ | 319 | */ |
321 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) | 320 | static void sctp_endpoint_bh_rcv(struct work_struct *work) |
322 | { | 321 | { |
322 | struct sctp_endpoint *ep = | ||
323 | container_of(work, struct sctp_endpoint, | ||
324 | base.inqueue.immediate); | ||
323 | struct sctp_association *asoc; | 325 | struct sctp_association *asoc; |
324 | struct sock *sk; | 326 | struct sock *sk; |
325 | struct sctp_transport *transport; | 327 | struct sctp_transport *transport; |
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cf6deed7e849..71b07466e880 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c | |||
@@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue) | |||
54 | queue->in_progress = NULL; | 54 | queue->in_progress = NULL; |
55 | 55 | ||
56 | /* Create a task for delivering data. */ | 56 | /* Create a task for delivering data. */ |
57 | INIT_WORK(&queue->immediate, NULL, NULL); | 57 | INIT_WORK(&queue->immediate, NULL); |
58 | 58 | ||
59 | queue->malloced = 0; | 59 | queue->malloced = 0; |
60 | } | 60 | } |
@@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) | |||
97 | * on the BH related data structures. | 97 | * on the BH related data structures. |
98 | */ | 98 | */ |
99 | list_add_tail(&chunk->list, &q->in_chunk_list); | 99 | list_add_tail(&chunk->list, &q->in_chunk_list); |
100 | q->immediate.func(q->immediate.data); | 100 | q->immediate.func(&q->immediate); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* Extract a chunk from an SCTP inqueue. | 103 | /* Extract a chunk from an SCTP inqueue. |
@@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | |||
205 | * The intent is that this routine will pull stuff out of the | 205 | * The intent is that this routine will pull stuff out of the |
206 | * inqueue and process it. | 206 | * inqueue and process it. |
207 | */ | 207 | */ |
208 | void sctp_inq_set_th_handler(struct sctp_inq *q, | 208 | void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) |
209 | void (*callback)(void *), void *arg) | ||
210 | { | 209 | { |
211 | INIT_WORK(&q->immediate, callback, arg); | 210 | INIT_WORK(&q->immediate, callback); |
212 | } | 211 | } |
213 | 212 | ||
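
sctp_inq_set_th_handler() now takes a work_func_t, the typedef this API change introduces for void (*)(struct work_struct *), and callers invoke the stored handler by passing it the embedded work item itself, as q->immediate.func(&q->immediate) does above. A condensed sketch, assuming the post-change API; struct inq and the inq_* helpers are generic names, not the SCTP ones:

#include <linux/workqueue.h>

struct inq {
	struct work_struct immediate;
	/* ... list of pending chunks ... */
};

static void inq_init(struct inq *q)
{
	INIT_WORK(&q->immediate, NULL);		/* handler installed later */
}

static void inq_set_handler(struct inq *q, work_func_t callback)
{
	INIT_WORK(&q->immediate, callback);
}

static void inq_push(struct inq *q)
{
	/* run the top-half handler synchronously, in the caller's context */
	q->immediate.func(&q->immediate);
}
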
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 00cb388ece03..d96fd466a9a4 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -284,8 +284,8 @@ static struct file_operations cache_file_operations; | |||
284 | static struct file_operations content_file_operations; | 284 | static struct file_operations content_file_operations; |
285 | static struct file_operations cache_flush_operations; | 285 | static struct file_operations cache_flush_operations; |
286 | 286 | ||
287 | static void do_cache_clean(void *data); | 287 | static void do_cache_clean(struct work_struct *work); |
288 | static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); | 288 | static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); |
289 | 289 | ||
290 | void cache_register(struct cache_detail *cd) | 290 | void cache_register(struct cache_detail *cd) |
291 | { | 291 | { |
@@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd) | |||
337 | spin_unlock(&cache_list_lock); | 337 | spin_unlock(&cache_list_lock); |
338 | 338 | ||
339 | /* start the cleaning process */ | 339 | /* start the cleaning process */ |
340 | schedule_work(&cache_cleaner); | 340 | schedule_delayed_work(&cache_cleaner, 0); |
341 | } | 341 | } |
342 | 342 | ||
343 | int cache_unregister(struct cache_detail *cd) | 343 | int cache_unregister(struct cache_detail *cd) |
@@ -461,7 +461,7 @@ static int cache_clean(void) | |||
461 | /* | 461 | /* |
462 | * We want to regularly clean the cache, so we need to schedule some work ... | 462 | * We want to regularly clean the cache, so we need to schedule some work ... |
463 | */ | 463 | */ |
464 | static void do_cache_clean(void *data) | 464 | static void do_cache_clean(struct work_struct *work) |
465 | { | 465 | { |
466 | int delay = 5; | 466 | int delay = 5; |
467 | if (cache_clean() == -1) | 467 | if (cache_clean() == -1) |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9a0b41a97f90..49dba5febbbd 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
@@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, | |||
54 | } | 54 | } |
55 | 55 | ||
56 | static void | 56 | static void |
57 | rpc_timeout_upcall_queue(void *data) | 57 | rpc_timeout_upcall_queue(struct work_struct *work) |
58 | { | 58 | { |
59 | LIST_HEAD(free_list); | 59 | LIST_HEAD(free_list); |
60 | struct rpc_inode *rpci = (struct rpc_inode *)data; | 60 | struct rpc_inode *rpci = |
61 | container_of(work, struct rpc_inode, queue_timeout.work); | ||
61 | struct inode *inode = &rpci->vfs_inode; | 62 | struct inode *inode = &rpci->vfs_inode; |
62 | void (*destroy_msg)(struct rpc_pipe_msg *); | 63 | void (*destroy_msg)(struct rpc_pipe_msg *); |
63 | 64 | ||
@@ -837,7 +838,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) | |||
837 | INIT_LIST_HEAD(&rpci->pipe); | 838 | INIT_LIST_HEAD(&rpci->pipe); |
838 | rpci->pipelen = 0; | 839 | rpci->pipelen = 0; |
839 | init_waitqueue_head(&rpci->waitq); | 840 | init_waitqueue_head(&rpci->waitq); |
840 | INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); | 841 | INIT_DELAYED_WORK(&rpci->queue_timeout, |
842 | rpc_timeout_upcall_queue); | ||
841 | rpci->ops = NULL; | 843 | rpci->ops = NULL; |
842 | } | 844 | } |
843 | } | 845 | } |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index f9fd66b1d48b..18a33d327012 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly; | |||
41 | 41 | ||
42 | static void __rpc_default_timer(struct rpc_task *task); | 42 | static void __rpc_default_timer(struct rpc_task *task); |
43 | static void rpciod_killall(void); | 43 | static void rpciod_killall(void); |
44 | static void rpc_async_schedule(void *); | 44 | static void rpc_async_schedule(struct work_struct *); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * RPC tasks sit here while waiting for conditions to improve. | 47 | * RPC tasks sit here while waiting for conditions to improve. |
@@ -323,7 +323,7 @@ static void rpc_make_runnable(struct rpc_task *task) | |||
323 | if (RPC_IS_ASYNC(task)) { | 323 | if (RPC_IS_ASYNC(task)) { |
324 | int status; | 324 | int status; |
325 | 325 | ||
326 | INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); | 326 | INIT_WORK(&task->u.tk_work, rpc_async_schedule); |
327 | status = queue_work(task->tk_workqueue, &task->u.tk_work); | 327 | status = queue_work(task->tk_workqueue, &task->u.tk_work); |
328 | if (status < 0) { | 328 | if (status < 0) { |
329 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); | 329 | printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); |
@@ -729,9 +729,9 @@ rpc_execute(struct rpc_task *task) | |||
729 | return __rpc_execute(task); | 729 | return __rpc_execute(task); |
730 | } | 730 | } |
731 | 731 | ||
732 | static void rpc_async_schedule(void *arg) | 732 | static void rpc_async_schedule(struct work_struct *work) |
733 | { | 733 | { |
734 | __rpc_execute((struct rpc_task *)arg); | 734 | __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); |
735 | } | 735 | } |
736 | 736 | ||
737 | /** | 737 | /** |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index f8ca0a93454c..7a3999f0a4a2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -477,9 +477,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
477 | return status; | 477 | return status; |
478 | } | 478 | } |
479 | 479 | ||
480 | static void xprt_autoclose(void *args) | 480 | static void xprt_autoclose(struct work_struct *work) |
481 | { | 481 | { |
482 | struct rpc_xprt *xprt = (struct rpc_xprt *)args; | 482 | struct rpc_xprt *xprt = |
483 | container_of(work, struct rpc_xprt, task_cleanup); | ||
483 | 484 | ||
484 | xprt_disconnect(xprt); | 485 | xprt_disconnect(xprt); |
485 | xprt->ops->close(xprt); | 486 | xprt->ops->close(xprt); |
@@ -916,7 +917,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si | |||
916 | 917 | ||
917 | INIT_LIST_HEAD(&xprt->free); | 918 | INIT_LIST_HEAD(&xprt->free); |
918 | INIT_LIST_HEAD(&xprt->recv); | 919 | INIT_LIST_HEAD(&xprt->recv); |
919 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); | 920 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
920 | init_timer(&xprt->timer); | 921 | init_timer(&xprt->timer); |
921 | xprt->timer.function = xprt_init_autodisconnect; | 922 | xprt->timer.function = xprt_init_autodisconnect; |
922 | xprt->timer.data = (unsigned long) xprt; | 923 | xprt->timer.data = (unsigned long) xprt; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 21438d7dc47b..3bb232eb5d90 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -235,7 +235,7 @@ struct sock_xprt { | |||
235 | /* | 235 | /* |
236 | * Connection of transports | 236 | * Connection of transports |
237 | */ | 237 | */ |
238 | struct work_struct connect_worker; | 238 | struct delayed_work connect_worker; |
239 | unsigned short port; | 239 | unsigned short port; |
240 | 240 | ||
241 | /* | 241 | /* |
@@ -1175,13 +1175,14 @@ static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) | |||
1175 | 1175 | ||
1176 | /** | 1176 | /** |
1177 | * xs_udp_connect_worker - set up a UDP socket | 1177 | * xs_udp_connect_worker - set up a UDP socket |
1178 | * @args: RPC transport to connect | 1178 | * @work: RPC transport to connect |
1179 | * | 1179 | * |
1180 | * Invoked by a work queue tasklet. | 1180 | * Invoked by a work queue tasklet. |
1181 | */ | 1181 | */ |
1182 | static void xs_udp_connect_worker(void *args) | 1182 | static void xs_udp_connect_worker(struct work_struct *work) |
1183 | { | 1183 | { |
1184 | struct sock_xprt *transport = (struct sock_xprt *)args; | 1184 | struct sock_xprt *transport = |
1185 | container_of(work, struct sock_xprt, connect_worker.work); | ||
1185 | struct rpc_xprt *xprt = &transport->xprt; | 1186 | struct rpc_xprt *xprt = &transport->xprt; |
1186 | struct socket *sock = transport->sock; | 1187 | struct socket *sock = transport->sock; |
1187 | int err, status = -EIO; | 1188 | int err, status = -EIO; |
@@ -1260,13 +1261,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1260 | 1261 | ||
1261 | /** | 1262 | /** |
1262 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint | 1263 | * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint |
1263 | * @args: RPC transport to connect | 1264 | * @work: RPC transport to connect |
1264 | * | 1265 | * |
1265 | * Invoked by a work queue tasklet. | 1266 | * Invoked by a work queue tasklet. |
1266 | */ | 1267 | */ |
1267 | static void xs_tcp_connect_worker(void *args) | 1268 | static void xs_tcp_connect_worker(struct work_struct *work) |
1268 | { | 1269 | { |
1269 | struct sock_xprt *transport = (struct sock_xprt *)args; | 1270 | struct sock_xprt *transport = |
1271 | container_of(work, struct sock_xprt, connect_worker.work); | ||
1270 | struct rpc_xprt *xprt = &transport->xprt; | 1272 | struct rpc_xprt *xprt = &transport->xprt; |
1271 | struct socket *sock = transport->sock; | 1273 | struct socket *sock = transport->sock; |
1272 | int err, status = -EIO; | 1274 | int err, status = -EIO; |
@@ -1380,7 +1382,7 @@ static void xs_connect(struct rpc_task *task) | |||
1380 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1382 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1381 | } else { | 1383 | } else { |
1382 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1384 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1383 | schedule_work(&transport->connect_worker); | 1385 | schedule_delayed_work(&transport->connect_worker, 0); |
1384 | 1386 | ||
1385 | /* flush_scheduled_work can sleep... */ | 1387 | /* flush_scheduled_work can sleep... */ |
1386 | if (!RPC_IS_ASYNC(task)) | 1388 | if (!RPC_IS_ASYNC(task)) |
@@ -1525,7 +1527,7 @@ struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1525 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1527 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1526 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1528 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1527 | 1529 | ||
1528 | INIT_WORK(&transport->connect_worker, xs_udp_connect_worker, transport); | 1530 | INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker); |
1529 | xprt->bind_timeout = XS_BIND_TO; | 1531 | xprt->bind_timeout = XS_BIND_TO; |
1530 | xprt->connect_timeout = XS_UDP_CONN_TO; | 1532 | xprt->connect_timeout = XS_UDP_CONN_TO; |
1531 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 1533 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
@@ -1569,7 +1571,7 @@ struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_ | |||
1569 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1571 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1570 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1572 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1571 | 1573 | ||
1572 | INIT_WORK(&transport->connect_worker, xs_tcp_connect_worker, transport); | 1574 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker); |
1573 | xprt->bind_timeout = XS_BIND_TO; | 1575 | xprt->bind_timeout = XS_BIND_TO; |
1574 | xprt->connect_timeout = XS_TCP_CONN_TO; | 1576 | xprt->connect_timeout = XS_TCP_CONN_TO; |
1575 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 1577 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 64d3938f74c4..f6c77bd36fdd 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -392,7 +392,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy) | |||
392 | xfrm_pol_put(policy); | 392 | xfrm_pol_put(policy); |
393 | } | 393 | } |
394 | 394 | ||
395 | static void xfrm_policy_gc_task(void *data) | 395 | static void xfrm_policy_gc_task(struct work_struct *work) |
396 | { | 396 | { |
397 | struct xfrm_policy *policy; | 397 | struct xfrm_policy *policy; |
398 | struct hlist_node *entry, *tmp; | 398 | struct hlist_node *entry, *tmp; |
@@ -580,7 +580,7 @@ static inline int xfrm_byidx_should_resize(int total) | |||
580 | 580 | ||
581 | static DEFINE_MUTEX(hash_resize_mutex); | 581 | static DEFINE_MUTEX(hash_resize_mutex); |
582 | 582 | ||
583 | static void xfrm_hash_resize(void *__unused) | 583 | static void xfrm_hash_resize(struct work_struct *__unused) |
584 | { | 584 | { |
585 | int dir, total; | 585 | int dir, total; |
586 | 586 | ||
@@ -597,7 +597,7 @@ static void xfrm_hash_resize(void *__unused) | |||
597 | mutex_unlock(&hash_resize_mutex); | 597 | mutex_unlock(&hash_resize_mutex); |
598 | } | 598 | } |
599 | 599 | ||
600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 600 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
601 | 601 | ||
602 | /* Generate new index... KAME seems to generate them ordered by cost | 602 | /* Generate new index... KAME seems to generate them ordered by cost |
603 | * of an absolute inpredictability of ordering of rules. This will not pass. */ | 603 | * of an absolute inpredictability of ordering of rules. This will not pass. */ |
@@ -2116,7 +2116,7 @@ static void __init xfrm_policy_init(void) | |||
2116 | panic("XFRM: failed to allocate bydst hash\n"); | 2116 | panic("XFRM: failed to allocate bydst hash\n"); |
2117 | } | 2117 | } |
2118 | 2118 | ||
2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); | 2119 | INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); |
2120 | register_netdevice_notifier(&xfrm_dev_notifier); | 2120 | register_netdevice_notifier(&xfrm_dev_notifier); |
2121 | } | 2121 | } |
2122 | 2122 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 864962bbda90..da54a64ccfa3 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void) | |||
115 | 115 | ||
116 | static DEFINE_MUTEX(hash_resize_mutex); | 116 | static DEFINE_MUTEX(hash_resize_mutex); |
117 | 117 | ||
118 | static void xfrm_hash_resize(void *__unused) | 118 | static void xfrm_hash_resize(struct work_struct *__unused) |
119 | { | 119 | { |
120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; | 120 | struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; |
121 | unsigned long nsize, osize; | 121 | unsigned long nsize, osize; |
@@ -168,7 +168,7 @@ out_unlock: | |||
168 | mutex_unlock(&hash_resize_mutex); | 168 | mutex_unlock(&hash_resize_mutex); |
169 | } | 169 | } |
170 | 170 | ||
171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); | 171 | static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); |
172 | 172 | ||
173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); | 173 | DECLARE_WAIT_QUEUE_HEAD(km_waitq); |
174 | EXPORT_SYMBOL(km_waitq); | 174 | EXPORT_SYMBOL(km_waitq); |
@@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) | |||
207 | kfree(x); | 207 | kfree(x); |
208 | } | 208 | } |
209 | 209 | ||
210 | static void xfrm_state_gc_task(void *data) | 210 | static void xfrm_state_gc_task(struct work_struct *data) |
211 | { | 211 | { |
212 | struct xfrm_state *x; | 212 | struct xfrm_state *x; |
213 | struct hlist_node *entry, *tmp; | 213 | struct hlist_node *entry, *tmp; |
@@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void) | |||
1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); | 1568 | panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); |
1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); | 1569 | xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); |
1570 | 1570 | ||
1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); | 1571 | INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); |
1572 | } | 1572 | } |
1573 | 1573 | ||
diff --git a/security/keys/key.c b/security/keys/key.c index 80de8c3e9cc3..70eacbe5abde 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -30,8 +30,8 @@ DEFINE_SPINLOCK(key_user_lock); | |||
30 | static LIST_HEAD(key_types_list); | 30 | static LIST_HEAD(key_types_list); |
31 | static DECLARE_RWSEM(key_types_sem); | 31 | static DECLARE_RWSEM(key_types_sem); |
32 | 32 | ||
33 | static void key_cleanup(void *data); | 33 | static void key_cleanup(struct work_struct *work); |
34 | static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL); | 34 | static DECLARE_WORK(key_cleanup_task, key_cleanup); |
35 | 35 | ||
36 | /* we serialise key instantiation and link */ | 36 | /* we serialise key instantiation and link */ |
37 | DECLARE_RWSEM(key_construction_sem); | 37 | DECLARE_RWSEM(key_construction_sem); |
@@ -552,7 +552,7 @@ EXPORT_SYMBOL(key_negate_and_link); | |||
552 | * do cleaning up in process context so that we don't have to disable | 552 | * do cleaning up in process context so that we don't have to disable |
553 | * interrupts all over the place | 553 | * interrupts all over the place |
554 | */ | 554 | */ |
555 | static void key_cleanup(void *data) | 555 | static void key_cleanup(struct work_struct *work) |
556 | { | 556 | { |
557 | struct rb_node *_n; | 557 | struct rb_node *_n; |
558 | struct key *key; | 558 | struct key *key; |
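The key.c hunks show the statically declared variant: DECLARE_WORK() drops its trailing NULL, and a handler that never used the data cookie simply ignores the work pointer. A hypothetical sketch under those assumptions (cleanup_work and kick_cleanup are made-up names):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void cleanup_worker(struct work_struct *work);
static DECLARE_WORK(cleanup_work, cleanup_worker);	/* no trailing NULL any more */

static void cleanup_worker(struct work_struct *work)
{
	/* a handler that never needed a data cookie can ignore @work entirely */
	printk(KERN_INFO "deferred cleanup ran\n");
}

static void kick_cleanup(void)
{
	schedule_work(&cleanup_work);
}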
diff --git a/sound/aoa/aoa-gpio.h b/sound/aoa/aoa-gpio.h index 3a61f3115573..ee64f5de8966 100644 --- a/sound/aoa/aoa-gpio.h +++ b/sound/aoa/aoa-gpio.h | |||
@@ -59,10 +59,10 @@ struct gpio_methods { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct gpio_notification { | 61 | struct gpio_notification { |
62 | struct delayed_work work; | ||
62 | notify_func_t notify; | 63 | notify_func_t notify; |
63 | void *data; | 64 | void *data; |
64 | void *gpio_private; | 65 | void *gpio_private; |
65 | struct work_struct work; | ||
66 | struct mutex mutex; | 66 | struct mutex mutex; |
67 | }; | 67 | }; |
68 | 68 | ||
diff --git a/sound/aoa/core/snd-aoa-gpio-feature.c b/sound/aoa/core/snd-aoa-gpio-feature.c index 40eb47eccf9a..2b03bc798bcb 100644 --- a/sound/aoa/core/snd-aoa-gpio-feature.c +++ b/sound/aoa/core/snd-aoa-gpio-feature.c | |||
@@ -195,9 +195,10 @@ static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt) | |||
195 | ftr_gpio_set_lineout(rt, (s>>2)&1); | 195 | ftr_gpio_set_lineout(rt, (s>>2)&1); |
196 | } | 196 | } |
197 | 197 | ||
198 | static void ftr_handle_notify(void *data) | 198 | static void ftr_handle_notify(struct work_struct *work) |
199 | { | 199 | { |
200 | struct gpio_notification *notif = data; | 200 | struct gpio_notification *notif = |
201 | container_of(work, struct gpio_notification, work.work); | ||
201 | 202 | ||
202 | mutex_lock(¬if->mutex); | 203 | mutex_lock(¬if->mutex); |
203 | if (notif->notify) | 204 | if (notif->notify) |
@@ -253,12 +254,9 @@ static void ftr_gpio_init(struct gpio_runtime *rt) | |||
253 | 254 | ||
254 | ftr_gpio_all_amps_off(rt); | 255 | ftr_gpio_all_amps_off(rt); |
255 | rt->implementation_private = 0; | 256 | rt->implementation_private = 0; |
256 | INIT_WORK(&rt->headphone_notify.work, ftr_handle_notify, | 257 | INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); |
257 | &rt->headphone_notify); | 258 | INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); |
258 | INIT_WORK(&rt->line_in_notify.work, ftr_handle_notify, | 259 | INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); |
259 | &rt->line_in_notify); | ||
260 | INIT_WORK(&rt->line_out_notify.work, ftr_handle_notify, | ||
261 | &rt->line_out_notify); | ||
262 | mutex_init(&rt->headphone_notify.mutex); | 260 | mutex_init(&rt->headphone_notify.mutex); |
263 | mutex_init(&rt->line_in_notify.mutex); | 261 | mutex_init(&rt->line_in_notify.mutex); |
264 | mutex_init(&rt->line_out_notify.mutex); | 262 | mutex_init(&rt->line_out_notify.mutex); |
@@ -287,7 +285,7 @@ static irqreturn_t ftr_handle_notify_irq(int xx, void *data) | |||
287 | { | 285 | { |
288 | struct gpio_notification *notif = data; | 286 | struct gpio_notification *notif = data; |
289 | 287 | ||
290 | schedule_work(¬if->work); | 288 | schedule_delayed_work(¬if->work, 0); |
291 | 289 | ||
292 | return IRQ_HANDLED; | 290 | return IRQ_HANDLED; |
293 | } | 291 | } |
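The gpio_notification conversion above switches the embedded item from work_struct to delayed_work. Since a delayed_work wraps a work_struct in its .work member, the handler's container_of() goes through work.work, and immediate scheduling becomes schedule_delayed_work(..., 0). A hypothetical sketch (notif_ctx and friends are illustrative names only):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct notif_ctx {
	struct delayed_work dwork;	/* wraps a struct work_struct named "work" */
	int pending;
};

static void notif_worker(struct work_struct *work)
{
	/* the callback is handed &dwork.work, so step out through the .work member */
	struct notif_ctx *ctx = container_of(work, struct notif_ctx, dwork.work);

	printk(KERN_INFO "pending=%d\n", ctx->pending);
}

static void notif_setup(struct notif_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->dwork, notif_worker);
	schedule_delayed_work(&ctx->dwork, 0);	/* a delay of 0 runs it as soon as possible */
}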
diff --git a/sound/aoa/core/snd-aoa-gpio-pmf.c b/sound/aoa/core/snd-aoa-gpio-pmf.c index 2836c3218391..5ca2220eac7d 100644 --- a/sound/aoa/core/snd-aoa-gpio-pmf.c +++ b/sound/aoa/core/snd-aoa-gpio-pmf.c | |||
@@ -69,9 +69,10 @@ static void pmf_gpio_all_amps_restore(struct gpio_runtime *rt) | |||
69 | pmf_gpio_set_lineout(rt, (s>>2)&1); | 69 | pmf_gpio_set_lineout(rt, (s>>2)&1); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void pmf_handle_notify(void *data) | 72 | static void pmf_handle_notify(struct work_struct *work) |
73 | { | 73 | { |
74 | struct gpio_notification *notif = data; | 74 | struct gpio_notification *notif = |
75 | container_of(work, struct gpio_notification, work.work); | ||
75 | 76 | ||
76 | mutex_lock(¬if->mutex); | 77 | mutex_lock(¬if->mutex); |
77 | if (notif->notify) | 78 | if (notif->notify) |
@@ -83,12 +84,9 @@ static void pmf_gpio_init(struct gpio_runtime *rt) | |||
83 | { | 84 | { |
84 | pmf_gpio_all_amps_off(rt); | 85 | pmf_gpio_all_amps_off(rt); |
85 | rt->implementation_private = 0; | 86 | rt->implementation_private = 0; |
86 | INIT_WORK(&rt->headphone_notify.work, pmf_handle_notify, | 87 | INIT_DELAYED_WORK(&rt->headphone_notify.work, pmf_handle_notify); |
87 | &rt->headphone_notify); | 88 | INIT_DELAYED_WORK(&rt->line_in_notify.work, pmf_handle_notify); |
88 | INIT_WORK(&rt->line_in_notify.work, pmf_handle_notify, | 89 | INIT_DELAYED_WORK(&rt->line_out_notify.work, pmf_handle_notify); |
89 | &rt->line_in_notify); | ||
90 | INIT_WORK(&rt->line_out_notify.work, pmf_handle_notify, | ||
91 | &rt->line_out_notify); | ||
92 | mutex_init(&rt->headphone_notify.mutex); | 90 | mutex_init(&rt->headphone_notify.mutex); |
93 | mutex_init(&rt->line_in_notify.mutex); | 91 | mutex_init(&rt->line_in_notify.mutex); |
94 | mutex_init(&rt->line_out_notify.mutex); | 92 | mutex_init(&rt->line_out_notify.mutex); |
@@ -129,7 +127,7 @@ static void pmf_handle_notify_irq(void *data) | |||
129 | { | 127 | { |
130 | struct gpio_notification *notif = data; | 128 | struct gpio_notification *notif = data; |
131 | 129 | ||
132 | schedule_work(¬if->work); | 130 | schedule_delayed_work(¬if->work, 0); |
133 | } | 131 | } |
134 | 132 | ||
135 | static int pmf_set_notify(struct gpio_runtime *rt, | 133 | static int pmf_set_notify(struct gpio_runtime *rt, |
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c index 12ffffc9e814..d2f2c5078e65 100644 --- a/sound/i2c/other/ak4114.c +++ b/sound/i2c/other/ak4114.c | |||
@@ -35,7 +35,7 @@ MODULE_LICENSE("GPL"); | |||
35 | 35 | ||
36 | #define AK4114_ADDR 0x00 /* fixed address */ | 36 | #define AK4114_ADDR 0x00 /* fixed address */ |
37 | 37 | ||
38 | static void ak4114_stats(void *); | 38 | static void ak4114_stats(struct work_struct *work); |
39 | 39 | ||
40 | static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) | 40 | static void reg_write(struct ak4114 *ak4114, unsigned char reg, unsigned char val) |
41 | { | 41 | { |
@@ -158,7 +158,7 @@ void snd_ak4114_reinit(struct ak4114 *chip) | |||
158 | reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); | 158 | reg_write(chip, AK4114_REG_PWRDN, old | AK4114_RST | AK4114_PWN); |
159 | /* bring up statistics / event queueing */ | 159 | /* bring up statistics / event queueing */ |
160 | chip->init = 0; | 160 | chip->init = 0; |
161 | INIT_WORK(&chip->work, ak4114_stats, chip); | 161 | INIT_DELAYED_WORK(&chip->work, ak4114_stats); |
162 | queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); | 162 | queue_delayed_work(chip->workqueue, &chip->work, HZ / 10); |
163 | } | 163 | } |
164 | 164 | ||
@@ -561,9 +561,9 @@ int snd_ak4114_check_rate_and_errors(struct ak4114 *ak4114, unsigned int flags) | |||
561 | return res; | 561 | return res; |
562 | } | 562 | } |
563 | 563 | ||
564 | static void ak4114_stats(void *data) | 564 | static void ak4114_stats(struct work_struct *work) |
565 | { | 565 | { |
566 | struct ak4114 *chip = (struct ak4114 *)data; | 566 | struct ak4114 *chip = container_of(work, struct ak4114, work.work); |
567 | 567 | ||
568 | if (chip->init) | 568 | if (chip->init) |
569 | return; | 569 | return; |
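The ak4114.c hunks apply the same delayed-work conversion on a driver-private workqueue, with the handler re-arming itself via queue_delayed_work(). A hypothetical sketch, not the real struct ak4114 (stats_chip and "stats" are assumed names):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct stats_chip {
	struct workqueue_struct *wq;
	struct delayed_work work;
	int stopping;
};

static void stats_worker(struct work_struct *work)
{
	struct stats_chip *chip = container_of(work, struct stats_chip, work.work);

	if (chip->stopping)
		return;
	/* ... gather statistics ... */
	queue_delayed_work(chip->wq, &chip->work, HZ / 10);	/* re-arm on the private queue */
}

static int stats_start(struct stats_chip *chip)
{
	chip->wq = create_workqueue("stats");
	if (!chip->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&chip->work, stats_worker);
	queue_delayed_work(chip->wq, &chip->work, HZ / 10);
	return 0;
}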
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 6577b2325357..7abcb10b2754 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c | |||
@@ -1927,9 +1927,10 @@ static int snd_ac97_dev_disconnect(struct snd_device *device) | |||
1927 | static struct snd_ac97_build_ops null_build_ops; | 1927 | static struct snd_ac97_build_ops null_build_ops; |
1928 | 1928 | ||
1929 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 1929 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
1930 | static void do_update_power(void *data) | 1930 | static void do_update_power(struct work_struct *work) |
1931 | { | 1931 | { |
1932 | update_power_regs(data); | 1932 | update_power_regs( |
1933 | container_of(work, struct snd_ac97, power_work.work)); | ||
1933 | } | 1934 | } |
1934 | #endif | 1935 | #endif |
1935 | 1936 | ||
@@ -1989,7 +1990,7 @@ int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template, | |||
1989 | mutex_init(&ac97->page_mutex); | 1990 | mutex_init(&ac97->page_mutex); |
1990 | #ifdef CONFIG_SND_AC97_POWER_SAVE | 1991 | #ifdef CONFIG_SND_AC97_POWER_SAVE |
1991 | ac97->power_workq = create_workqueue("ac97"); | 1992 | ac97->power_workq = create_workqueue("ac97"); |
1992 | INIT_WORK(&ac97->power_work, do_update_power, ac97); | 1993 | INIT_DELAYED_WORK(&ac97->power_work, do_update_power); |
1993 | #endif | 1994 | #endif |
1994 | 1995 | ||
1995 | #ifdef CONFIG_PCI | 1996 | #ifdef CONFIG_PCI |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 9c3d7ac08068..71482c15a852 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -272,10 +272,11 @@ EXPORT_SYMBOL(snd_hda_queue_unsol_event); | |||
272 | /* | 272 | /* |
273 | * process queued unsolicited events | 273 | * process queued unsolicited events |
274 | */ | 274 | */ |
275 | static void process_unsol_events(void *data) | 275 | static void process_unsol_events(struct work_struct *work) |
276 | { | 276 | { |
277 | struct hda_bus *bus = data; | 277 | struct hda_bus_unsolicited *unsol = |
278 | struct hda_bus_unsolicited *unsol = bus->unsol; | 278 | container_of(work, struct hda_bus_unsolicited, work); |
279 | struct hda_bus *bus = unsol->bus; | ||
279 | struct hda_codec *codec; | 280 | struct hda_codec *codec; |
280 | unsigned int rp, caddr, res; | 281 | unsigned int rp, caddr, res; |
281 | 282 | ||
@@ -314,7 +315,8 @@ static int init_unsol_queue(struct hda_bus *bus) | |||
314 | kfree(unsol); | 315 | kfree(unsol); |
315 | return -ENOMEM; | 316 | return -ENOMEM; |
316 | } | 317 | } |
317 | INIT_WORK(&unsol->work, process_unsol_events, bus); | 318 | INIT_WORK(&unsol->work, process_unsol_events); |
319 | unsol->bus = bus; | ||
318 | bus->unsol = unsol; | 320 | bus->unsol = unsol; |
319 | return 0; | 321 | return 0; |
320 | } | 322 | } |
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h index f9416c36396e..9ca1baf860bd 100644 --- a/sound/pci/hda/hda_local.h +++ b/sound/pci/hda/hda_local.h | |||
@@ -206,6 +206,7 @@ struct hda_bus_unsolicited { | |||
206 | /* workqueue */ | 206 | /* workqueue */ |
207 | struct workqueue_struct *workq; | 207 | struct workqueue_struct *workq; |
208 | struct work_struct work; | 208 | struct work_struct work; |
209 | struct hda_bus *bus; | ||
209 | }; | 210 | }; |
210 | 211 | ||
211 | /* | 212 | /* |
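The two HDA hunks above cover the case where the work item lives in a helper object (hda_bus_unsolicited) but the handler needs the object it serves (hda_bus): a back-pointer field is added and assigned right after INIT_WORK(), replacing the old data argument. A sketch with made-up parent/helper names, assuming the helper is allocated at init time:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct parent;

struct helper {
	struct work_struct work;
	struct parent *parent;	/* back-pointer: container_of() only recovers the helper */
};

struct parent {
	struct helper *helper;
	int id;
};

static void helper_worker(struct work_struct *work)
{
	struct helper *h = container_of(work, struct helper, work);
	struct parent *p = h->parent;

	printk(KERN_INFO "servicing parent %d\n", p->id);
}

static int parent_init(struct parent *p)
{
	struct helper *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return -ENOMEM;
	INIT_WORK(&h->work, helper_worker);
	h->parent = p;		/* was previously passed as INIT_WORK()'s data argument */
	p->helper = h;
	return 0;
}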
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c index 2fbe1d183fce..8f074c7936e6 100644 --- a/sound/ppc/tumbler.c +++ b/sound/ppc/tumbler.c | |||
@@ -942,10 +942,11 @@ static void check_mute(struct snd_pmac *chip, struct pmac_gpio *gp, int val, int | |||
942 | } | 942 | } |
943 | 943 | ||
944 | static struct work_struct device_change; | 944 | static struct work_struct device_change; |
945 | static struct snd_pmac *device_change_chip; | ||
945 | 946 | ||
946 | static void device_change_handler(void *self) | 947 | static void device_change_handler(struct work_struct *work) |
947 | { | 948 | { |
948 | struct snd_pmac *chip = self; | 949 | struct snd_pmac *chip = device_change_chip; |
949 | struct pmac_tumbler *mix; | 950 | struct pmac_tumbler *mix; |
950 | int headphone, lineout; | 951 | int headphone, lineout; |
951 | 952 | ||
@@ -1417,7 +1418,8 @@ int __init snd_pmac_tumbler_init(struct snd_pmac *chip) | |||
1417 | chip->resume = tumbler_resume; | 1418 | chip->resume = tumbler_resume; |
1418 | #endif | 1419 | #endif |
1419 | 1420 | ||
1420 | INIT_WORK(&device_change, device_change_handler, (void *)chip); | 1421 | INIT_WORK(&device_change, device_change_handler); |
1422 | device_change_chip = chip; | ||
1421 | 1423 | ||
1422 | #ifdef PMAC_SUPPORT_AUTOMUTE | 1424 | #ifdef PMAC_SUPPORT_AUTOMUTE |
1423 | if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) | 1425 | if ((mix->headphone_irq >=0 || mix->lineout_irq >= 0) |