author     Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2008-10-12 09:44:33 -0400
committer  Haavard Skinnemoen <haavard.skinnemoen@atmel.com>   2008-10-12 09:44:33 -0400
commit     0d62950125241a6e6db8e8f14271f098ec7a2da4
tree       8cdd9e17f6a6ff4cb6166ad12a4d3ed1d45b2dc9
parent     b3bc2c5562f06ca34b30f61c5714e96490946c81
parent     5e7184ae0dd49456387e8b1cdebc6b2c92fc6d51
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/hskinnemoen/atmel-mci-2.6.28
231 files changed, 3590 insertions, 1861 deletions
diff --git a/Documentation/DMA-mapping.txt b/Documentation/DMA-mapping.txt index b463ecd0c7ce..c74fec8c2351 100644 --- a/Documentation/DMA-mapping.txt +++ b/Documentation/DMA-mapping.txt | |||
@@ -740,7 +740,7 @@ failure can be determined by: | |||
740 | dma_addr_t dma_handle; | 740 | dma_addr_t dma_handle; |
741 | 741 | ||
742 | dma_handle = pci_map_single(pdev, addr, size, direction); | 742 | dma_handle = pci_map_single(pdev, addr, size, direction); |
743 | if (pci_dma_mapping_error(dma_handle)) { | 743 | if (pci_dma_mapping_error(pdev, dma_handle)) { |
744 | /* | 744 | /* |
745 | * reduce current DMA mapping usage, | 745 | * reduce current DMA mapping usage, |
746 | * delay and try again later or | 746 | * delay and try again later or |
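For reference, the corrected calling convention documented in this hunk maps onto driver code roughly like the following sketch; pdev, buf, size and the error label are assumed to come from the surrounding driver, and the unmap direction must match the map direction:

        dma_addr_t dma_handle;

        dma_handle = pci_map_single(pdev, buf, size, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(pdev, dma_handle)) {
                /* reduce current DMA mapping usage, delay and retry, or fail */
                goto map_error_handling;
        }

        /* ... hand dma_handle to the device and wait for completion ... */

        pci_unmap_single(pdev, dma_handle, size, PCI_DMA_TODEVICE);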
diff --git a/Documentation/HOWTO b/Documentation/HOWTO index c2371c5a98f9..48a3955f05fc 100644 --- a/Documentation/HOWTO +++ b/Documentation/HOWTO | |||
@@ -77,7 +77,8 @@ documentation files are also added which explain how to use the feature. | |||
77 | When a kernel change causes the interface that the kernel exposes to | 77 | When a kernel change causes the interface that the kernel exposes to |
78 | userspace to change, it is recommended that you send the information or | 78 | userspace to change, it is recommended that you send the information or |
79 | a patch to the manual pages explaining the change to the manual pages | 79 | a patch to the manual pages explaining the change to the manual pages |
80 | maintainer at mtk.manpages@gmail.com. | 80 | maintainer at mtk.manpages@gmail.com, and CC the list |
81 | linux-api@vger.kernel.org. | ||
81 | 82 | ||
82 | Here is a list of files that are in the kernel source tree that are | 83 | Here is a list of files that are in the kernel source tree that are |
83 | required reading: | 84 | required reading: |
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist index da10e0714241..21f0795af20f 100644 --- a/Documentation/SubmitChecklist +++ b/Documentation/SubmitChecklist | |||
@@ -67,6 +67,8 @@ kernel patches. | |||
67 | 67 | ||
68 | 19: All new userspace interfaces are documented in Documentation/ABI/. | 68 | 19: All new userspace interfaces are documented in Documentation/ABI/. |
69 | See Documentation/ABI/README for more information. | 69 | See Documentation/ABI/README for more information. |
70 | Patches that change userspace interfaces should be CCed to | ||
71 | linux-api@vger.kernel.org. | ||
70 | 72 | ||
71 | 20: Check that it all passes `make headers_check'. | 73 | 20: Check that it all passes `make headers_check'. |
72 | 74 | ||
diff --git a/Documentation/ioctl/cdrom.txt b/Documentation/ioctl/cdrom.txt index 62d4af44ec4a..59df81c8da2b 100644 --- a/Documentation/ioctl/cdrom.txt +++ b/Documentation/ioctl/cdrom.txt | |||
@@ -271,14 +271,14 @@ CDROMCLOSETRAY pendant of CDROMEJECT | |||
271 | 271 | ||
272 | usage: | 272 | usage: |
273 | 273 | ||
274 | ioctl(fd, CDROMEJECT, 0); | 274 | ioctl(fd, CDROMCLOSETRAY, 0); |
275 | 275 | ||
276 | inputs: none | 276 | inputs: none |
277 | 277 | ||
278 | outputs: none | 278 | outputs: none |
279 | 279 | ||
280 | error returns: | 280 | error returns: |
281 | ENOSYS cd drive not capable of ejecting | 281 | ENOSYS cd drive not capable of closing the tray |
282 | EBUSY other processes are accessing drive, or door is locked | 282 | EBUSY other processes are accessing drive, or door is locked |
283 | 283 | ||
284 | notes: | 284 | notes: |
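A minimal userspace sketch of the corrected CDROMCLOSETRAY usage; the device path is an assumption, and the error values are the ENOSYS/EBUSY cases documented above:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/cdrom.h>

        int main(void)
        {
                /* cdrom device nodes are conventionally opened non-blocking for ioctl use */
                int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);

                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (ioctl(fd, CDROMCLOSETRAY, 0) < 0)
                        perror("CDROMCLOSETRAY");       /* e.g. ENOSYS or EBUSY */
                close(fd);
                return 0;
        }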
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 276a7e637822..e1ff0d920a5c 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt | |||
@@ -351,9 +351,10 @@ kernel. This value defaults to SHMMAX. | |||
351 | 351 | ||
352 | softlockup_thresh: | 352 | softlockup_thresh: |
353 | 353 | ||
354 | This value can be used to lower the softlockup tolerance | 354 | This value can be used to lower the softlockup tolerance threshold. The |
355 | threshold. The default threshold is 10s. If a cpu is locked up | 355 | default threshold is 60 seconds. If a cpu is locked up for 60 seconds, |
356 | for 10s, the kernel complains. Valid values are 1-60s. | 356 | the kernel complains. Valid values are 1-60 seconds. Setting this |
357 | tunable to zero will disable the softlockup detection altogether. | ||
357 | 358 | ||
358 | ============================================================== | 359 | ============================================================== |
359 | 360 | ||
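As a small illustration of the zero-disables behaviour described above, the tunable can be written from userspace along these lines (a sketch; requires root, and the path is the standard proc location for this sysctl):

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/sys/kernel/softlockup_thresh", "w");

                if (!f) {
                        perror("softlockup_thresh");
                        return 1;
                }
                fprintf(f, "0\n");      /* 0 disables detection; 1-60 sets the threshold in seconds */
                fclose(f);
                return 0;
        }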
diff --git a/Documentation/usb/anchors.txt b/Documentation/usb/anchors.txt index 7304bcf5a306..5e6b64c20d25 100644 --- a/Documentation/usb/anchors.txt +++ b/Documentation/usb/anchors.txt | |||
@@ -42,9 +42,21 @@ This function kills all URBs associated with an anchor. The URBs | |||
42 | are called in the reverse temporal order they were submitted. | 42 | are called in the reverse temporal order they were submitted. |
43 | This way no data can be reordered. | 43 | This way no data can be reordered. |
44 | 44 | ||
45 | usb_unlink_anchored_urbs() | ||
46 | -------------------------- | ||
47 | |||
48 | This function unlinks all URBs associated with an anchor. The URBs | ||
49 | are processed in the reverse temporal order they were submitted. | ||
50 | This is similar to usb_kill_anchored_urbs(), but it will not sleep. | ||
51 | Therefore no guarantee is made that the URBs have been unlinked when | ||
52 | the call returns. They may be unlinked later but will be unlinked in | ||
53 | finite time. | ||
54 | |||
45 | usb_wait_anchor_empty_timeout() | 55 | usb_wait_anchor_empty_timeout() |
46 | ------------------------------- | 56 | ------------------------------- |
47 | 57 | ||
48 | This function waits for all URBs associated with an anchor to finish | 58 | This function waits for all URBs associated with an anchor to finish |
49 | or a timeout, whichever comes first. Its return value will tell you | 59 | or a timeout, whichever comes first. Its return value will tell you |
50 | whether the timeout was reached. | 60 | whether the timeout was reached. |
61 | |||
62 | |||
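A hedged sketch of how a driver might combine the anchor calls documented above; "mydev" and its fields are hypothetical, and the anchor is assumed to have been initialized with init_usb_anchor() at probe time:

        #include <linux/usb.h>

        struct mydev {
                struct usb_anchor submitted;
        };

        static int mydev_submit(struct mydev *dev, struct urb *urb)
        {
                int ret;

                usb_anchor_urb(urb, &dev->submitted);
                ret = usb_submit_urb(urb, GFP_KERNEL);
                if (ret)
                        usb_unanchor_urb(urb);
                return ret;
        }

        static void mydev_stop(struct mydev *dev)
        {
                /* Non-sleeping path: request unlinking of everything in flight. */
                usb_unlink_anchored_urbs(&dev->submitted);

                /* Where sleeping is allowed: wait up to 1000 ms, then force-kill. */
                if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
                        usb_kill_anchored_urbs(&dev->submitted);
        }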
diff --git a/MAINTAINERS b/MAINTAINERS index cad81a24e832..8dae4555f10e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1198,9 +1198,7 @@ M: hpa@zytor.com | |||
1198 | S: Maintained | 1198 | S: Maintained |
1199 | 1199 | ||
1200 | CPUSETS | 1200 | CPUSETS |
1201 | P: Paul Jackson | ||
1202 | P: Paul Menage | 1201 | P: Paul Menage |
1203 | M: pj@sgi.com | ||
1204 | M: menage@google.com | 1202 | M: menage@google.com |
1205 | L: linux-kernel@vger.kernel.org | 1203 | L: linux-kernel@vger.kernel.org |
1206 | W: http://www.bullopensource.org/cpuset/ | 1204 | W: http://www.bullopensource.org/cpuset/ |
@@ -1984,7 +1982,7 @@ S: Maintained | |||
1984 | I2C/SMBUS STUB DRIVER | 1982 | I2C/SMBUS STUB DRIVER |
1985 | P: Mark M. Hoffman | 1983 | P: Mark M. Hoffman |
1986 | M: mhoffman@lightlink.com | 1984 | M: mhoffman@lightlink.com |
1987 | L: lm-sensors@lm-sensors.org | 1985 | L: i2c@lm-sensors.org |
1988 | S: Maintained | 1986 | S: Maintained |
1989 | 1987 | ||
1990 | I2C SUBSYSTEM | 1988 | I2C SUBSYSTEM |
@@ -2706,6 +2704,7 @@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 | |||
2706 | P: Michael Kerrisk | 2704 | P: Michael Kerrisk |
2707 | M: mtk.manpages@gmail.com | 2705 | M: mtk.manpages@gmail.com |
2708 | W: http://www.kernel.org/doc/man-pages | 2706 | W: http://www.kernel.org/doc/man-pages |
2707 | L: linux-man@vger.kernel.org | ||
2709 | S: Supported | 2708 | S: Supported |
2710 | 2709 | ||
2711 | MARVELL LIBERTAS WIRELESS DRIVER | 2710 | MARVELL LIBERTAS WIRELESS DRIVER |
@@ -3726,7 +3725,7 @@ S: Maintained | |||
3726 | SIS 96X I2C/SMBUS DRIVER | 3725 | SIS 96X I2C/SMBUS DRIVER |
3727 | P: Mark M. Hoffman | 3726 | P: Mark M. Hoffman |
3728 | M: mhoffman@lightlink.com | 3727 | M: mhoffman@lightlink.com |
3729 | L: lm-sensors@lm-sensors.org | 3728 | L: i2c@lm-sensors.org |
3730 | S: Maintained | 3729 | S: Maintained |
3731 | 3730 | ||
3732 | SIS FRAMEBUFFER DRIVER | 3731 | SIS FRAMEBUFFER DRIVER |
@@ -3833,11 +3832,12 @@ S: Maintained | |||
3833 | 3832 | ||
3834 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT | 3833 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT |
3835 | P: Liam Girdwood | 3834 | P: Liam Girdwood |
3836 | M: liam.girdwood@wolfsonmicro.com | 3835 | M: lrg@slimlogic.co.uk |
3837 | P: Mark Brown | 3836 | P: Mark Brown |
3838 | M: broonie@opensource.wolfsonmicro.com | 3837 | M: broonie@opensource.wolfsonmicro.com |
3839 | T: git opensource.wolfsonmicro.com/linux-2.6-asoc | 3838 | T: git opensource.wolfsonmicro.com/linux-2.6-asoc |
3840 | L: alsa-devel@alsa-project.org (subscribers-only) | 3839 | L: alsa-devel@alsa-project.org (subscribers-only) |
3840 | W: http://alsa-project.org/main/index.php/ASoC | ||
3841 | S: Supported | 3841 | S: Supported |
3842 | 3842 | ||
3843 | SPI SUBSYSTEM | 3843 | SPI SUBSYSTEM |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 2 | 1 | VERSION = 2 |
2 | PATCHLEVEL = 6 | 2 | PATCHLEVEL = 6 |
3 | SUBLEVEL = 27 | 3 | SUBLEVEL = 27 |
4 | EXTRAVERSION = -rc7 | 4 | EXTRAVERSION = -rc8 |
5 | NAME = Rotary Wombat | 5 | NAME = Rotary Wombat |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index aaffaecffcd1..ba8ccfede964 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c | |||
@@ -111,8 +111,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, | |||
111 | case 'D': | 111 | case 'D': |
112 | case 'k': | 112 | case 'k': |
113 | case 'c': | 113 | case 'c': |
114 | kgdb_contthread = NULL; | ||
115 | |||
116 | /* | 114 | /* |
117 | * Try to read optional parameter, pc unchanged if no parm. | 115 | * Try to read optional parameter, pc unchanged if no parm. |
118 | * If this was a compiled breakpoint, we need to move | 116 | * If this was a compiled breakpoint, we need to move |
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 67e18509d7bf..b0d6b32654cf 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c | |||
@@ -17,9 +17,9 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/clockchips.h> | 18 | #include <linux/clockchips.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/cnt32_to_63.h> | ||
20 | 21 | ||
21 | #include <asm/div64.h> | 22 | #include <asm/div64.h> |
22 | #include <asm/cnt32_to_63.h> | ||
23 | #include <asm/mach/irq.h> | 23 | #include <asm/mach/irq.h> |
24 | #include <asm/mach/time.h> | 24 | #include <asm/mach/time.h> |
25 | #include <mach/pxa-regs.h> | 25 | #include <mach/pxa-regs.h> |
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 1362994c78aa..b422526f6d8b 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c | |||
@@ -18,9 +18,9 @@ | |||
18 | #include <linux/ioport.h> | 18 | #include <linux/ioport.h> |
19 | #include <linux/sched.h> /* just for sched_clock() - funny that */ | 19 | #include <linux/sched.h> /* just for sched_clock() - funny that */ |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/cnt32_to_63.h> | ||
21 | 22 | ||
22 | #include <asm/div64.h> | 23 | #include <asm/div64.h> |
23 | #include <asm/cnt32_to_63.h> | ||
24 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
25 | #include <asm/system.h> | 25 | #include <asm/system.h> |
26 | #include <asm/pgtable.h> | 26 | #include <asm/pgtable.h> |
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index d75e795c893e..b638f10411e8 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c | |||
@@ -28,8 +28,8 @@ | |||
28 | #include <linux/amba/clcd.h> | 28 | #include <linux/amba/clcd.h> |
29 | #include <linux/clocksource.h> | 29 | #include <linux/clocksource.h> |
30 | #include <linux/clockchips.h> | 30 | #include <linux/clockchips.h> |
31 | #include <linux/cnt32_to_63.h> | ||
31 | 32 | ||
32 | #include <asm/cnt32_to_63.h> | ||
33 | #include <asm/system.h> | 33 | #include <asm/system.h> |
34 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
35 | #include <asm/io.h> | 35 | #include <asm/io.h> |
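The three hunks above only move the header from <asm/cnt32_to_63.h> to <linux/cnt32_to_63.h>. For context, the kind of code that depends on it is a sched_clock() extended from a free-running 32-bit counter, roughly like this sketch; read_my_32bit_counter() and the 3 MHz rate are made-up placeholders, and the macro is expected to be called at least once per half period of the counter:

        #include <linux/cnt32_to_63.h>

        unsigned long long sched_clock(void)
        {
                /* Extend the 32-bit hardware count to a monotonic 63-bit value. */
                unsigned long long v = cnt32_to_63(read_my_32bit_counter());

                /* Scale counter ticks to nanoseconds (rate is board-specific). */
                return v * 1000 / 3;    /* hypothetical 3 MHz counter */
        }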
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c index abcb0d9559b1..65db3d266965 100644 --- a/arch/avr32/boards/atngw100/setup.c +++ b/arch/avr32/boards/atngw100/setup.c | |||
@@ -54,8 +54,11 @@ static struct spi_board_info spi0_board_info[] __initdata = { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | static struct mci_platform_data __initdata mci0_data = { | 56 | static struct mci_platform_data __initdata mci0_data = { |
57 | .detect_pin = GPIO_PIN_PC(25), | 57 | .slot[0] = { |
58 | .wp_pin = GPIO_PIN_PE(0), | 58 | .bus_width = 4, |
59 | .detect_pin = GPIO_PIN_PC(25), | ||
60 | .wp_pin = GPIO_PIN_PE(0), | ||
61 | }, | ||
59 | }; | 62 | }; |
60 | 63 | ||
61 | /* | 64 | /* |
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c index cccca241fae9..32aceec0ae72 100644 --- a/arch/avr32/boards/atstk1000/atstk1002.c +++ b/arch/avr32/boards/atstk1000/atstk1002.c | |||
@@ -264,16 +264,20 @@ void __init setup_board(void) | |||
264 | 264 | ||
265 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | 265 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM |
266 | 266 | ||
267 | static struct mci_platform_data __initdata mci0_data = { | ||
268 | .slot[0] = { | ||
269 | .bus_width = 4, | ||
270 | |||
267 | /* MMC card detect requires MACB0 *NOT* be used */ | 271 | /* MMC card detect requires MACB0 *NOT* be used */ |
268 | #ifdef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM | 272 | #ifdef CONFIG_BOARD_ATSTK1002_SW6_CUSTOM |
269 | static struct mci_platform_data __initdata mci0_data = { | 273 | .detect_pin = GPIO_PIN_PC(14), /* gpio30/sdcd */ |
270 | .detect_pin = GPIO_PIN_PC(14), /* gpio30/sdcd */ | 274 | .wp_pin = GPIO_PIN_PC(15), /* gpio31/sdwp */ |
271 | .wp_pin = GPIO_PIN_PC(15), /* gpio31/sdwp */ | ||
272 | }; | ||
273 | #define MCI_PDATA &mci0_data | ||
274 | #else | 275 | #else |
275 | #define MCI_PDATA NULL | 276 | .detect_pin = -ENODEV, |
277 | .wp_pin = -ENODEV, | ||
276 | #endif /* SW6 for sd{cd,wp} routing */ | 278 | #endif /* SW6 for sd{cd,wp} routing */ |
279 | }, | ||
280 | }; | ||
277 | 281 | ||
278 | #endif /* SW2 for MMC signal routing */ | 282 | #endif /* SW2 for MMC signal routing */ |
279 | 283 | ||
@@ -326,7 +330,7 @@ static int __init atstk1002_init(void) | |||
326 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); | 330 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); |
327 | #endif | 331 | #endif |
328 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | 332 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM |
329 | at32_add_device_mci(0, MCI_PDATA); | 333 | at32_add_device_mci(0, &mci0_pdata); |
330 | #endif | 334 | #endif |
331 | #ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM | 335 | #ifdef CONFIG_BOARD_ATSTK1002_SW5_CUSTOM |
332 | set_hw_addr(at32_add_device_eth(1, ð_data[1])); | 336 | set_hw_addr(at32_add_device_eth(1, ð_data[1])); |
diff --git a/arch/avr32/boards/atstk1000/atstk1003.c b/arch/avr32/boards/atstk1000/atstk1003.c index 0cf664174c17..acc61235b895 100644 --- a/arch/avr32/boards/atstk1000/atstk1003.c +++ b/arch/avr32/boards/atstk1000/atstk1003.c | |||
@@ -66,6 +66,16 @@ static struct spi_board_info spi1_board_info[] __initdata = { { | |||
66 | } }; | 66 | } }; |
67 | #endif | 67 | #endif |
68 | 68 | ||
69 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | ||
70 | static struct mci_platform_data __initdata mci0_data = { | ||
71 | .slot[0] = { | ||
72 | .bus_width = 4, | ||
73 | .detect_pin = -ENODEV, | ||
74 | .wp_pin = -ENODEV, | ||
75 | }, | ||
76 | }; | ||
77 | #endif | ||
78 | |||
69 | #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC | 79 | #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC |
70 | static void __init atstk1003_setup_extdac(void) | 80 | static void __init atstk1003_setup_extdac(void) |
71 | { | 81 | { |
@@ -154,7 +164,7 @@ static int __init atstk1003_init(void) | |||
154 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); | 164 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); |
155 | #endif | 165 | #endif |
156 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | 166 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM |
157 | at32_add_device_mci(0, NULL); | 167 | at32_add_device_mci(0, &mci0_data); |
158 | #endif | 168 | #endif |
159 | at32_add_device_usba(0, NULL); | 169 | at32_add_device_usba(0, NULL); |
160 | #ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM | 170 | #ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM |
diff --git a/arch/avr32/boards/atstk1000/atstk1004.c b/arch/avr32/boards/atstk1000/atstk1004.c index 2c072cd0c22e..949c13ec51ed 100644 --- a/arch/avr32/boards/atstk1000/atstk1004.c +++ b/arch/avr32/boards/atstk1000/atstk1004.c | |||
@@ -71,6 +71,16 @@ static struct spi_board_info spi1_board_info[] __initdata = { { | |||
71 | } }; | 71 | } }; |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | ||
75 | static struct mci_platform_data __initdata mci0_data = { | ||
76 | .slot[0] = { | ||
77 | .bus_width = 4, | ||
78 | .detect_pin = -ENODEV, | ||
79 | .wp_pin = -ENODEV, | ||
80 | }, | ||
81 | }; | ||
82 | #endif | ||
83 | |||
74 | #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC | 84 | #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC |
75 | static void __init atstk1004_setup_extdac(void) | 85 | static void __init atstk1004_setup_extdac(void) |
76 | { | 86 | { |
@@ -137,7 +147,7 @@ static int __init atstk1004_init(void) | |||
137 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); | 147 | at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); |
138 | #endif | 148 | #endif |
139 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM | 149 | #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM |
140 | at32_add_device_mci(0, NULL); | 150 | at32_add_device_mci(0, &mci0_data); |
141 | #endif | 151 | #endif |
142 | at32_add_device_lcdc(0, &atstk1000_lcdc_data, | 152 | at32_add_device_lcdc(0, &atstk1000_lcdc_data, |
143 | fbmem_start, fbmem_size, | 153 | fbmem_start, fbmem_size, |
diff --git a/arch/avr32/include/asm/atmel-mci.h b/arch/avr32/include/asm/atmel-mci.h index c2ea6e1c9aa1..59f3fadd0b68 100644 --- a/arch/avr32/include/asm/atmel-mci.h +++ b/arch/avr32/include/asm/atmel-mci.h | |||
@@ -1,9 +1,39 @@ | |||
1 | #ifndef __ASM_AVR32_ATMEL_MCI_H | 1 | #ifndef __ASM_AVR32_ATMEL_MCI_H |
2 | #define __ASM_AVR32_ATMEL_MCI_H | 2 | #define __ASM_AVR32_ATMEL_MCI_H |
3 | 3 | ||
4 | struct mci_platform_data { | 4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 |
5 | |||
6 | struct dma_slave; | ||
7 | |||
8 | /** | ||
9 | * struct mci_slot_pdata - board-specific per-slot configuration | ||
10 | * @bus_width: Number of data lines wired up the slot | ||
11 | * @detect_pin: GPIO pin wired to the card detect switch | ||
12 | * @wp_pin: GPIO pin wired to the write protect sensor | ||
13 | * | ||
14 | * If a given slot is not present on the board, @bus_width should be | ||
15 | * set to 0. The other fields are ignored in this case. | ||
16 | * | ||
17 | * Any pins that aren't available should be set to a negative value. | ||
18 | * | ||
19 | * Note that support for multiple slots is experimental -- some cards | ||
20 | * might get upset if we don't get the clock management exactly right. | ||
21 | * But in most cases, it should work just fine. | ||
22 | */ | ||
23 | struct mci_slot_pdata { | ||
24 | unsigned int bus_width; | ||
5 | int detect_pin; | 25 | int detect_pin; |
6 | int wp_pin; | 26 | int wp_pin; |
7 | }; | 27 | }; |
8 | 28 | ||
29 | /** | ||
30 | * struct mci_platform_data - board-specific MMC/SDcard configuration | ||
31 | * @dma_slave: DMA slave interface to use in data transfers, or NULL. | ||
32 | * @slot: Per-slot configuration data. | ||
33 | */ | ||
34 | struct mci_platform_data { | ||
35 | struct dma_slave *dma_slave; | ||
36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; | ||
37 | }; | ||
38 | |||
9 | #endif /* __ASM_AVR32_ATMEL_MCI_H */ | 39 | #endif /* __ASM_AVR32_ATMEL_MCI_H */ |
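Putting the kernel-doc above together, board code is expected to provide something along these lines (condensed from the board hunks earlier in this merge; the pin numbers are placeholders):

        static struct mci_platform_data __initdata mci0_data = {
                .slot[0] = {
                        .bus_width      = 4,
                        .detect_pin     = GPIO_PIN_PC(25),      /* placeholder pin */
                        .wp_pin         = GPIO_PIN_PE(0),       /* placeholder pin */
                },
                /* slot[1] left zeroed: bus_width == 0 marks it as not present */
        };

        static int __init myboard_init(void)
        {
                at32_add_device_mci(0, &mci0_data);
                return 0;
        }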
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index fd306c49194b..5d00bb8d3cc2 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c | |||
@@ -1272,10 +1272,14 @@ static struct clk atmel_mci0_pclk = { | |||
1272 | struct platform_device *__init | 1272 | struct platform_device *__init |
1273 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | 1273 | at32_add_device_mci(unsigned int id, struct mci_platform_data *data) |
1274 | { | 1274 | { |
1275 | struct mci_platform_data _data; | ||
1276 | struct platform_device *pdev; | 1275 | struct platform_device *pdev; |
1276 | struct dw_dma_slave *dws; | ||
1277 | 1277 | ||
1278 | if (id != 0) | 1278 | if (id != 0 || !data) |
1279 | return NULL; | ||
1280 | |||
1281 | /* Must have at least one usable slot */ | ||
1282 | if (!data->slot[0].bus_width && !data->slot[1].bus_width) | ||
1279 | return NULL; | 1283 | return NULL; |
1280 | 1284 | ||
1281 | pdev = platform_device_alloc("atmel_mci", id); | 1285 | pdev = platform_device_alloc("atmel_mci", id); |
@@ -1286,28 +1290,76 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data) | |||
1286 | ARRAY_SIZE(atmel_mci0_resource))) | 1290 | ARRAY_SIZE(atmel_mci0_resource))) |
1287 | goto fail; | 1291 | goto fail; |
1288 | 1292 | ||
1289 | if (!data) { | 1293 | if (data->dma_slave) |
1290 | data = &_data; | 1294 | dws = kmemdup(to_dw_dma_slave(data->dma_slave), |
1291 | memset(data, -1, sizeof(struct mci_platform_data)); | 1295 | sizeof(struct dw_dma_slave), GFP_KERNEL); |
1292 | data->detect_pin = GPIO_PIN_NONE; | 1296 | else |
1293 | data->wp_pin = GPIO_PIN_NONE; | 1297 | dws = kzalloc(sizeof(struct dw_dma_slave), GFP_KERNEL); |
1294 | } | 1298 | |
1299 | dws->slave.dev = &pdev->dev; | ||
1300 | dws->slave.dma_dev = &dw_dmac0_device.dev; | ||
1301 | dws->slave.reg_width = DMA_SLAVE_WIDTH_32BIT; | ||
1302 | dws->cfg_hi = (DWC_CFGH_SRC_PER(0) | ||
1303 | | DWC_CFGH_DST_PER(1)); | ||
1304 | dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | ||
1305 | | DWC_CFGL_HS_SRC_POL); | ||
1306 | |||
1307 | data->dma_slave = &dws->slave; | ||
1295 | 1308 | ||
1296 | if (platform_device_add_data(pdev, data, | 1309 | if (platform_device_add_data(pdev, data, |
1297 | sizeof(struct mci_platform_data))) | 1310 | sizeof(struct mci_platform_data))) |
1298 | goto fail; | 1311 | goto fail; |
1299 | 1312 | ||
1300 | select_peripheral(PA(10), PERIPH_A, 0); /* CLK */ | 1313 | /* CLK line is common to both slots */ |
1301 | select_peripheral(PA(11), PERIPH_A, 0); /* CMD */ | 1314 | select_peripheral(PA(10), PERIPH_A, 0); |
1302 | select_peripheral(PA(12), PERIPH_A, 0); /* DATA0 */ | ||
1303 | select_peripheral(PA(13), PERIPH_A, 0); /* DATA1 */ | ||
1304 | select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */ | ||
1305 | select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */ | ||
1306 | 1315 | ||
1307 | if (gpio_is_valid(data->detect_pin)) | 1316 | switch (data->slot[0].bus_width) { |
1308 | at32_select_gpio(data->detect_pin, 0); | 1317 | case 4: |
1309 | if (gpio_is_valid(data->wp_pin)) | 1318 | select_peripheral(PA(13), PERIPH_A, 0); /* DATA1 */ |
1310 | at32_select_gpio(data->wp_pin, 0); | 1319 | select_peripheral(PA(14), PERIPH_A, 0); /* DATA2 */ |
1320 | select_peripheral(PA(15), PERIPH_A, 0); /* DATA3 */ | ||
1321 | /* fall through */ | ||
1322 | case 1: | ||
1323 | select_peripheral(PA(11), PERIPH_A, 0); /* CMD */ | ||
1324 | select_peripheral(PA(12), PERIPH_A, 0); /* DATA0 */ | ||
1325 | |||
1326 | if (gpio_is_valid(data->slot[0].detect_pin)) | ||
1327 | at32_select_gpio(data->slot[0].detect_pin, 0); | ||
1328 | if (gpio_is_valid(data->slot[0].wp_pin)) | ||
1329 | at32_select_gpio(data->slot[0].wp_pin, 0); | ||
1330 | break; | ||
1331 | case 0: | ||
1332 | /* Slot is unused */ | ||
1333 | break; | ||
1334 | default: | ||
1335 | goto fail; | ||
1336 | } | ||
1337 | |||
1338 | switch (data->slot[1].bus_width) { | ||
1339 | case 4: | ||
1340 | select_peripheral(PB(8), PERIPH_B, 0); /* DATA1 */ | ||
1341 | select_peripheral(PB(9), PERIPH_B, 0); /* DATA2 */ | ||
1342 | select_peripheral(PB(10), PERIPH_B, 0); /* DATA3 */ | ||
1343 | /* fall through */ | ||
1344 | case 1: | ||
1345 | select_peripheral(PB(6), PERIPH_B, 0); /* CMD */ | ||
1346 | select_peripheral(PB(7), PERIPH_B, 0); /* DATA0 */ | ||
1347 | |||
1348 | if (gpio_is_valid(data->slot[1].detect_pin)) | ||
1349 | at32_select_gpio(data->slot[1].detect_pin, 0); | ||
1350 | if (gpio_is_valid(data->slot[1].wp_pin)) | ||
1351 | at32_select_gpio(data->slot[1].wp_pin, 0); | ||
1352 | break; | ||
1353 | case 0: | ||
1354 | /* Slot is unused */ | ||
1355 | break; | ||
1356 | default: | ||
1357 | if (!data->slot[0].bus_width) | ||
1358 | goto fail; | ||
1359 | |||
1360 | data->slot[1].bus_width = 0; | ||
1361 | break; | ||
1362 | } | ||
1311 | 1363 | ||
1312 | atmel_mci0_pclk.dev = &pdev->dev; | 1364 | atmel_mci0_pclk.dev = &pdev->dev; |
1313 | 1365 | ||
diff --git a/arch/ia64/include/asm/sections.h b/arch/ia64/include/asm/sections.h index f66799891036..1a873b36a4a1 100644 --- a/arch/ia64/include/asm/sections.h +++ b/arch/ia64/include/asm/sections.h | |||
@@ -11,6 +11,9 @@ | |||
11 | #include <asm-generic/sections.h> | 11 | #include <asm-generic/sections.h> |
12 | 12 | ||
13 | extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; | 13 | extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[]; |
14 | #ifdef CONFIG_SMP | ||
15 | extern char __cpu0_per_cpu[]; | ||
16 | #endif | ||
14 | extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; | 17 | extern char __start___vtop_patchlist[], __end___vtop_patchlist[]; |
15 | extern char __start___rse_patchlist[], __end___rse_patchlist[]; | 18 | extern char __start___rse_patchlist[], __end___rse_patchlist[]; |
16 | extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; | 19 | extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[]; |
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index d45f215bc8fc..51b75cea7018 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
@@ -1232,9 +1232,10 @@ efi_initialize_iomem_resources(struct resource *code_resource, | |||
1232 | if (md->attribute & EFI_MEMORY_WP) { | 1232 | if (md->attribute & EFI_MEMORY_WP) { |
1233 | name = "System ROM"; | 1233 | name = "System ROM"; |
1234 | flags |= IORESOURCE_READONLY; | 1234 | flags |= IORESOURCE_READONLY; |
1235 | } else { | 1235 | } else if (md->attribute == EFI_MEMORY_UC) |
1236 | name = "Uncached RAM"; | ||
1237 | else | ||
1236 | name = "System RAM"; | 1238 | name = "System RAM"; |
1237 | } | ||
1238 | break; | 1239 | break; |
1239 | 1240 | ||
1240 | case EFI_ACPI_MEMORY_NVS: | 1241 | case EFI_ACPI_MEMORY_NVS: |
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 8bdea8eb62e3..66e491d8baac 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S | |||
@@ -367,16 +367,17 @@ start_ap: | |||
367 | ;; | 367 | ;; |
368 | #else | 368 | #else |
369 | (isAP) br.few 2f | 369 | (isAP) br.few 2f |
370 | mov r20=r19 | 370 | movl r20=__cpu0_per_cpu |
371 | sub r19=r19,r18 | ||
372 | ;; | 371 | ;; |
373 | shr.u r18=r18,3 | 372 | shr.u r18=r18,3 |
374 | 1: | 373 | 1: |
375 | ld8 r21=[r20],8;; | 374 | ld8 r21=[r19],8;; |
376 | st8[r19]=r21,8 | 375 | st8[r20]=r21,8 |
377 | adds r18=-1,r18;; | 376 | adds r18=-1,r18;; |
378 | cmp4.lt p7,p6=0,r18 | 377 | cmp4.lt p7,p6=0,r18 |
379 | (p7) br.cond.dptk.few 1b | 378 | (p7) br.cond.dptk.few 1b |
379 | mov r19=r20 | ||
380 | ;; | ||
380 | 2: | 381 | 2: |
381 | #endif | 382 | #endif |
382 | tpa r19=r19 | 383 | tpa r19=r19 |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index c27d5b2c182b..de636b215677 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -616,7 +616,9 @@ setup_arch (char **cmdline_p) | |||
616 | ia64_mca_init(); | 616 | ia64_mca_init(); |
617 | 617 | ||
618 | platform_setup(cmdline_p); | 618 | platform_setup(cmdline_p); |
619 | #ifndef CONFIG_IA64_HP_SIM | ||
619 | check_sal_cache_flush(); | 620 | check_sal_cache_flush(); |
621 | #endif | ||
620 | paging_init(); | 622 | paging_init(); |
621 | } | 623 | } |
622 | 624 | ||
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index de71da811cd6..10a7d47e8510 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S | |||
@@ -215,9 +215,6 @@ SECTIONS | |||
215 | /* Per-cpu data: */ | 215 | /* Per-cpu data: */ |
216 | percpu : { } :percpu | 216 | percpu : { } :percpu |
217 | . = ALIGN(PERCPU_PAGE_SIZE); | 217 | . = ALIGN(PERCPU_PAGE_SIZE); |
218 | #ifdef CONFIG_SMP | ||
219 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
220 | #endif | ||
221 | __phys_per_cpu_start = .; | 218 | __phys_per_cpu_start = .; |
222 | .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) | 219 | .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET) |
223 | { | 220 | { |
@@ -233,6 +230,11 @@ SECTIONS | |||
233 | data : { } :data | 230 | data : { } :data |
234 | .data : AT(ADDR(.data) - LOAD_OFFSET) | 231 | .data : AT(ADDR(.data) - LOAD_OFFSET) |
235 | { | 232 | { |
233 | #ifdef CONFIG_SMP | ||
234 | . = ALIGN(PERCPU_PAGE_SIZE); | ||
235 | __cpu0_per_cpu = .; | ||
236 | . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ | ||
237 | #endif | ||
236 | DATA_DATA | 238 | DATA_DATA |
237 | *(.data1) | 239 | *(.data1) |
238 | *(.gnu.linkonce.d*) | 240 | *(.gnu.linkonce.d*) |
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index e566ff43884a..0ee085efbe29 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c | |||
@@ -163,7 +163,7 @@ per_cpu_init (void) | |||
163 | * get_zeroed_page(). | 163 | * get_zeroed_page(). |
164 | */ | 164 | */ |
165 | if (first_time) { | 165 | if (first_time) { |
166 | void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; | 166 | void *cpu0_data = __cpu0_per_cpu; |
167 | 167 | ||
168 | first_time=0; | 168 | first_time=0; |
169 | 169 | ||
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 78026aabaa7f..d8c5fcd89e5b 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -144,7 +144,7 @@ static void *per_cpu_node_setup(void *cpu_data, int node) | |||
144 | 144 | ||
145 | for_each_possible_early_cpu(cpu) { | 145 | for_each_possible_early_cpu(cpu) { |
146 | if (cpu == 0) { | 146 | if (cpu == 0) { |
147 | void *cpu0_data = __phys_per_cpu_start - PERCPU_PAGE_SIZE; | 147 | void *cpu0_data = __cpu0_per_cpu; |
148 | __per_cpu_offset[cpu] = (char*)cpu0_data - | 148 | __per_cpu_offset[cpu] = (char*)cpu0_data - |
149 | __per_cpu_start; | 149 | __per_cpu_start; |
150 | } else if (node == node_cpuid[cpu].nid) { | 150 | } else if (node == node_cpuid[cpu].nid) { |
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index a5f864c445b2..f57113f1f892 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -216,10 +216,6 @@ config MEMORY_SIZE | |||
216 | default "01000000" if PLAT_M32104UT | 216 | default "01000000" if PLAT_M32104UT |
217 | default "00800000" if PLAT_OAKS32R | 217 | default "00800000" if PLAT_OAKS32R |
218 | 218 | ||
219 | config NOHIGHMEM | ||
220 | bool | ||
221 | default y | ||
222 | |||
223 | config ARCH_DISCONTIGMEM_ENABLE | 219 | config ARCH_DISCONTIGMEM_ENABLE |
224 | bool "Internal RAM Support" | 220 | bool "Internal RAM Support" |
225 | depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104 | 221 | depends on CHIP_M32700 || CHIP_M32102 || CHIP_VDEC2 || CHIP_OPSP || CHIP_M32104 |
@@ -410,11 +406,7 @@ config PCI_DIRECT | |||
410 | source "drivers/pci/Kconfig" | 406 | source "drivers/pci/Kconfig" |
411 | 407 | ||
412 | config ISA | 408 | config ISA |
413 | bool "ISA support" | 409 | bool |
414 | help | ||
415 | Find out whether you have ISA slots on your motherboard. ISA is the | ||
416 | name of a bus system, i.e. the way the CPU talks to the other stuff | ||
417 | inside your box. If you have ISA, say Y, otherwise N. | ||
418 | 410 | ||
419 | source "drivers/pcmcia/Kconfig" | 411 | source "drivers/pcmcia/Kconfig" |
420 | 412 | ||
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S index d4eaa2fd1818..612d35b082a6 100644 --- a/arch/m32r/kernel/entry.S +++ b/arch/m32r/kernel/entry.S | |||
@@ -143,7 +143,7 @@ ret_from_intr: | |||
143 | and3 r4, r4, #0x8000 ; check BSM bit | 143 | and3 r4, r4, #0x8000 ; check BSM bit |
144 | #endif | 144 | #endif |
145 | beqz r4, resume_kernel | 145 | beqz r4, resume_kernel |
146 | ENTRY(resume_userspace) | 146 | resume_userspace: |
147 | DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt | 147 | DISABLE_INTERRUPTS(r4) ; make sure we don't miss an interrupt |
148 | ; setting need_resched or sigpending | 148 | ; setting need_resched or sigpending |
149 | ; between sampling and the iret | 149 | ; between sampling and the iret |
diff --git a/arch/m32r/kernel/head.S b/arch/m32r/kernel/head.S index dab7436d7bbe..40180778a5c7 100644 --- a/arch/m32r/kernel/head.S +++ b/arch/m32r/kernel/head.S | |||
@@ -29,7 +29,6 @@ __INITDATA | |||
29 | .global _end | 29 | .global _end |
30 | ENTRY(stext) | 30 | ENTRY(stext) |
31 | ENTRY(_stext) | 31 | ENTRY(_stext) |
32 | ENTRY(startup_32) | ||
33 | /* Setup up the stack pointer */ | 32 | /* Setup up the stack pointer */ |
34 | LDIMM (r0, spi_stack_top) | 33 | LDIMM (r0, spi_stack_top) |
35 | LDIMM (r1, spu_stack_top) | 34 | LDIMM (r1, spu_stack_top) |
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index d0c5b0b7da2f..2aeae4670098 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c | |||
@@ -22,9 +22,6 @@ | |||
22 | #include <linux/module.h> | 22 | #include <linux/module.h> |
23 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | 24 | ||
25 | atomic_t irq_err_count; | ||
26 | atomic_t irq_mis_count; | ||
27 | |||
28 | /* | 25 | /* |
29 | * Generic, controller-independent functions: | 26 | * Generic, controller-independent functions: |
30 | */ | 27 | */ |
@@ -63,9 +60,6 @@ int show_interrupts(struct seq_file *p, void *v) | |||
63 | seq_putc(p, '\n'); | 60 | seq_putc(p, '\n'); |
64 | skip: | 61 | skip: |
65 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); | 62 | spin_unlock_irqrestore(&irq_desc[i].lock, flags); |
66 | } else if (i == NR_IRQS) { | ||
67 | seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); | ||
68 | seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count)); | ||
69 | } | 63 | } |
70 | return 0; | 64 | return 0; |
71 | } | 65 | } |
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c index 16bcb189a383..22624b51d4d3 100644 --- a/arch/m32r/kernel/m32r_ksyms.c +++ b/arch/m32r/kernel/m32r_ksyms.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/delay.h> | 14 | #include <asm/delay.h> |
15 | #include <asm/irq.h> | 15 | #include <asm/irq.h> |
16 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
17 | #include <asm/pgtable.h> | ||
17 | 18 | ||
18 | /* platform dependent support */ | 19 | /* platform dependent support */ |
19 | EXPORT_SYMBOL(boot_cpu_data); | 20 | EXPORT_SYMBOL(boot_cpu_data); |
@@ -65,6 +66,7 @@ EXPORT_SYMBOL(memset); | |||
65 | EXPORT_SYMBOL(copy_page); | 66 | EXPORT_SYMBOL(copy_page); |
66 | EXPORT_SYMBOL(clear_page); | 67 | EXPORT_SYMBOL(clear_page); |
67 | EXPORT_SYMBOL(strlen); | 68 | EXPORT_SYMBOL(strlen); |
69 | EXPORT_SYMBOL(empty_zero_page); | ||
68 | 70 | ||
69 | EXPORT_SYMBOL(_inb); | 71 | EXPORT_SYMBOL(_inb); |
70 | EXPORT_SYMBOL(_inw); | 72 | EXPORT_SYMBOL(_inw); |
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c index a689e2978b6e..5be4faaf5b1c 100644 --- a/arch/m32r/kernel/process.c +++ b/arch/m32r/kernel/process.c | |||
@@ -35,8 +35,6 @@ | |||
35 | 35 | ||
36 | #include <linux/err.h> | 36 | #include <linux/err.h> |
37 | 37 | ||
38 | static int hlt_counter=0; | ||
39 | |||
40 | /* | 38 | /* |
41 | * Return saved PC of a blocked thread. | 39 | * Return saved PC of a blocked thread. |
42 | */ | 40 | */ |
@@ -48,31 +46,16 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
48 | /* | 46 | /* |
49 | * Powermanagement idle function, if any.. | 47 | * Powermanagement idle function, if any.. |
50 | */ | 48 | */ |
51 | void (*pm_idle)(void) = NULL; | 49 | static void (*pm_idle)(void) = NULL; |
52 | EXPORT_SYMBOL(pm_idle); | ||
53 | 50 | ||
54 | void (*pm_power_off)(void) = NULL; | 51 | void (*pm_power_off)(void) = NULL; |
55 | EXPORT_SYMBOL(pm_power_off); | 52 | EXPORT_SYMBOL(pm_power_off); |
56 | 53 | ||
57 | void disable_hlt(void) | ||
58 | { | ||
59 | hlt_counter++; | ||
60 | } | ||
61 | |||
62 | EXPORT_SYMBOL(disable_hlt); | ||
63 | |||
64 | void enable_hlt(void) | ||
65 | { | ||
66 | hlt_counter--; | ||
67 | } | ||
68 | |||
69 | EXPORT_SYMBOL(enable_hlt); | ||
70 | |||
71 | /* | 54 | /* |
72 | * We use this is we don't have any better | 55 | * We use this is we don't have any better |
73 | * idle routine.. | 56 | * idle routine.. |
74 | */ | 57 | */ |
75 | void default_idle(void) | 58 | static void default_idle(void) |
76 | { | 59 | { |
77 | /* M32R_FIXME: Please use "cpu_sleep" mode. */ | 60 | /* M32R_FIXME: Please use "cpu_sleep" mode. */ |
78 | cpu_relax(); | 61 | cpu_relax(); |
@@ -260,15 +243,6 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long spu, | |||
260 | return 0; | 243 | return 0; |
261 | } | 244 | } |
262 | 245 | ||
263 | /* | ||
264 | * Capture the user space registers if the task is not running (in user space) | ||
265 | */ | ||
266 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | ||
267 | { | ||
268 | /* M32R_FIXME */ | ||
269 | return 1; | ||
270 | } | ||
271 | |||
272 | asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2, | 246 | asmlinkage int sys_fork(unsigned long r0, unsigned long r1, unsigned long r2, |
273 | unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, | 247 | unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, |
274 | struct pt_regs regs) | 248 | struct pt_regs regs) |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index 7577f971ea4e..929e5c9d3ad9 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
@@ -84,7 +84,7 @@ void smp_send_timer(void); | |||
84 | void smp_ipi_timer_interrupt(struct pt_regs *); | 84 | void smp_ipi_timer_interrupt(struct pt_regs *); |
85 | void smp_local_timer_interrupt(void); | 85 | void smp_local_timer_interrupt(void); |
86 | 86 | ||
87 | void send_IPI_allbutself(int, int); | 87 | static void send_IPI_allbutself(int, int); |
88 | static void send_IPI_mask(cpumask_t, int, int); | 88 | static void send_IPI_mask(cpumask_t, int, int); |
89 | unsigned long send_IPI_mask_phys(cpumask_t, int, int); | 89 | unsigned long send_IPI_mask_phys(cpumask_t, int, int); |
90 | 90 | ||
@@ -722,7 +722,7 @@ void smp_local_timer_interrupt(void) | |||
722 | * ---------- --- -------------------------------------------------------- | 722 | * ---------- --- -------------------------------------------------------- |
723 | * | 723 | * |
724 | *==========================================================================*/ | 724 | *==========================================================================*/ |
725 | void send_IPI_allbutself(int ipi_num, int try) | 725 | static void send_IPI_allbutself(int ipi_num, int try) |
726 | { | 726 | { |
727 | cpumask_t cpumask; | 727 | cpumask_t cpumask; |
728 | 728 | ||
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c index 994cc1556355..6ea017727cce 100644 --- a/arch/m32r/kernel/time.c +++ b/arch/m32r/kernel/time.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <asm/hw_irq.h> | 34 | #include <asm/hw_irq.h> |
35 | 35 | ||
36 | #ifdef CONFIG_SMP | 36 | #ifdef CONFIG_SMP |
37 | extern void send_IPI_allbutself(int, int); | ||
38 | extern void smp_local_timer_interrupt(void); | 37 | extern void smp_local_timer_interrupt(void); |
39 | #endif | 38 | #endif |
40 | 39 | ||
@@ -188,7 +187,7 @@ static long last_rtc_update = 0; | |||
188 | * timer_interrupt() needs to keep up the real-time clock, | 187 | * timer_interrupt() needs to keep up the real-time clock, |
189 | * as well as call the "do_timer()" routine every clocktick | 188 | * as well as call the "do_timer()" routine every clocktick |
190 | */ | 189 | */ |
191 | irqreturn_t timer_interrupt(int irq, void *dev_id) | 190 | static irqreturn_t timer_interrupt(int irq, void *dev_id) |
192 | { | 191 | { |
193 | #ifndef CONFIG_SMP | 192 | #ifndef CONFIG_SMP |
194 | profile_tick(CPU_PROFILING); | 193 | profile_tick(CPU_PROFILING); |
@@ -228,7 +227,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
228 | return IRQ_HANDLED; | 227 | return IRQ_HANDLED; |
229 | } | 228 | } |
230 | 229 | ||
231 | struct irqaction irq0 = { | 230 | static struct irqaction irq0 = { |
232 | .handler = timer_interrupt, | 231 | .handler = timer_interrupt, |
233 | .flags = IRQF_DISABLED, | 232 | .flags = IRQF_DISABLED, |
234 | .mask = CPU_MASK_NONE, | 233 | .mask = CPU_MASK_NONE, |
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 46159a4e644b..03b14e55cd89 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c | |||
@@ -61,7 +61,7 @@ extern unsigned long eit_vector[]; | |||
61 | ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \ | 61 | ((unsigned long)func - (unsigned long)eit_vector - entry*4)/4 \ |
62 | + 0xff000000UL | 62 | + 0xff000000UL |
63 | 63 | ||
64 | void set_eit_vector_entries(void) | 64 | static void set_eit_vector_entries(void) |
65 | { | 65 | { |
66 | extern void default_eit_handler(void); | 66 | extern void default_eit_handler(void); |
67 | extern void system_call(void); | 67 | extern void system_call(void); |
@@ -121,9 +121,9 @@ void __init trap_init(void) | |||
121 | cpu_init(); | 121 | cpu_init(); |
122 | } | 122 | } |
123 | 123 | ||
124 | int kstack_depth_to_print = 24; | 124 | static int kstack_depth_to_print = 24; |
125 | 125 | ||
126 | void show_trace(struct task_struct *task, unsigned long *stack) | 126 | static void show_trace(struct task_struct *task, unsigned long *stack) |
127 | { | 127 | { |
128 | unsigned long addr; | 128 | unsigned long addr; |
129 | 129 | ||
@@ -224,7 +224,7 @@ bad: | |||
224 | printk("\n"); | 224 | printk("\n"); |
225 | } | 225 | } |
226 | 226 | ||
227 | DEFINE_SPINLOCK(die_lock); | 227 | static DEFINE_SPINLOCK(die_lock); |
228 | 228 | ||
229 | void die(const char * str, struct pt_regs * regs, long err) | 229 | void die(const char * str, struct pt_regs * regs, long err) |
230 | { | 230 | { |
diff --git a/arch/m32r/lib/delay.c b/arch/m32r/lib/delay.c index 59bfc34e0d9f..ced549be80f5 100644 --- a/arch/m32r/lib/delay.c +++ b/arch/m32r/lib/delay.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/param.h> | 8 | #include <linux/param.h> |
9 | #include <linux/module.h> | ||
9 | #ifdef CONFIG_SMP | 10 | #ifdef CONFIG_SMP |
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <asm/current.h> | 12 | #include <asm/current.h> |
@@ -121,3 +122,4 @@ void __ndelay(unsigned long nsecs) | |||
121 | { | 122 | { |
122 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ | 123 | __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */ |
123 | } | 124 | } |
125 | EXPORT_SYMBOL(__ndelay); | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 49896a2a1d72..c930b8ceb418 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC | |||
1403 | depends on CPU_MIPS32_R2 | 1403 | depends on CPU_MIPS32_R2 |
1404 | #depends on CPU_MIPS64_R2 # once there is hardware ... | 1404 | #depends on CPU_MIPS64_R2 # once there is hardware ... |
1405 | depends on SYS_SUPPORTS_MULTITHREADING | 1405 | depends on SYS_SUPPORTS_MULTITHREADING |
1406 | select GENERIC_CLOCKEVENTS_BROADCAST | ||
1407 | select CPU_MIPSR2_IRQ_VI | 1406 | select CPU_MIPSR2_IRQ_VI |
1408 | select CPU_MIPSR2_IRQ_EI | 1407 | select CPU_MIPSR2_IRQ_EI |
1409 | select MIPS_MT | 1408 | select MIPS_MT |
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER | |||
1451 | Includes a loader for loading an elf relocatable object | 1450 | Includes a loader for loading an elf relocatable object |
1452 | onto another VPE and running it. | 1451 | onto another VPE and running it. |
1453 | 1452 | ||
1454 | config MIPS_MT_SMTC_INSTANT_REPLAY | ||
1455 | bool "Low-latency Dispatch of Deferred SMTC IPIs" | ||
1456 | depends on MIPS_MT_SMTC && !PREEMPT | ||
1457 | default y | ||
1458 | help | ||
1459 | SMTC pseudo-interrupts between TCs are deferred and queued | ||
1460 | if the target TC is interrupt-inhibited (IXMT). In the first | ||
1461 | SMTC prototypes, these queued IPIs were serviced on return | ||
1462 | to user mode, or on entry into the kernel idle loop. The | ||
1463 | INSTANT_REPLAY option dispatches them as part of local_irq_restore() | ||
1464 | processing, which adds runtime overhead (hence the option to turn | ||
1465 | it off), but ensures that IPIs are handled promptly even under | ||
1466 | heavy I/O interrupt load. | ||
1467 | |||
1468 | config MIPS_MT_SMTC_IM_BACKSTOP | 1453 | config MIPS_MT_SMTC_IM_BACKSTOP |
1469 | bool "Use per-TC register bits as backstop for inhibited IM bits" | 1454 | bool "Use per-TC register bits as backstop for inhibited IM bits" |
1470 | depends on MIPS_MT_SMTC | 1455 | depends on MIPS_MT_SMTC |
1471 | default y | 1456 | default n |
1472 | help | 1457 | help |
1473 | To support multiple TC microthreads acting as "CPUs" within | 1458 | To support multiple TC microthreads acting as "CPUs" within |
1474 | a VPE, VPE-wide interrupt mask bits must be specially manipulated | 1459 | a VPE, VPE-wide interrupt mask bits must be specially manipulated |
1475 | during interrupt handling. To support legacy drivers and interrupt | 1460 | during interrupt handling. To support legacy drivers and interrupt |
1476 | controller management code, SMTC has a "backstop" to track and | 1461 | controller management code, SMTC has a "backstop" to track and |
1477 | if necessary restore the interrupt mask. This has some performance | 1462 | if necessary restore the interrupt mask. This has some performance |
1478 | impact on interrupt service overhead. Disable it only if you know | 1463 | impact on interrupt service overhead. |
1479 | what you are doing. | ||
1480 | 1464 | ||
1481 | config MIPS_MT_SMTC_IRQAFF | 1465 | config MIPS_MT_SMTC_IRQAFF |
1482 | bool "Support IRQ affinity API" | 1466 | bool "Support IRQ affinity API" |
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF | |||
1486 | Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) | 1470 | Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.) |
1487 | for SMTC Linux kernel. Requires platform support, of which | 1471 | for SMTC Linux kernel. Requires platform support, of which |
1488 | an example can be found in the MIPS kernel i8259 and Malta | 1472 | an example can be found in the MIPS kernel i8259 and Malta |
1489 | platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY | 1473 | platform code. Adds some overhead to interrupt dispatch, and |
1490 | be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to | 1474 | should be used only if you know what you are doing. |
1491 | interrupt dispatch, and should be used only if you know what | ||
1492 | you are doing. | ||
1493 | 1475 | ||
1494 | config MIPS_VPE_LOADER_TOM | 1476 | config MIPS_VPE_LOADER_TOM |
1495 | bool "Load VPE program into memory hidden from linux" | 1477 | bool "Load VPE program into memory hidden from linux" |
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c index b485d94ce8a5..e660ddd611c4 100644 --- a/arch/mips/au1000/common/gpio.c +++ b/arch/mips/au1000/common/gpio.c | |||
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value) | |||
48 | { | 48 | { |
49 | gpio -= AU1XXX_GPIO_BASE; | 49 | gpio -= AU1XXX_GPIO_BASE; |
50 | 50 | ||
51 | gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio); | 51 | gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); |
52 | } | 52 | } |
53 | 53 | ||
54 | static int au1xxx_gpio2_direction_input(unsigned gpio) | 54 | static int au1xxx_gpio2_direction_input(unsigned gpio) |
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio) | |||
61 | static int au1xxx_gpio2_direction_output(unsigned gpio, int value) | 61 | static int au1xxx_gpio2_direction_output(unsigned gpio, int value) |
62 | { | 62 | { |
63 | gpio -= AU1XXX_GPIO_BASE; | 63 | gpio -= AU1XXX_GPIO_BASE; |
64 | gpio2->dir = (0x01 << gpio) | (value << gpio); | 64 | gpio2->dir |= 0x01 << gpio; |
65 | gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio); | ||
65 | return 0; | 66 | return 0; |
66 | } | 67 | } |
67 | 68 | ||
@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio) | |||
90 | static int au1xxx_gpio1_direction_output(unsigned gpio, int value) | 91 | static int au1xxx_gpio1_direction_output(unsigned gpio, int value) |
91 | { | 92 | { |
92 | gpio1->trioutclr = (0x01 & gpio); | 93 | gpio1->trioutclr = (0x01 & gpio); |
94 | au1xxx_gpio1_write(gpio, value); | ||
93 | return 0; | 95 | return 0; |
94 | } | 96 | } |
95 | 97 | ||
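A quick worked illustration of why the !!value normalization in the hunk above matters: gpio_set_value() callers may legally pass any non-zero value to mean "drive high" (plain C, outside the kernel):

        #include <stdio.h>

        int main(void)
        {
                unsigned gpio = 3;
                int value = 4;                          /* caller means "drive high" */

                printf("%#x\n", value << gpio);         /* 0x20: sets bit 5, wrong */
                printf("%#x\n", (!!value) << gpio);     /* 0x8: sets bit 3, as intended */
                return 0;
        }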
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 706f93974797..25775cb54000 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | |||
10 | 10 | ||
11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | 12 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o |
13 | obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o | ||
13 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | 14 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o |
14 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | 15 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o |
15 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | 16 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o |
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 24a2d907aa0d..4a4c59f2737a 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -12,6 +12,14 @@ | |||
12 | 12 | ||
13 | #include <asm/smtc_ipi.h> | 13 | #include <asm/smtc_ipi.h> |
14 | #include <asm/time.h> | 14 | #include <asm/time.h> |
15 | #include <asm/cevt-r4k.h> | ||
16 | |||
17 | /* | ||
18 | * The SMTC Kernel for the 34K, 1004K, et. al. replaces several | ||
19 | * of these routines with SMTC-specific variants. | ||
20 | */ | ||
21 | |||
22 | #ifndef CONFIG_MIPS_MT_SMTC | ||
15 | 23 | ||
16 | static int mips_next_event(unsigned long delta, | 24 | static int mips_next_event(unsigned long delta, |
17 | struct clock_event_device *evt) | 25 | struct clock_event_device *evt) |
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta, | |||
19 | unsigned int cnt; | 27 | unsigned int cnt; |
20 | int res; | 28 | int res; |
21 | 29 | ||
22 | #ifdef CONFIG_MIPS_MT_SMTC | ||
23 | { | ||
24 | unsigned long flags, vpflags; | ||
25 | local_irq_save(flags); | ||
26 | vpflags = dvpe(); | ||
27 | #endif | ||
28 | cnt = read_c0_count(); | 30 | cnt = read_c0_count(); |
29 | cnt += delta; | 31 | cnt += delta; |
30 | write_c0_compare(cnt); | 32 | write_c0_compare(cnt); |
31 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; | 33 | res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0; |
32 | #ifdef CONFIG_MIPS_MT_SMTC | ||
33 | evpe(vpflags); | ||
34 | local_irq_restore(flags); | ||
35 | } | ||
36 | #endif | ||
37 | return res; | 34 | return res; |
38 | } | 35 | } |
39 | 36 | ||
40 | static void mips_set_mode(enum clock_event_mode mode, | 37 | #endif /* CONFIG_MIPS_MT_SMTC */ |
41 | struct clock_event_device *evt) | 38 | |
39 | void mips_set_clock_mode(enum clock_event_mode mode, | ||
40 | struct clock_event_device *evt) | ||
42 | { | 41 | { |
43 | /* Nothing to do ... */ | 42 | /* Nothing to do ... */ |
44 | } | 43 | } |
45 | 44 | ||
46 | static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | 45 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
47 | static int cp0_timer_irq_installed; | 46 | int cp0_timer_irq_installed; |
48 | 47 | ||
49 | /* | 48 | #ifndef CONFIG_MIPS_MT_SMTC |
50 | * Timer ack for an R4k-compatible timer of a known frequency. | ||
51 | */ | ||
52 | static void c0_timer_ack(void) | ||
53 | { | ||
54 | write_c0_compare(read_c0_compare()); | ||
55 | } | ||
56 | 49 | ||
57 | /* | 50 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
58 | * Possibly handle a performance counter interrupt. | ||
59 | * Return true if the timer interrupt should not be checked | ||
60 | */ | ||
61 | static inline int handle_perf_irq(int r2) | ||
62 | { | ||
63 | /* | ||
64 | * The performance counter overflow interrupt may be shared with the | ||
65 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
66 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
67 | * and we can't reliably determine if a counter interrupt has also | ||
68 | * happened (!r2) then don't check for a timer interrupt. | ||
69 | */ | ||
70 | return (cp0_perfcount_irq < 0) && | ||
71 | perf_irq() == IRQ_HANDLED && | ||
72 | !r2; | ||
73 | } | ||
74 | |||
75 | static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
76 | { | 51 | { |
77 | const int r2 = cpu_has_mips_r2; | 52 | const int r2 = cpu_has_mips_r2; |
78 | struct clock_event_device *cd; | 53 | struct clock_event_device *cd; |
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | |||
93 | * interrupt. Being the paranoiacs we are we check anyway. | 68 | * interrupt. Being the paranoiacs we are we check anyway. |
94 | */ | 69 | */ |
95 | if (!r2 || (read_c0_cause() & (1 << 30))) { | 70 | if (!r2 || (read_c0_cause() & (1 << 30))) { |
96 | c0_timer_ack(); | 71 | /* Clear Count/Compare Interrupt */ |
97 | #ifdef CONFIG_MIPS_MT_SMTC | 72 | write_c0_compare(read_c0_compare()); |
98 | if (cpu_data[cpu].vpe_id) | ||
99 | goto out; | ||
100 | cpu = 0; | ||
101 | #endif | ||
102 | cd = &per_cpu(mips_clockevent_device, cpu); | 73 | cd = &per_cpu(mips_clockevent_device, cpu); |
103 | cd->event_handler(cd); | 74 | cd->event_handler(cd); |
104 | } | 75 | } |
@@ -107,65 +78,16 @@ out: | |||
107 | return IRQ_HANDLED; | 78 | return IRQ_HANDLED; |
108 | } | 79 | } |
109 | 80 | ||
110 | static struct irqaction c0_compare_irqaction = { | 81 | #endif /* Not CONFIG_MIPS_MT_SMTC */ |
82 | |||
83 | struct irqaction c0_compare_irqaction = { | ||
111 | .handler = c0_compare_interrupt, | 84 | .handler = c0_compare_interrupt, |
112 | #ifdef CONFIG_MIPS_MT_SMTC | ||
113 | .flags = IRQF_DISABLED, | ||
114 | #else | ||
115 | .flags = IRQF_DISABLED | IRQF_PERCPU, | 85 | .flags = IRQF_DISABLED | IRQF_PERCPU, |
116 | #endif | ||
117 | .name = "timer", | 86 | .name = "timer", |
118 | }; | 87 | }; |
119 | 88 | ||
120 | #ifdef CONFIG_MIPS_MT_SMTC | ||
121 | DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | ||
122 | |||
123 | static void smtc_set_mode(enum clock_event_mode mode, | ||
124 | struct clock_event_device *evt) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static void mips_broadcast(cpumask_t mask) | ||
129 | { | ||
130 | unsigned int cpu; | ||
131 | |||
132 | for_each_cpu_mask(cpu, mask) | ||
133 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
134 | } | ||
135 | |||
136 | static void setup_smtc_dummy_clockevent_device(void) | ||
137 | { | ||
138 | //uint64_t mips_freq = mips_hpt_^frequency; | ||
139 | unsigned int cpu = smp_processor_id(); | ||
140 | struct clock_event_device *cd; | ||
141 | 89 | ||
142 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 90 | void mips_event_handler(struct clock_event_device *dev) |
143 | |||
144 | cd->name = "SMTC"; | ||
145 | cd->features = CLOCK_EVT_FEAT_DUMMY; | ||
146 | |||
147 | /* Calculate the min / max delta */ | ||
148 | cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
149 | cd->shift = 0; //32; | ||
150 | cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd); | ||
151 | cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd); | ||
152 | |||
153 | cd->rating = 200; | ||
154 | cd->irq = 17; //-1; | ||
155 | // if (cpu) | ||
156 | // cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu); | ||
157 | // else | ||
158 | cd->cpumask = cpumask_of_cpu(cpu); | ||
159 | |||
160 | cd->set_mode = smtc_set_mode; | ||
161 | |||
162 | cd->broadcast = mips_broadcast; | ||
163 | |||
164 | clockevents_register_device(cd); | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | static void mips_event_handler(struct clock_event_device *dev) | ||
169 | { | 91 | { |
170 | } | 92 | } |
171 | 93 | ||
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void) | |||
177 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; | 99 | return (read_c0_cause() >> cp0_compare_irq) & 0x100; |
178 | } | 100 | } |
179 | 101 | ||
180 | static int c0_compare_int_usable(void) | 102 | /* |
103 | * Compare interrupt can be routed and latched outside the core, | ||
104 | * so a single execution hazard barrier may not be enough to give | ||
105 | it time to clear as seen in the Cause register. 4 times the | ||
106 | * pipeline depth seems reasonably conservative, and empirically | ||
107 | * works better in configurations with high CPU/bus clock ratios. | ||
108 | */ | ||
109 | |||
110 | #define compare_change_hazard() \ | ||
111 | do { \ | ||
112 | irq_disable_hazard(); \ | ||
113 | irq_disable_hazard(); \ | ||
114 | irq_disable_hazard(); \ | ||
115 | irq_disable_hazard(); \ | ||
116 | } while (0) | ||
117 | |||
118 | int c0_compare_int_usable(void) | ||
181 | { | 119 | { |
182 | unsigned int delta; | 120 | unsigned int delta; |
183 | unsigned int cnt; | 121 | unsigned int cnt; |
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void) | |||
187 | */ | 125 | */ |
188 | if (c0_compare_int_pending()) { | 126 | if (c0_compare_int_pending()) { |
189 | write_c0_compare(read_c0_count()); | 127 | write_c0_compare(read_c0_count()); |
190 | irq_disable_hazard(); | 128 | compare_change_hazard(); |
191 | if (c0_compare_int_pending()) | 129 | if (c0_compare_int_pending()) |
192 | return 0; | 130 | return 0; |
193 | } | 131 | } |
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void) | |||
196 | cnt = read_c0_count(); | 134 | cnt = read_c0_count(); |
197 | cnt += delta; | 135 | cnt += delta; |
198 | write_c0_compare(cnt); | 136 | write_c0_compare(cnt); |
199 | irq_disable_hazard(); | 137 | compare_change_hazard(); |
200 | if ((int)(read_c0_count() - cnt) < 0) | 138 | if ((int)(read_c0_count() - cnt) < 0) |
201 | break; | 139 | break; |
202 | /* increase delta if the timer was already expired */ | 140 | /* increase delta if the timer was already expired */ |
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void) | |||
205 | while ((int)(read_c0_count() - cnt) <= 0) | 143 | while ((int)(read_c0_count() - cnt) <= 0) |
206 | ; /* Wait for expiry */ | 144 | ; /* Wait for expiry */ |
207 | 145 | ||
146 | compare_change_hazard(); | ||
208 | if (!c0_compare_int_pending()) | 147 | if (!c0_compare_int_pending()) |
209 | return 0; | 148 | return 0; |
210 | 149 | ||
211 | write_c0_compare(read_c0_count()); | 150 | write_c0_compare(read_c0_count()); |
212 | irq_disable_hazard(); | 151 | compare_change_hazard(); |
213 | if (c0_compare_int_pending()) | 152 | if (c0_compare_int_pending()) |
214 | return 0; | 153 | return 0; |
215 | 154 | ||
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void) | |||
219 | return 1; | 158 | return 1; |
220 | } | 159 | } |
221 | 160 | ||
161 | #ifndef CONFIG_MIPS_MT_SMTC | ||
162 | |||
222 | int __cpuinit mips_clockevent_init(void) | 163 | int __cpuinit mips_clockevent_init(void) |
223 | { | 164 | { |
224 | uint64_t mips_freq = mips_hpt_frequency; | 165 | uint64_t mips_freq = mips_hpt_frequency; |
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void) | |||
229 | if (!cpu_has_counter || !mips_hpt_frequency) | 170 | if (!cpu_has_counter || !mips_hpt_frequency) |
230 | return -ENXIO; | 171 | return -ENXIO; |
231 | 172 | ||
232 | #ifdef CONFIG_MIPS_MT_SMTC | ||
233 | setup_smtc_dummy_clockevent_device(); | ||
234 | |||
235 | /* | ||
236 | * On SMTC we only register VPE0's compare interrupt as clockevent | ||
237 | * device. | ||
238 | */ | ||
239 | if (cpu) | ||
240 | return 0; | ||
241 | #endif | ||
242 | |||
243 | if (!c0_compare_int_usable()) | 173 | if (!c0_compare_int_usable()) |
244 | return -ENXIO; | 174 | return -ENXIO; |
245 | 175 | ||
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void) | |||
265 | 195 | ||
266 | cd->rating = 300; | 196 | cd->rating = 300; |
267 | cd->irq = irq; | 197 | cd->irq = irq; |
268 | #ifdef CONFIG_MIPS_MT_SMTC | ||
269 | cd->cpumask = CPU_MASK_ALL; | ||
270 | #else | ||
271 | cd->cpumask = cpumask_of_cpu(cpu); | 198 | cd->cpumask = cpumask_of_cpu(cpu); |
272 | #endif | ||
273 | cd->set_next_event = mips_next_event; | 199 | cd->set_next_event = mips_next_event; |
274 | cd->set_mode = mips_set_mode; | 200 | cd->set_mode = mips_set_clock_mode; |
275 | cd->event_handler = mips_event_handler; | 201 | cd->event_handler = mips_event_handler; |
276 | 202 | ||
277 | clockevents_register_device(cd); | 203 | clockevents_register_device(cd); |
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void) | |||
281 | 207 | ||
282 | cp0_timer_irq_installed = 1; | 208 | cp0_timer_irq_installed = 1; |
283 | 209 | ||
284 | #ifdef CONFIG_MIPS_MT_SMTC | ||
285 | #define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq) | ||
286 | setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT); | ||
287 | #else | ||
288 | setup_irq(irq, &c0_compare_irqaction); | 210 | setup_irq(irq, &c0_compare_irqaction); |
289 | #endif | ||
290 | 211 | ||
291 | return 0; | 212 | return 0; |
292 | } | 213 | } |
214 | |||
215 | #endif /* Not CONFIG_MIPS_MT_SMTC */ | ||
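
A note on the c0_compare_int_pending() test kept above: the Cause register's IP bits start at bit 8, so shifting Cause right by cp0_compare_irq and masking with 0x100 is a compact way of testing bit (8 + cp0_compare_irq). The stand-alone sketch below models only that bit test; the register value and the cp0_compare_irq number are made up for illustration (the real ones come from read_c0_cause() and CPU probing).

#include <stdint.h>
#include <stdio.h>

/* IP0 lives at bit 8 of the Cause register, so IP[n] is bit (8 + n). */
static int compare_int_pending(uint32_t cause, int cp0_compare_irq)
{
	return (cause >> cp0_compare_irq) & 0x100;
}

int main(void)
{
	int cp0_compare_irq = 7;	/* the Count/Compare interrupt is usually IP7 */
	uint32_t cause = 1u << (8 + cp0_compare_irq);	/* pretend IP7 is raised */

	printf("pending: %#x\n", compare_int_pending(cause, cp0_compare_irq));
	printf("after clearing: %#x\n", compare_int_pending(0, cp0_compare_irq));
	return 0;
}
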
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c new file mode 100644 index 000000000000..5162fe4b5952 --- /dev/null +++ b/arch/mips/kernel/cevt-smtc.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
8 | * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl | ||
9 | */ | ||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/percpu.h> | ||
13 | |||
14 | #include <asm/smtc_ipi.h> | ||
15 | #include <asm/time.h> | ||
16 | #include <asm/cevt-r4k.h> | ||
17 | |||
18 | /* | ||
19 | * Variant clock event timer support for SMTC on MIPS 34K, 1004K | ||
20 | * or other MIPS MT cores. | ||
21 | * | ||
22 | * Notes on SMTC Support: | ||
23 | * | ||
24 | * SMTC has multiple microthread TCs pretending to be Linux CPUs. | ||
25 | * But there's only one Count/Compare pair per VPE, and Compare | ||
26 | * interrupts are taken opportunistically by available TCs | ||
27 | * bound to the VPE with the Count register. The new timer | ||
28 | * framework provides for global broadcasts, but we really | ||
29 | * want VPE-level multicasts for best behavior. So instead | ||
30 | * of invoking the high-level clock-event broadcast code, | ||
31 | * this version of SMTC support uses the historical SMTC | ||
32 | * multicast mechanisms "under the hood", appearing to the | ||
33 | * generic clock layer as if the interrupts are per-CPU. | ||
34 | * | ||
35 | * The approach taken here is to maintain a set of NR_CPUS | ||
36 | * virtual timers, and track which "CPU" needs to be alerted | ||
37 | * at each event. | ||
38 | * | ||
39 | * It's unlikely that we'll see a MIPS MT core with more than | ||
40 | * 2 VPEs, but we *know* that we won't need to handle more | ||
41 | * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements | ||
42 | * are always going to be overkill, but always going to be enough. | ||
43 | */ | ||
44 | |||
45 | unsigned long smtc_nexttime[NR_CPUS][NR_CPUS]; | ||
46 | static int smtc_nextinvpe[NR_CPUS]; | ||
47 | |||
48 | /* | ||
49 | * Timestamps stored are absolute values to be programmed | ||
50 | * into Count register. Valid timestamps will never be zero. | ||
51 | * If a Zero Count value is actually calculated, it is converted | ||
52 | * to be a 1, which will introduce one or two CPU cycles of error | ||
53 | * roughly once every four billion events, which at 1000 HZ means | ||
54 | * about once every 50 days. If that's actually a problem, one | ||
55 | * could alternate squashing 0 to 1 and to -1. | ||
56 | */ | ||
57 | |||
58 | #define MAKEVALID(x) (((x) == 0L) ? 1L : (x)) | ||
59 | #define ISVALID(x) ((x) != 0L) | ||
60 | |||
61 | /* | ||
62 | * Time comparison is subtle, as it's really truncated | ||
63 | * modular arithmetic. | ||
64 | */ | ||
65 | |||
66 | #define IS_SOONER(a, b, reference) \ | ||
67 | (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference))) | ||
68 | |||
69 | /* | ||
70 | * CATCHUP_INCREMENT, used when the function falls behind the counter. | ||
71 | * Could be an increasing function instead of a constant. | ||
72 | */ | ||
73 | |||
74 | #define CATCHUP_INCREMENT 64 | ||
75 | |||
76 | static int mips_next_event(unsigned long delta, | ||
77 | struct clock_event_device *evt) | ||
78 | { | ||
79 | unsigned long flags; | ||
80 | unsigned int mtflags; | ||
81 | unsigned long timestamp, reference, previous; | ||
82 | unsigned long nextcomp = 0L; | ||
83 | int vpe = current_cpu_data.vpe_id; | ||
84 | int cpu = smp_processor_id(); | ||
85 | local_irq_save(flags); | ||
86 | mtflags = dmt(); | ||
87 | |||
88 | /* | ||
89 | * Maintain the per-TC virtual timer | ||
90 | * and program the per-VPE shared Count register | ||
91 | * as appropriate here... | ||
92 | */ | ||
93 | reference = (unsigned long)read_c0_count(); | ||
94 | timestamp = MAKEVALID(reference + delta); | ||
95 | /* | ||
96 | * To really model the clock, we have to catch the case | ||
97 | * where the current next-in-VPE timestamp is the old | ||
98 | * timestamp for the calling CPU, but the new value is | ||
99 | * in fact later. In that case, we have to do a full | ||
100 | * scan and discover the new next-in-VPE CPU id and | ||
101 | * timestamp. | ||
102 | */ | ||
103 | previous = smtc_nexttime[vpe][cpu]; | ||
104 | if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous) | ||
105 | && IS_SOONER(previous, timestamp, reference)) { | ||
106 | int i; | ||
107 | int soonest = cpu; | ||
108 | |||
109 | /* | ||
110 | * Update timestamp array here, so that new | ||
111 | * value gets considered along with those of | ||
112 | * other virtual CPUs on the VPE. | ||
113 | */ | ||
114 | smtc_nexttime[vpe][cpu] = timestamp; | ||
115 | for_each_online_cpu(i) { | ||
116 | if (ISVALID(smtc_nexttime[vpe][i]) | ||
117 | && IS_SOONER(smtc_nexttime[vpe][i], | ||
118 | smtc_nexttime[vpe][soonest], reference)) { | ||
119 | soonest = i; | ||
120 | } | ||
121 | } | ||
122 | smtc_nextinvpe[vpe] = soonest; | ||
123 | nextcomp = smtc_nexttime[vpe][soonest]; | ||
124 | /* | ||
125 | * Otherwise, we don't have to process the whole array rank, | ||
126 | * we just have to see if the event horizon has gotten closer. | ||
127 | */ | ||
128 | } else { | ||
129 | if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) || | ||
130 | IS_SOONER(timestamp, | ||
131 | smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) { | ||
132 | smtc_nextinvpe[vpe] = cpu; | ||
133 | nextcomp = timestamp; | ||
134 | } | ||
135 | /* | ||
136 | * Since next-in-VPE may be the same as the executing | ||
137 | * virtual CPU, we update the array *after* checking | ||
138 | * its value. | ||
139 | */ | ||
140 | smtc_nexttime[vpe][cpu] = timestamp; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * It may be that, in fact, we don't need to update Compare, | ||
145 | * but if we do, we want to make sure we didn't fall into | ||
146 | * a crack just behind Count. | ||
147 | */ | ||
148 | if (ISVALID(nextcomp)) { | ||
149 | write_c0_compare(nextcomp); | ||
150 | ehb(); | ||
151 | /* | ||
152 | * We never return an error, we just make sure | ||
153 | * that we trigger the handlers as quickly as | ||
154 | * we can if we fell behind. | ||
155 | */ | ||
156 | while ((nextcomp - (unsigned long)read_c0_count()) | ||
157 | > (unsigned long)LONG_MAX) { | ||
158 | nextcomp += CATCHUP_INCREMENT; | ||
159 | write_c0_compare(nextcomp); | ||
160 | ehb(); | ||
161 | } | ||
162 | } | ||
163 | emt(mtflags); | ||
164 | local_irq_restore(flags); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | |||
169 | void smtc_distribute_timer(int vpe) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | unsigned int mtflags; | ||
173 | int cpu; | ||
174 | struct clock_event_device *cd; | ||
175 | unsigned long nextstamp = 0L; | ||
176 | unsigned long reference; | ||
177 | |||
178 | |||
179 | repeat: | ||
180 | for_each_online_cpu(cpu) { | ||
181 | /* | ||
182 | * Find virtual CPUs within the current VPE who have | ||
183 | * unserviced timer requests whose time is now past. | ||
184 | */ | ||
185 | local_irq_save(flags); | ||
186 | mtflags = dmt(); | ||
187 | if (cpu_data[cpu].vpe_id == vpe && | ||
188 | ISVALID(smtc_nexttime[vpe][cpu])) { | ||
189 | reference = (unsigned long)read_c0_count(); | ||
190 | if ((smtc_nexttime[vpe][cpu] - reference) | ||
191 | > (unsigned long)LONG_MAX) { | ||
192 | smtc_nexttime[vpe][cpu] = 0L; | ||
193 | emt(mtflags); | ||
194 | local_irq_restore(flags); | ||
195 | /* | ||
196 | * We don't send IPIs to ourselves. | ||
197 | */ | ||
198 | if (cpu != smp_processor_id()) { | ||
199 | smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0); | ||
200 | } else { | ||
201 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
202 | cd->event_handler(cd); | ||
203 | } | ||
204 | } else { | ||
205 | /* Local to VPE but Valid Time not yet reached. */ | ||
206 | if (!ISVALID(nextstamp) || | ||
207 | IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp, | ||
208 | reference)) { | ||
209 | smtc_nextinvpe[vpe] = cpu; | ||
210 | nextstamp = smtc_nexttime[vpe][cpu]; | ||
211 | } | ||
212 | emt(mtflags); | ||
213 | local_irq_restore(flags); | ||
214 | } | ||
215 | } else { | ||
216 | emt(mtflags); | ||
217 | local_irq_restore(flags); | ||
218 | |||
219 | } | ||
220 | } | ||
221 | /* Reprogram for interrupt at next soonest timestamp for VPE */ | ||
222 | if (ISVALID(nextstamp)) { | ||
223 | write_c0_compare(nextstamp); | ||
224 | ehb(); | ||
225 | if ((nextstamp - (unsigned long)read_c0_count()) | ||
226 | > (unsigned long)LONG_MAX) | ||
227 | goto repeat; | ||
228 | } | ||
229 | } | ||
230 | |||
231 | |||
232 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
233 | { | ||
234 | int cpu = smp_processor_id(); | ||
235 | |||
236 | /* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */ | ||
237 | handle_perf_irq(1); | ||
238 | |||
239 | if (read_c0_cause() & (1 << 30)) { | ||
240 | /* Clear Count/Compare Interrupt */ | ||
241 | write_c0_compare(read_c0_compare()); | ||
242 | smtc_distribute_timer(cpu_data[cpu].vpe_id); | ||
243 | } | ||
244 | return IRQ_HANDLED; | ||
245 | } | ||
246 | |||
247 | |||
248 | int __cpuinit mips_clockevent_init(void) | ||
249 | { | ||
250 | uint64_t mips_freq = mips_hpt_frequency; | ||
251 | unsigned int cpu = smp_processor_id(); | ||
252 | struct clock_event_device *cd; | ||
253 | unsigned int irq; | ||
254 | int i; | ||
255 | int j; | ||
256 | |||
257 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
258 | return -ENXIO; | ||
259 | if (cpu == 0) { | ||
260 | for (i = 0; i < num_possible_cpus(); i++) { | ||
261 | smtc_nextinvpe[i] = 0; | ||
262 | for (j = 0; j < num_possible_cpus(); j++) | ||
263 | smtc_nexttime[i][j] = 0L; | ||
264 | } | ||
265 | /* | ||
266 | * SMTC also can't have the usability test | ||
267 | * run by secondary TCs once Compare is in use. | ||
268 | */ | ||
269 | if (!c0_compare_int_usable()) | ||
270 | return -ENXIO; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * With vectored interrupts things are getting platform specific. | ||
275 | * get_c0_compare_int is a hook to allow a platform to return the | ||
276 | * interrupt number of its liking. | ||
277 | */ | ||
278 | irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
279 | if (get_c0_compare_int) | ||
280 | irq = get_c0_compare_int(); | ||
281 | |||
282 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
283 | |||
284 | cd->name = "MIPS"; | ||
285 | cd->features = CLOCK_EVT_FEAT_ONESHOT; | ||
286 | |||
287 | /* Calculate the min / max delta */ | ||
288 | cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32); | ||
289 | cd->shift = 32; | ||
290 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
291 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
292 | |||
293 | cd->rating = 300; | ||
294 | cd->irq = irq; | ||
295 | cd->cpumask = cpumask_of_cpu(cpu); | ||
296 | cd->set_next_event = mips_next_event; | ||
297 | cd->set_mode = mips_set_clock_mode; | ||
298 | cd->event_handler = mips_event_handler; | ||
299 | |||
300 | clockevents_register_device(cd); | ||
301 | |||
302 | /* | ||
303 | * On SMTC we only want to do the data structure | ||
304 | * initialization and IRQ setup once. | ||
305 | */ | ||
306 | if (cpu) | ||
307 | return 0; | ||
308 | /* | ||
309 | * And we need the hwmask associated with the c0_compare | ||
310 | * vector to be initialized. | ||
311 | */ | ||
312 | irq_hwmask[irq] = (0x100 << cp0_compare_irq); | ||
313 | if (cp0_timer_irq_installed) | ||
314 | return 0; | ||
315 | |||
316 | cp0_timer_irq_installed = 1; | ||
317 | |||
318 | setup_irq(irq, &c0_compare_irqaction); | ||
319 | |||
320 | return 0; | ||
321 | } | ||
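
The MAKEVALID()/ISVALID()/IS_SOONER() scheme in cevt-smtc.c leans on unsigned wrap-around arithmetic: both timestamps are re-based against the current Count value before comparing, so ordering survives a 32-bit Count overflow, and a computed timestamp of zero is squashed to 1 so that zero can mean "no event pending". The stand-alone sketch below just replays that arithmetic with an explicit 32-bit type and made-up values; it is illustrative only, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copies of the macros above, using an explicit 32-bit Count. */
#define MAKEVALID(x)	(((x) == 0u) ? 1u : (x))
#define ISVALID(x)	((x) != 0u)
#define IS_SOONER(a, b, ref) \
	((uint32_t)((a) - (ref)) < (uint32_t)((b) - (ref)))

int main(void)
{
	uint32_t reference = 0xfffffff0u;	/* Count is about to wrap */
	uint32_t a = 0x00000010u;		/* 0x20 cycles past reference, after the wrap */
	uint32_t b = 0xfffffff8u;		/* 8 cycles past reference */

	printf("IS_SOONER(a, b): %d (expect 0)\n", IS_SOONER(a, b, reference));
	printf("IS_SOONER(b, a): %d (expect 1)\n", IS_SOONER(b, a, reference));
	printf("MAKEVALID(0) = %u\n", MAKEVALID(0u));
	return 0;
}
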
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 11c92dc53791..e621fda8ab37 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -54,14 +54,18 @@ extern void r4k_wait(void); | |||
54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes | 54 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes |
55 | * using this version a gamble. | 55 | * using this version a gamble. |
56 | */ | 56 | */ |
57 | static void r4k_wait_irqoff(void) | 57 | void r4k_wait_irqoff(void) |
58 | { | 58 | { |
59 | local_irq_disable(); | 59 | local_irq_disable(); |
60 | if (!need_resched()) | 60 | if (!need_resched()) |
61 | __asm__(" .set mips3 \n" | 61 | __asm__(" .set push \n" |
62 | " .set mips3 \n" | ||
62 | " wait \n" | 63 | " wait \n" |
63 | " .set mips0 \n"); | 64 | " .set pop \n"); |
64 | local_irq_enable(); | 65 | local_irq_enable(); |
66 | __asm__(" .globl __pastwait \n" | ||
67 | "__pastwait: \n"); | ||
68 | return; | ||
65 | } | 69 | } |
66 | 70 | ||
67 | /* | 71 | /* |
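
The __pastwait label added above exists so that other code (smtc_send_ipi() in the smtc.c hunk further down) can recognise a TC whose saved restart PC lies between the start of r4k_wait_irqoff() and __pastwait, i.e. a thread parked with interrupts masked around the wait instruction, and restart it just past the wait. Below is a hedged user-space sketch of that address-window test; the label addresses are invented stand-ins, since the real ones come from the linker.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins; in the kernel these are the r4k_wait_irqoff and __pastwait labels. */
static const uintptr_t wait_start = 0x80101000u;
static const uintptr_t past_wait  = 0x80101020u;

/* A TC is "parked in the wait" if its restart PC is inside [wait_start, past_wait). */
static int parked_in_wait(uintptr_t restart_pc)
{
	return restart_pc >= wait_start && restart_pc < past_wait;
}

int main(void)
{
	uintptr_t pcs[] = { 0x80100ffcu, 0x80101008u, 0x80101020u };
	unsigned int i;

	for (i = 0; i < sizeof(pcs) / sizeof(pcs[0]); i++)
		printf("pc %#lx parked? %d\n",
		       (unsigned long)pcs[i], parked_in_wait(pcs[i]));
	return 0;
}
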
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index e29598ae939d..ffa331029e08 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit) | |||
79 | 79 | ||
80 | FEXPORT(restore_all) # restore full frame | 80 | FEXPORT(restore_all) # restore full frame |
81 | #ifdef CONFIG_MIPS_MT_SMTC | 81 | #ifdef CONFIG_MIPS_MT_SMTC |
82 | /* Detect and execute deferred IPI "interrupts" */ | ||
83 | LONG_L s0, TI_REGS($28) | ||
84 | LONG_S sp, TI_REGS($28) | ||
85 | jal deferred_smtc_ipi | ||
86 | LONG_S s0, TI_REGS($28) | ||
87 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 82 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
88 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ | 83 | /* Re-arm any temporarily masked interrupts not explicitly "acked" */ |
89 | mfc0 v0, CP0_TCSTATUS | 84 | mfc0 v0, CP0_TCSTATUS |
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame | |||
112 | xor t0, t0, t3 | 107 | xor t0, t0, t3 |
113 | mtc0 t0, CP0_TCCONTEXT | 108 | mtc0 t0, CP0_TCCONTEXT |
114 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 109 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
110 | /* Detect and execute deferred IPI "interrupts" */ | ||
111 | LONG_L s0, TI_REGS($28) | ||
112 | LONG_S sp, TI_REGS($28) | ||
113 | jal deferred_smtc_ipi | ||
114 | LONG_S s0, TI_REGS($28) | ||
115 | #endif /* CONFIG_MIPS_MT_SMTC */ | 115 | #endif /* CONFIG_MIPS_MT_SMTC */ |
116 | .set noat | 116 | .set noat |
117 | RESTORE_TEMP | 117 | RESTORE_TEMP |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index f886dd7f708e..01dcbe38fa01 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp) | |||
282 | and t0, a0, t1 | 282 | and t0, a0, t1 |
283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP | 283 | #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP |
284 | mfc0 t2, CP0_TCCONTEXT | 284 | mfc0 t2, CP0_TCCONTEXT |
285 | or t0, t0, t2 | 285 | or t2, t0, t2 |
286 | mtc0 t0, CP0_TCCONTEXT | 286 | mtc0 t2, CP0_TCCONTEXT |
287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ | 287 | #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */ |
288 | xor t1, t1, t0 | 288 | xor t1, t1, t0 |
289 | mtc0 t1, CP0_STATUS | 289 | mtc0 t1, CP0_STATUS |
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index 8f6d58ede33c..6e152c80cd4a 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c | |||
@@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
236 | 236 | ||
237 | atomic_set(&kgdb_cpu_doing_single_step, -1); | 237 | atomic_set(&kgdb_cpu_doing_single_step, -1); |
238 | if (remcom_in_buffer[0] == 's') | 238 | if (remcom_in_buffer[0] == 's') |
239 | if (kgdb_contthread) | 239 | atomic_set(&kgdb_cpu_doing_single_step, cpu); |
240 | atomic_set(&kgdb_cpu_doing_single_step, cpu); | ||
241 | 240 | ||
242 | return 0; | 241 | return 0; |
243 | } | 242 | } |
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index df4d3f2f740c..dc9eb72ed9de 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh); | |||
159 | /* | 159 | /* |
160 | * FPU Use Factor empirically derived from experiments on 34K | 160 | * FPU Use Factor empirically derived from experiments on 34K |
161 | */ | 161 | */ |
162 | #define FPUSEFACTOR 333 | 162 | #define FPUSEFACTOR 2000 |
163 | 163 | ||
164 | static __init int mt_fp_affinity_init(void) | 164 | static __init int mt_fp_affinity_init(void) |
165 | { | 165 | { |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ce7684335a41..22fc19bbe87f 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void) | |||
55 | while (1) { | 55 | while (1) { |
56 | tick_nohz_stop_sched_tick(1); | 56 | tick_nohz_stop_sched_tick(1); |
57 | while (!need_resched()) { | 57 | while (!need_resched()) { |
58 | #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG | 58 | #ifdef CONFIG_MIPS_MT_SMTC |
59 | extern void smtc_idle_loop_hook(void); | 59 | extern void smtc_idle_loop_hook(void); |
60 | 60 | ||
61 | smtc_idle_loop_hook(); | 61 | smtc_idle_loop_hook(); |
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, | |||
145 | */ | 145 | */ |
146 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); | 146 | p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1); |
147 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | 147 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); |
148 | |||
149 | #ifdef CONFIG_MIPS_MT_SMTC | ||
150 | /* | ||
151 | * SMTC restores TCStatus after Status, and the CU bits | ||
152 | * are aliased there. | ||
153 | */ | ||
154 | childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1); | ||
155 | #endif | ||
148 | clear_tsk_thread_flag(p, TIF_USEDFPU); | 156 | clear_tsk_thread_flag(p, TIF_USEDFPU); |
149 | 157 | ||
150 | #ifdef CONFIG_MIPS_MT_FPAFF | 158 | #ifdef CONFIG_MIPS_MT_FPAFF |
151 | clear_tsk_thread_flag(p, TIF_FPUBOUND); | 159 | clear_tsk_thread_flag(p, TIF_FPUBOUND); |
152 | |||
153 | /* | ||
154 | * FPU affinity support is cleaner if we track the | ||
155 | * user-visible CPU affinity from the very beginning. | ||
156 | * The generic cpus_allowed mask will already have | ||
157 | * been copied from the parent before copy_thread | ||
158 | * is invoked. | ||
159 | */ | ||
160 | p->thread.user_cpus_allowed = p->cpus_allowed; | ||
161 | #endif /* CONFIG_MIPS_MT_FPAFF */ | 160 | #endif /* CONFIG_MIPS_MT_FPAFF */ |
162 | 161 | ||
163 | if (clone_flags & CLONE_SETTLS) | 162 | if (clone_flags & CLONE_SETTLS) |
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 35234b92b9a5..96ffc9c6d194 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c | |||
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
238 | case FPC_EIR: { /* implementation / version register */ | 238 | case FPC_EIR: { /* implementation / version register */ |
239 | unsigned int flags; | 239 | unsigned int flags; |
240 | #ifdef CONFIG_MIPS_MT_SMTC | 240 | #ifdef CONFIG_MIPS_MT_SMTC |
241 | unsigned int irqflags; | 241 | unsigned long irqflags; |
242 | unsigned int mtflags; | 242 | unsigned int mtflags; |
243 | #endif /* CONFIG_MIPS_MT_SMTC */ | 243 | #endif /* CONFIG_MIPS_MT_SMTC */ |
244 | 244 | ||
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532ab..897fb2b4751c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -1,4 +1,21 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | 1 | /* |
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; either version 2 | ||
5 | * of the License, or (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
17 | * Copyright (C) 2008 Kevin D. Kissell | ||
18 | */ | ||
2 | 19 | ||
3 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
4 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -21,7 +38,6 @@ | |||
21 | #include <asm/time.h> | 38 | #include <asm/time.h> |
22 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
23 | #include <asm/smtc.h> | 40 | #include <asm/smtc.h> |
24 | #include <asm/smtc_ipi.h> | ||
25 | #include <asm/smtc_proc.h> | 41 | #include <asm/smtc_proc.h> |
26 | 42 | ||
27 | /* | 43 | /* |
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
58 | 74 | ||
59 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 75 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
60 | 76 | ||
61 | /* | ||
62 | * Clock interrupt "latch" buffers, per "CPU" | ||
63 | */ | ||
64 | |||
65 | static atomic_t ipi_timer_latch[NR_CPUS]; | ||
66 | 77 | ||
67 | /* | 78 | /* |
68 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 79 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; | |||
70 | 81 | ||
71 | #define IPIBUF_PER_CPU 4 | 82 | #define IPIBUF_PER_CPU 4 |
72 | 83 | ||
73 | static struct smtc_ipi_q IPIQ[NR_CPUS]; | 84 | struct smtc_ipi_q IPIQ[NR_CPUS]; |
74 | static struct smtc_ipi_q freeIPIq; | 85 | static struct smtc_ipi_q freeIPIq; |
75 | 86 | ||
76 | 87 | ||
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) | |||
282 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * phys_cpu_present_map and the logical/physical mappings. |
283 | */ | 294 | */ |
284 | 295 | ||
285 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
286 | { | 297 | { |
287 | int i, ntcs; | 298 | int i, ntcs; |
288 | 299 | ||
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
325 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | 336 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
326 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | 337 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
327 | | TCSTATUS_A); | 338 | | TCSTATUS_A); |
328 | write_tc_c0_tccontext(0); | 339 | /* |
340 | * TCContext gets an offset from the base of the IPIQ array | ||
341 | * to be used in low-level code to detect the presence of | ||
342 | * an active IPI queue | ||
343 | */ | ||
344 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
329 | /* Bind tc to vpe */ | 345 | /* Bind tc to vpe */ |
330 | write_tc_c0_tcbind(vpe); | 346 | write_tc_c0_tcbind(vpe); |
331 | /* In general, all TCs should have the same cpu_data indications */ | 347 | /* In general, all TCs should have the same cpu_data indications */ |
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
336 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 352 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
337 | cpu_data[cpu].vpe_id = vpe; | 353 | cpu_data[cpu].vpe_id = vpe; |
338 | cpu_data[cpu].tc_id = tc; | 354 | cpu_data[cpu].tc_id = tc; |
355 | /* Multi-core SMTC hasn't been tested, but be prepared */ | ||
356 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
339 | } | 357 | } |
340 | 358 | ||
359 | /* | ||
360 | * Tweak to get Count registers into as close a sync as possible. | ||
361 | * Value seems good for 34K-class cores. | ||
362 | */ | ||
363 | |||
364 | #define CP0_SKEW 8 | ||
341 | 365 | ||
342 | void mipsmt_prepare_cpus(void) | 366 | void smtc_prepare_cpus(int cpus) |
343 | { | 367 | { |
344 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | 368 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
345 | unsigned long flags; | 369 | unsigned long flags; |
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) | |||
363 | IPIQ[i].head = IPIQ[i].tail = NULL; | 387 | IPIQ[i].head = IPIQ[i].tail = NULL; |
364 | spin_lock_init(&IPIQ[i].lock); | 388 | spin_lock_init(&IPIQ[i].lock); |
365 | IPIQ[i].depth = 0; | 389 | IPIQ[i].depth = 0; |
366 | atomic_set(&ipi_timer_latch[i], 0); | ||
367 | } | 390 | } |
368 | 391 | ||
369 | /* cpu_data index starts at zero */ | 392 | /* cpu_data index starts at zero */ |
370 | cpu = 0; | 393 | cpu = 0; |
371 | cpu_data[cpu].vpe_id = 0; | 394 | cpu_data[cpu].vpe_id = 0; |
372 | cpu_data[cpu].tc_id = 0; | 395 | cpu_data[cpu].tc_id = 0; |
396 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
373 | cpu++; | 397 | cpu++; |
374 | 398 | ||
375 | /* Report on boot-time options */ | 399 | /* Report on boot-time options */ |
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) | |||
484 | write_vpe_c0_compare(0); | 508 | write_vpe_c0_compare(0); |
485 | /* Propagate Config7 */ | 509 | /* Propagate Config7 */ |
486 | write_vpe_c0_config7(read_c0_config7()); | 510 | write_vpe_c0_config7(read_c0_config7()); |
487 | write_vpe_c0_count(read_c0_count()); | 511 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
512 | ehb(); | ||
488 | } | 513 | } |
489 | /* enable multi-threading within VPE */ | 514 | /* enable multi-threading within VPE */ |
490 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | 515 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) | |||
556 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | 581 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) |
557 | { | 582 | { |
558 | extern u32 kernelsp[NR_CPUS]; | 583 | extern u32 kernelsp[NR_CPUS]; |
559 | long flags; | 584 | unsigned long flags; |
560 | int mtflags; | 585 | int mtflags; |
561 | 586 | ||
562 | LOCK_MT_PRA(); | 587 | LOCK_MT_PRA(); |
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
585 | 610 | ||
586 | void smtc_init_secondary(void) | 611 | void smtc_init_secondary(void) |
587 | { | 612 | { |
588 | /* | ||
589 | * Start timer on secondary VPEs if necessary. | ||
590 | * plat_timer_setup has already been invoked by init/main | ||
591 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
592 | * SMTC init code assigns TCs consecutively and in ascending order | ||
593 | * across available VPEs. | ||
594 | */ | ||
595 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
596 | ((read_c0_tcbind() & TCBIND_CURVPE) | ||
597 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
598 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
599 | } | ||
600 | |||
601 | local_irq_enable(); | 613 | local_irq_enable(); |
602 | } | 614 | } |
603 | 615 | ||
604 | void smtc_smp_finish(void) | 616 | void smtc_smp_finish(void) |
605 | { | 617 | { |
618 | int cpu = smp_processor_id(); | ||
619 | |||
620 | /* | ||
621 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
622 | * Like per_cpu_trap_init() hack, this assumes that | ||
623 | * SMTC init code assigns TCs consecutively and | ||
624 | * in ascending order across available VPEs. | ||
625 | */ | ||
626 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
627 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
628 | |||
606 | printk("TC %d going on-line as CPU %d\n", | 629 | printk("TC %d going on-line as CPU %d\n", |
607 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 630 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
608 | } | 631 | } |
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
753 | { | 776 | { |
754 | int tcstatus; | 777 | int tcstatus; |
755 | struct smtc_ipi *pipi; | 778 | struct smtc_ipi *pipi; |
756 | long flags; | 779 | unsigned long flags; |
757 | int mtflags; | 780 | int mtflags; |
781 | unsigned long tcrestart; | ||
782 | extern void r4k_wait_irqoff(void), __pastwait(void); | ||
758 | 783 | ||
759 | if (cpu == smp_processor_id()) { | 784 | if (cpu == smp_processor_id()) { |
760 | printk("Cannot Send IPI to self!\n"); | 785 | printk("Cannot Send IPI to self!\n"); |
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
771 | pipi->arg = (void *)action; | 796 | pipi->arg = (void *)action; |
772 | pipi->dest = cpu; | 797 | pipi->dest = cpu; |
773 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 798 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
774 | if (type == SMTC_CLOCK_TICK) | ||
775 | atomic_inc(&ipi_timer_latch[cpu]); | ||
776 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 799 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
777 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 800 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
778 | LOCK_CORE_PRA(); | 801 | LOCK_CORE_PRA(); |
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
800 | 823 | ||
801 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | 824 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
802 | /* | 825 | /* |
803 | * Spin-waiting here can deadlock, | 826 | * If we're in the irq-off version of the wait |
804 | * so we queue the message for the target TC. | 827 | * loop, we need to force exit from the wait and |
828 | * do a direct post of the IPI. | ||
829 | */ | ||
830 | if (cpu_wait == r4k_wait_irqoff) { | ||
831 | tcrestart = read_tc_c0_tcrestart(); | ||
832 | if (tcrestart >= (unsigned long)r4k_wait_irqoff | ||
833 | && tcrestart < (unsigned long)__pastwait) { | ||
834 | write_tc_c0_tcrestart(__pastwait); | ||
835 | tcstatus &= ~TCSTATUS_IXMT; | ||
836 | write_tc_c0_tcstatus(tcstatus); | ||
837 | goto postdirect; | ||
838 | } | ||
839 | } | ||
840 | /* | ||
841 | * Otherwise we queue the message for the target TC | ||
842 | * to pick up when it does a local_irq_restore() | ||
805 | */ | 843 | */ |
806 | write_tc_c0_tchalt(0); | 844 | write_tc_c0_tchalt(0); |
807 | UNLOCK_CORE_PRA(); | 845 | UNLOCK_CORE_PRA(); |
808 | /* Try to reduce redundant timer interrupt messages */ | ||
809 | if (type == SMTC_CLOCK_TICK) { | ||
810 | if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ | ||
811 | smtc_ipi_nq(&freeIPIq, pipi); | ||
812 | return; | ||
813 | } | ||
814 | } | ||
815 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 846 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
816 | } else { | 847 | } else { |
817 | if (type == SMTC_CLOCK_TICK) | 848 | postdirect: |
818 | atomic_inc(&ipi_timer_latch[cpu]); | ||
819 | post_direct_ipi(cpu, pipi); | 849 | post_direct_ipi(cpu, pipi); |
820 | write_tc_c0_tchalt(0); | 850 | write_tc_c0_tchalt(0); |
821 | UNLOCK_CORE_PRA(); | 851 | UNLOCK_CORE_PRA(); |
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) | |||
883 | smp_call_function_interrupt(); | 913 | smp_call_function_interrupt(); |
884 | } | 914 | } |
885 | 915 | ||
886 | DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | 916 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
887 | 917 | ||
888 | void ipi_decode(struct smtc_ipi *pipi) | 918 | void ipi_decode(struct smtc_ipi *pipi) |
889 | { | 919 | { |
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
891 | struct clock_event_device *cd; | 921 | struct clock_event_device *cd; |
892 | void *arg_copy = pipi->arg; | 922 | void *arg_copy = pipi->arg; |
893 | int type_copy = pipi->type; | 923 | int type_copy = pipi->type; |
894 | int ticks; | ||
895 | |||
896 | smtc_ipi_nq(&freeIPIq, pipi); | 924 | smtc_ipi_nq(&freeIPIq, pipi); |
897 | switch (type_copy) { | 925 | switch (type_copy) { |
898 | case SMTC_CLOCK_TICK: | 926 | case SMTC_CLOCK_TICK: |
899 | irq_enter(); | 927 | irq_enter(); |
900 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; | 928 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; |
901 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 929 | cd = &per_cpu(mips_clockevent_device, cpu); |
902 | ticks = atomic_read(&ipi_timer_latch[cpu]); | 930 | cd->event_handler(cd); |
903 | atomic_sub(ticks, &ipi_timer_latch[cpu]); | ||
904 | while (ticks) { | ||
905 | cd->event_handler(cd); | ||
906 | ticks--; | ||
907 | } | ||
908 | irq_exit(); | 931 | irq_exit(); |
909 | break; | 932 | break; |
910 | 933 | ||
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
937 | } | 960 | } |
938 | } | 961 | } |
939 | 962 | ||
963 | /* | ||
964 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
965 | * so it reuses the current exception frame rather than set up a | ||
966 | * new one with self_ipi. | ||
967 | */ | ||
968 | |||
940 | void deferred_smtc_ipi(void) | 969 | void deferred_smtc_ipi(void) |
941 | { | 970 | { |
942 | struct smtc_ipi *pipi; | 971 | int cpu = smp_processor_id(); |
943 | unsigned long flags; | ||
944 | /* DEBUG */ | ||
945 | int q = smp_processor_id(); | ||
946 | 972 | ||
947 | /* | 973 | /* |
948 | * Test is not atomic, but much faster than a dequeue, | 974 | * Test is not atomic, but much faster than a dequeue, |
949 | * and the vast majority of invocations will have a null queue. | 975 | * and the vast majority of invocations will have a null queue. |
976 | * If irq_disabled when this was called, then any IPIs queued | ||
977 | * after we test last will be taken on the next irq_enable/restore. | ||
978 | * If interrupts were enabled, then any IPIs added after the | ||
979 | * last test will be taken directly. | ||
950 | */ | 980 | */ |
951 | if (IPIQ[q].head != NULL) { | 981 | |
952 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | 982 | while (IPIQ[cpu].head != NULL) { |
953 | /* ipi_decode() should be called with interrupts off */ | 983 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
954 | local_irq_save(flags); | 984 | struct smtc_ipi *pipi; |
985 | unsigned long flags; | ||
986 | |||
987 | /* | ||
988 | * It may be possible we'll come in with interrupts | ||
989 | * already enabled. | ||
990 | */ | ||
991 | local_irq_save(flags); | ||
992 | |||
993 | spin_lock(&q->lock); | ||
994 | pipi = __smtc_ipi_dq(q); | ||
995 | spin_unlock(&q->lock); | ||
996 | if (pipi != NULL) | ||
955 | ipi_decode(pipi); | 997 | ipi_decode(pipi); |
956 | local_irq_restore(flags); | 998 | /* |
957 | } | 999 | * The use of the __raw_local restore isn't |
1000 | * as obviously necessary here as in smtc_ipi_replay(), | ||
1001 | * but it's more efficient, given that we're already | ||
1002 | * running down the IPI queue. | ||
1003 | */ | ||
1004 | __raw_local_irq_restore(flags); | ||
958 | } | 1005 | } |
959 | } | 1006 | } |
960 | 1007 | ||
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | |||
975 | struct smtc_ipi *pipi; | 1022 | struct smtc_ipi *pipi; |
976 | unsigned long tcstatus; | 1023 | unsigned long tcstatus; |
977 | int sent; | 1024 | int sent; |
978 | long flags; | 1025 | unsigned long flags; |
979 | unsigned int mtflags; | 1026 | unsigned int mtflags; |
980 | unsigned int vpflags; | 1027 | unsigned int vpflags; |
981 | 1028 | ||
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1066 | 1113 | ||
1067 | /* | 1114 | /* |
1068 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1115 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
1069 | * | ||
1070 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
1071 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
1072 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
1073 | * result in a recursive call to raw_local_irq_restore(). | ||
1074 | */ | 1116 | */ |
1075 | 1117 | ||
1076 | static void __smtc_ipi_replay(void) | 1118 | /* |
1119 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
1120 | */ | ||
1121 | |||
1122 | void smtc_ipi_replay(void) | ||
1077 | { | 1123 | { |
1078 | unsigned int cpu = smp_processor_id(); | 1124 | unsigned int cpu = smp_processor_id(); |
1079 | 1125 | ||
1080 | /* | 1126 | /* |
1081 | * To the extent that we've ever turned interrupts off, | 1127 | * To the extent that we've ever turned interrupts off, |
1082 | * we may have accumulated deferred IPIs. This is subtle. | 1128 | * we may have accumulated deferred IPIs. This is subtle. |
1083 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1084 | * exact number - but we'll also disable interrupts | ||
1085 | * and create a window of failure where a new IPI gets | ||
1086 | * queued after we test the depth but before we re-enable | ||
1087 | * interrupts. So long as IXMT never gets set, however, | ||
1088 | * we should be OK: If we pick up something and dispatch | 1129 | * we should be OK: If we pick up something and dispatch |
1089 | * it here, that's great. If we see nothing, but concurrent | 1130 | * it here, that's great. If we see nothing, but concurrent |
1090 | * with this operation, another TC sends us an IPI, IXMT | 1131 | * with this operation, another TC sends us an IPI, IXMT |
1091 | * is clear, and we'll handle it as a real pseudo-interrupt | 1132 | * is clear, and we'll handle it as a real pseudo-interrupt |
1092 | * and not a pseudo-pseudo interrupt. | 1133 | * and not a pseudo-pseudo interrupt. The important thing |
1134 | * is to do the last check for queued message *after* the | ||
1135 | * re-enabling of interrupts. | ||
1093 | */ | 1136 | */ |
1094 | if (IPIQ[cpu].depth > 0) { | 1137 | while (IPIQ[cpu].head != NULL) { |
1095 | while (1) { | 1138 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
1096 | struct smtc_ipi_q *q = &IPIQ[cpu]; | 1139 | struct smtc_ipi *pipi; |
1097 | struct smtc_ipi *pipi; | 1140 | unsigned long flags; |
1098 | extern void self_ipi(struct smtc_ipi *); | 1141 | |
1099 | 1142 | /* | |
1100 | spin_lock(&q->lock); | 1143 | * It's just possible we'll come in with interrupts |
1101 | pipi = __smtc_ipi_dq(q); | 1144 | * already enabled. |
1102 | spin_unlock(&q->lock); | 1145 | */ |
1103 | if (!pipi) | 1146 | local_irq_save(flags); |
1104 | break; | 1147 | |
1148 | spin_lock(&q->lock); | ||
1149 | pipi = __smtc_ipi_dq(q); | ||
1150 | spin_unlock(&q->lock); | ||
1151 | /* | ||
1152 | ** But use a raw restore here to avoid recursion. | ||
1153 | */ | ||
1154 | __raw_local_irq_restore(flags); | ||
1105 | 1155 | ||
1156 | if (pipi) { | ||
1106 | self_ipi(pipi); | 1157 | self_ipi(pipi); |
1107 | smtc_cpu_stats[cpu].selfipis++; | 1158 | smtc_cpu_stats[cpu].selfipis++; |
1108 | } | 1159 | } |
1109 | } | 1160 | } |
1110 | } | 1161 | } |
1111 | 1162 | ||
1112 | void smtc_ipi_replay(void) | ||
1113 | { | ||
1114 | raw_local_irq_disable(); | ||
1115 | __smtc_ipi_replay(); | ||
1116 | } | ||
1117 | |||
1118 | EXPORT_SYMBOL(smtc_ipi_replay); | 1163 | EXPORT_SYMBOL(smtc_ipi_replay); |
1119 | 1164 | ||
1120 | void smtc_idle_loop_hook(void) | 1165 | void smtc_idle_loop_hook(void) |
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) | |||
1193 | } | 1238 | } |
1194 | } | 1239 | } |
1195 | 1240 | ||
1196 | /* | ||
1197 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1198 | */ | ||
1199 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1200 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1201 | if ((tc != smp_processor_id()) && | ||
1202 | atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { | ||
1203 | if (clock_hang_reported[tc] == 0) { | ||
1204 | pdb_msg += sprintf(pdb_msg, | ||
1205 | "TC %d looks hung with timer latch at %d\n", | ||
1206 | tc, atomic_read(&ipi_timer_latch[tc])); | ||
1207 | clock_hang_reported[tc]++; | ||
1208 | } | ||
1209 | } | ||
1210 | } | ||
1211 | emt(mtflags); | 1241 | emt(mtflags); |
1212 | local_irq_restore(flags); | 1242 | local_irq_restore(flags); |
1213 | if (pdb_msg != &id_ho_db_msg[0]) | 1243 | if (pdb_msg != &id_ho_db_msg[0]) |
1214 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | 1244 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
1215 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 1245 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
1216 | 1246 | ||
1217 | /* | 1247 | smtc_ipi_replay(); |
1218 | * Replay any accumulated deferred IPIs. If "Instant Replay" | ||
1219 | * is in use, there should never be any. | ||
1220 | */ | ||
1221 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | ||
1222 | { | ||
1223 | unsigned long flags; | ||
1224 | |||
1225 | local_irq_save(flags); | ||
1226 | __smtc_ipi_replay(); | ||
1227 | local_irq_restore(flags); | ||
1228 | } | ||
1229 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | ||
1230 | } | 1248 | } |
1231 | 1249 | ||
1232 | void smtc_soft_dump(void) | 1250 | void smtc_soft_dump(void) |
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) | |||
1242 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | 1260 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
1243 | } | 1261 | } |
1244 | smtc_ipi_qdump(); | 1262 | smtc_ipi_qdump(); |
1245 | printk("Timer IPI Backlogs:\n"); | ||
1246 | for (i=0; i < NR_CPUS; i++) { | ||
1247 | printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); | ||
1248 | } | ||
1249 | printk("%d Recoveries of \"stolen\" FPU\n", | 1263 | printk("%d Recoveries of \"stolen\" FPU\n", |
1250 | atomic_read(&smtc_fpu_recoveries)); | 1264 | atomic_read(&smtc_fpu_recoveries)); |
1251 | } | 1265 | } |
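
The reworked deferred_smtc_ipi() and smtc_ipi_replay() above share one pattern: peek at the queue head without the lock (cheap, and safe because anything queued after the peek is picked up on the next interrupt enable), then take the spinlock only long enough to dequeue a single message, and handle it with the lock dropped. The toy user-space model below mirrors that shape with a pthread mutex and a counter standing in for struct smtc_ipi_q; it is a sketch of the locking pattern, not of the kernel API.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for struct smtc_ipi_q: a lock plus a count of queued messages. */
struct toy_ipi_q {
	pthread_mutex_t lock;
	int pending;
};

static struct toy_ipi_q q = { PTHREAD_MUTEX_INITIALIZER, 3 };

/* Dequeue one message under the lock; return whether we got one. */
static int toy_dequeue(struct toy_ipi_q *qp)
{
	int got = 0;

	pthread_mutex_lock(&qp->lock);
	if (qp->pending > 0) {
		qp->pending--;
		got = 1;
	}
	pthread_mutex_unlock(&qp->lock);
	return got;
}

int main(void)
{
	/* Unlocked emptiness check, locked dequeue, handling done with the lock dropped. */
	while (q.pending > 0) {
		if (toy_dequeue(&q))
			printf("handled one deferred IPI\n");
	}
	return 0;
}
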
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5fd0cd020af5..b602ac6eb47d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void) | |||
825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { | 825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { |
826 | cpumask_t tmask; | 826 | cpumask_t tmask; |
827 | 827 | ||
828 | cpus_and(tmask, current->thread.user_cpus_allowed, | 828 | current->thread.user_cpus_allowed |
829 | mt_fpu_cpumask); | 829 | = current->cpus_allowed; |
830 | cpus_and(tmask, current->cpus_allowed, | ||
831 | mt_fpu_cpumask); | ||
830 | set_cpus_allowed(current, tmask); | 832 | set_cpus_allowed(current, tmask); |
831 | set_thread_flag(TIF_FPUBOUND); | 833 | set_thread_flag(TIF_FPUBOUND); |
832 | } | 834 | } |
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 3b7dd722c32a..cef2db8d2225 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile | |||
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o | |||
15 | obj-$(CONFIG_PCI) += malta-pci.o | 15 | obj-$(CONFIG_PCI) += malta-pci.o |
16 | 16 | ||
17 | # FIXME FIXME FIXME | 17 | # FIXME FIXME FIXME |
18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o | 18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o |
19 | 19 | ||
20 | EXTRA_CFLAGS += -Werror | 20 | EXTRA_CFLAGS += -Werror |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 5ea705e49454..f84a46a8ae6e 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c | |||
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) | |||
84 | 84 | ||
85 | static void __init msmtc_smp_setup(void) | 85 | static void __init msmtc_smp_setup(void) |
86 | { | 86 | { |
87 | mipsmt_build_cpu_map(0); | 87 | /* |
88 | * we won't get the definitive value until | ||
89 | * we've run smtc_prepare_cpus later, but | ||
90 | * we would appear to need an upper bound now. | ||
91 | */ | ||
92 | smp_num_siblings = smtc_build_cpu_map(0); | ||
88 | } | 93 | } |
89 | 94 | ||
90 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) | 95 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) |
91 | { | 96 | { |
92 | mipsmt_prepare_cpus(); | 97 | smtc_prepare_cpus(max_cpus); |
93 | } | 98 | } |
94 | 99 | ||
95 | struct plat_smp_ops msmtc_smp_ops = { | 100 | struct plat_smp_ops msmtc_smp_ops = { |
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 15e01aec37fd..c8c32f417b6c 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile | |||
@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o | |||
15 | obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o | 15 | obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o |
16 | obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o | 16 | obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o |
17 | obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o | 17 | obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o |
18 | obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o | ||
18 | 19 | ||
19 | # | 20 | # |
20 | # These are still pretty much in the old state, watch, go blind. | 21 | # These are still pretty much in the old state, watch, go blind. |
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c new file mode 100644 index 000000000000..bea9b6cdfdbf --- /dev/null +++ b/arch/mips/pci/pci-bcm47xx.c | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the | ||
6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
7 | * option) any later version. | ||
8 | * | ||
9 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
10 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
11 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
12 | * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
13 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
14 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
15 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
16 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
17 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
18 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along | ||
21 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
22 | * 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | */ | ||
24 | |||
25 | #include <linux/types.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/ssb/ssb.h> | ||
28 | |||
29 | int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
30 | { | ||
31 | return 0; | ||
32 | } | ||
33 | |||
34 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
35 | { | ||
36 | int res; | ||
37 | u8 slot, pin; | ||
38 | |||
39 | res = ssb_pcibios_plat_dev_init(dev); | ||
40 | if (res < 0) { | ||
41 | printk(KERN_ALERT "PCI: Failed to init device %s\n", | ||
42 | pci_name(dev)); | ||
43 | return res; | ||
44 | } | ||
45 | |||
46 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | ||
47 | slot = PCI_SLOT(dev->devfn); | ||
48 | res = ssb_pcibios_map_irq(dev, slot, pin); | ||
49 | |||
50 | /* IRQ-0 and IRQ-1 are software interrupts. */ | ||
51 | if (res < 2) { | ||
52 | printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n", | ||
53 | pci_name(dev)); | ||
54 | return res; | ||
55 | } | ||
56 | |||
57 | dev->irq = res; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c index bd78368c82bf..f97ab1461012 100644 --- a/arch/mips/pci/pci-ip27.c +++ b/arch/mips/pci/pci-ip27.c | |||
@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) | |||
143 | */ | 143 | */ |
144 | int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 144 | int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
145 | { | 145 | { |
146 | return 0; | ||
147 | } | ||
148 | |||
149 | /* Most MIPS systems have straight-forward swizzling needs. */ | ||
150 | static inline u8 bridge_swizzle(u8 pin, u8 slot) | ||
151 | { | ||
152 | return (((pin - 1) + slot) % 4) + 1; | ||
153 | } | ||
154 | |||
155 | static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev) | ||
156 | { | ||
157 | while (dev->bus->parent) { | ||
158 | /* Move up the chain of bridges. */ | ||
159 | dev = dev->bus->self; | ||
160 | } | ||
161 | |||
162 | return dev; | ||
163 | } | ||
164 | |||
165 | /* Do platform specific device initialization at pci_enable_device() time */ | ||
166 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
167 | { | ||
146 | struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); | 168 | struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); |
147 | int irq = bc->pci_int[slot]; | 169 | struct pci_dev *rdev = bridge_root_dev(dev); |
170 | int slot = PCI_SLOT(rdev->devfn); | ||
171 | int irq; | ||
148 | 172 | ||
173 | irq = bc->pci_int[slot]; | ||
149 | if (irq == -1) { | 174 | if (irq == -1) { |
150 | irq = bc->pci_int[slot] = request_bridge_irq(bc); | 175 | irq = request_bridge_irq(bc); |
151 | if (irq < 0) | 176 | if (irq < 0) |
152 | panic("Can't allocate interrupt for PCI device %s\n", | 177 | return irq; |
153 | pci_name(dev)); | 178 | |
179 | bc->pci_int[slot] = irq; | ||
154 | } | 180 | } |
155 | 181 | ||
156 | irq_to_bridge[irq] = bc; | 182 | irq_to_bridge[irq] = bc; |
157 | irq_to_slot[irq] = slot; | 183 | irq_to_slot[irq] = slot; |
158 | 184 | ||
159 | return irq; | 185 | dev->irq = irq; |
160 | } | ||
161 | 186 | ||
162 | /* Do platform specific device initialization at pci_enable_device() time */ | ||
163 | int pcibios_plat_dev_init(struct pci_dev *dev) | ||
164 | { | ||
165 | return 0; | 187 | return 0; |
166 | } | 188 | } |
167 | 189 | ||
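
bridge_swizzle() introduced above encodes the conventional PCI interrupt-pin rotation: each bridge crossing shifts INTA..INTD by the device's slot number (it is defined here even though the shown hunk only walks up to the root device before consulting bc->pci_int[]). A quick stand-alone check of the formula, with illustrative slot values only:

#include <stdio.h>

/* Same formula as bridge_swizzle() above; pins are numbered 1..4 (INTA..INTD). */
static unsigned char swizzle(unsigned char pin, unsigned char slot)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	unsigned char slot, pin;

	for (slot = 0; slot < 4; slot++)
		for (pin = 1; pin <= 4; pin++)
			printf("slot %d INT%c -> INT%c at the parent\n",
			       slot, 'A' + pin - 1, 'A' + swizzle(pin, slot) - 1);
	return 0;
}
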
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 761c434a2488..56c64ccc9c21 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c | |||
@@ -20,22 +20,8 @@ EXPORT_SYMBOL(__mn10300_irq_enabled_epsw); | |||
20 | atomic_t irq_err_count; | 20 | atomic_t irq_err_count; |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * MN10300 INTC controller operations | 23 | * MN10300 interrupt controller operations |
24 | */ | 24 | */ |
25 | static void mn10300_cpupic_disable(unsigned int irq) | ||
26 | { | ||
27 | u16 tmp = GxICR(irq); | ||
28 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT; | ||
29 | tmp = GxICR(irq); | ||
30 | } | ||
31 | |||
32 | static void mn10300_cpupic_enable(unsigned int irq) | ||
33 | { | ||
34 | u16 tmp = GxICR(irq); | ||
35 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; | ||
36 | tmp = GxICR(irq); | ||
37 | } | ||
38 | |||
39 | static void mn10300_cpupic_ack(unsigned int irq) | 25 | static void mn10300_cpupic_ack(unsigned int irq) |
40 | { | 26 | { |
41 | u16 tmp; | 27 | u16 tmp; |
@@ -60,26 +46,54 @@ static void mn10300_cpupic_mask_ack(unsigned int irq) | |||
60 | static void mn10300_cpupic_unmask(unsigned int irq) | 46 | static void mn10300_cpupic_unmask(unsigned int irq) |
61 | { | 47 | { |
62 | u16 tmp = GxICR(irq); | 48 | u16 tmp = GxICR(irq); |
63 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; | 49 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; |
64 | tmp = GxICR(irq); | 50 | tmp = GxICR(irq); |
65 | } | 51 | } |
66 | 52 | ||
67 | static void mn10300_cpupic_end(unsigned int irq) | 53 | static void mn10300_cpupic_unmask_clear(unsigned int irq) |
68 | { | 54 | { |
55 | /* the MN10300 PIC latches its interrupt request bit, even after the | ||
56 | * device has ceased to assert its interrupt line and the interrupt | ||
57 | * channel has been disabled in the PIC, so for level-triggered | ||
58 | * interrupts we need to clear the request bit when we re-enable */ | ||
69 | u16 tmp = GxICR(irq); | 59 | u16 tmp = GxICR(irq); |
70 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE; | 60 | GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; |
71 | tmp = GxICR(irq); | 61 | tmp = GxICR(irq); |
72 | } | 62 | } |
73 | 63 | ||
74 | static struct irq_chip mn10300_cpu_pic = { | 64 | /* |
75 | .name = "cpu", | 65 | * MN10300 PIC level-triggered IRQ handling. |
76 | .disable = mn10300_cpupic_disable, | 66 | * |
77 | .enable = mn10300_cpupic_enable, | 67 | * The PIC has no 'ACK' function per se. It is possible to clear individual |
68 | * channel latches, but each latch relatches whether or not the channel is | ||
69 | * masked, so we need to clear the latch when we unmask the channel. | ||
70 | * | ||
71 | * Also for this reason, we don't supply an ack() op (it's unused anyway if | ||
72 | * mask_ack() is provided), and mask_ack() just masks. | ||
73 | */ | ||
74 | static struct irq_chip mn10300_cpu_pic_level = { | ||
75 | .name = "cpu_l", | ||
76 | .disable = mn10300_cpupic_mask, | ||
77 | .enable = mn10300_cpupic_unmask_clear, | ||
78 | .ack = NULL, | ||
79 | .mask = mn10300_cpupic_mask, | ||
80 | .mask_ack = mn10300_cpupic_mask, | ||
81 | .unmask = mn10300_cpupic_unmask_clear, | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * MN10300 PIC edge-triggered IRQ handling. | ||
86 | * | ||
87 | * We use the latch clearing function of the PIC as the 'ACK' function. | ||
88 | */ | ||
89 | static struct irq_chip mn10300_cpu_pic_edge = { | ||
90 | .name = "cpu_e", | ||
91 | .disable = mn10300_cpupic_mask, | ||
92 | .enable = mn10300_cpupic_unmask, | ||
78 | .ack = mn10300_cpupic_ack, | 93 | .ack = mn10300_cpupic_ack, |
79 | .mask = mn10300_cpupic_mask, | 94 | .mask = mn10300_cpupic_mask, |
80 | .mask_ack = mn10300_cpupic_mask_ack, | 95 | .mask_ack = mn10300_cpupic_mask_ack, |
81 | .unmask = mn10300_cpupic_unmask, | 96 | .unmask = mn10300_cpupic_unmask, |
82 | .end = mn10300_cpupic_end, | ||
83 | }; | 97 | }; |
84 | 98 | ||
85 | /* | 99 | /* |
@@ -114,7 +128,8 @@ void set_intr_level(int irq, u16 level) | |||
114 | */ | 128 | */ |
115 | void set_intr_postackable(int irq) | 129 | void set_intr_postackable(int irq) |
116 | { | 130 | { |
117 | set_irq_handler(irq, handle_level_irq); | 131 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level, |
132 | handle_level_irq); | ||
118 | } | 133 | } |
119 | 134 | ||
120 | /* | 135 | /* |
@@ -126,8 +141,12 @@ void __init init_IRQ(void) | |||
126 | 141 | ||
127 | for (irq = 0; irq < NR_IRQS; irq++) | 142 | for (irq = 0; irq < NR_IRQS; irq++) |
128 | if (irq_desc[irq].chip == &no_irq_type) | 143 | if (irq_desc[irq].chip == &no_irq_type) |
129 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic, | 144 | /* due to the PIC latching interrupt requests, even |
130 | handle_edge_irq); | 145 | * when the IRQ is disabled, IRQ_PENDING is superfluous |
146 | * and we can use handle_level_irq() for edge-triggered | ||
147 | * interrupts */ | ||
148 | set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge, | ||
149 | handle_level_irq); | ||
131 | unit_init_IRQ(); | 150 | unit_init_IRQ(); |
132 | } | 151 | } |
133 | 152 | ||
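The irq.c split above is easier to follow with the latch behaviour in mind: the PIC relatches a level-triggered request even while the channel is masked, so a plain unmask would immediately redeliver an interrupt that was already handled, which is why the level chip clears the latch in its unmask op. Below is a tiny stand-alone model of that behaviour; the field layout is invented for illustration and is not the real GxICR encoding.

#include <stdio.h>

/* Toy model of a latching PIC channel (invented layout, not real GxICR). */
struct chan {
        int enabled;    /* channel unmasked */
        int latched;    /* latched request bit */
        int line;       /* current level on the device's interrupt line */
};

static void sample(struct chan *c)
{
        if (c->line)    /* the latch relatches even while masked */
                c->latched = 1;
}

static int fires(const struct chan *c)
{
        return c->enabled && c->latched;
}

int main(void)
{
        struct chan c = { .enabled = 1, .line = 1 };

        sample(&c);             /* level IRQ raised and latched           */
        c.enabled = 0;          /* mask_ack(): mask only, no latch clear  */
        c.line = 0;             /* handler services and quiesces device   */
        sample(&c);

        c.enabled = 1;          /* plain unmask (old single irq_chip)     */
        printf("plain unmask      -> spurious IRQ: %s\n", fires(&c) ? "yes" : "no");

        c.enabled = 0;
        c.latched = 0;          /* unmask_clear(): clear latch via DETECT */
        c.enabled = 1;
        printf("unmask with clear -> spurious IRQ: %s\n", fires(&c) ? "yes" : "no");
        return 0;
}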
diff --git a/arch/mn10300/kernel/time.c b/arch/mn10300/kernel/time.c index babb7c2ac377..e4606586f94c 100644 --- a/arch/mn10300/kernel/time.c +++ b/arch/mn10300/kernel/time.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* MN10300 Low level time management | 1 | /* MN10300 Low level time management |
2 | * | 2 | * |
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2007-2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * - Derived from arch/i386/kernel/time.c | 5 | * - Derived from arch/i386/kernel/time.c |
6 | * | 6 | * |
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/profile.h> | 18 | #include <linux/profile.h> |
19 | #include <linux/cnt32_to_63.h> | ||
19 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
20 | #include <asm/div64.h> | 21 | #include <asm/div64.h> |
21 | #include <asm/processor.h> | 22 | #include <asm/processor.h> |
@@ -40,27 +41,54 @@ static struct irqaction timer_irq = { | |||
40 | .name = "timer", | 41 | .name = "timer", |
41 | }; | 42 | }; |
42 | 43 | ||
44 | static unsigned long sched_clock_multiplier; | ||
45 | |||
43 | /* | 46 | /* |
44 | * scheduler clock - returns current time in nanosec units. | 47 | * scheduler clock - returns current time in nanosec units. |
45 | */ | 48 | */ |
46 | unsigned long long sched_clock(void) | 49 | unsigned long long sched_clock(void) |
47 | { | 50 | { |
48 | union { | 51 | union { |
49 | unsigned long long l; | 52 | unsigned long long ll; |
50 | u32 w[2]; | 53 | unsigned l[2]; |
51 | } quot; | 54 | } tsc64, result; |
55 | unsigned long tsc, tmp; | ||
56 | unsigned product[3]; /* 96-bit intermediate value */ | ||
57 | |||
58 | /* read the TSC value | ||
59 | */ | ||
60 | tsc = 0 - get_cycles(); /* get_cycles() counts down */ | ||
52 | 61 | ||
53 | quot.w[0] = mn10300_last_tsc - get_cycles(); | 62 | /* expand to 64-bits. |
54 | quot.w[1] = 1000000000; | 63 | * - sched_clock() must be called once a minute or better or the |
64 | * following will go horribly wrong - see cnt32_to_63() | ||
65 | */ | ||
66 | tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL; | ||
55 | 67 | ||
56 | asm("mulu %2,%3,%0,%1" | 68 | /* scale the 64-bit TSC value to a nanosecond value via a 96-bit |
57 | : "=r"(quot.w[1]), "=r"(quot.w[0]) | 69 | * intermediate |
58 | : "0"(quot.w[1]), "1"(quot.w[0]) | 70 | */ |
71 | asm("mulu %2,%0,%3,%0 \n" /* LSW * mult -> 0:%3:%0 */ | ||
72 | "mulu %2,%1,%2,%1 \n" /* MSW * mult -> %2:%1:0 */ | ||
73 | "add %3,%1 \n" | ||
74 | "addc 0,%2 \n" /* result in %2:%1:%0 */ | ||
75 | : "=r"(product[0]), "=r"(product[1]), "=r"(product[2]), "=r"(tmp) | ||
76 | : "0"(tsc64.l[0]), "1"(tsc64.l[1]), "2"(sched_clock_multiplier) | ||
59 | : "cc"); | 77 | : "cc"); |
60 | 78 | ||
61 | do_div(quot.l, MN10300_TSCCLK); | 79 | result.l[0] = product[1] << 16 | product[0] >> 16; |
80 | result.l[1] = product[2] << 16 | product[1] >> 16; | ||
62 | 81 | ||
63 | return quot.l; | 82 | return result.ll; |
83 | } | ||
84 | |||
85 | /* | ||
86 | * initialise the scheduler clock | ||
87 | */ | ||
88 | static void __init mn10300_sched_clock_init(void) | ||
89 | { | ||
90 | sched_clock_multiplier = | ||
91 | __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK); | ||
64 | } | 92 | } |
65 | 93 | ||
66 | /* | 94 | /* |
@@ -128,4 +156,6 @@ void __init time_init(void) | |||
128 | /* start the watchdog timer */ | 156 | /* start the watchdog timer */ |
129 | watchdog_go(); | 157 | watchdog_go(); |
130 | #endif | 158 | #endif |
159 | |||
160 | mn10300_sched_clock_init(); | ||
131 | } | 161 | } |
diff --git a/arch/mn10300/unit-asb2303/unit-init.c b/arch/mn10300/unit-asb2303/unit-init.c index 14b2c817cff8..70e8cb4ea266 100644 --- a/arch/mn10300/unit-asb2303/unit-init.c +++ b/arch/mn10300/unit-asb2303/unit-init.c | |||
@@ -51,7 +51,7 @@ void __init unit_init_IRQ(void) | |||
51 | switch (GET_XIRQ_TRIGGER(extnum)) { | 51 | switch (GET_XIRQ_TRIGGER(extnum)) { |
52 | case XIRQ_TRIGGER_HILEVEL: | 52 | case XIRQ_TRIGGER_HILEVEL: |
53 | case XIRQ_TRIGGER_LOWLEVEL: | 53 | case XIRQ_TRIGGER_LOWLEVEL: |
54 | set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); | 54 | set_intr_postackable(XIRQ2IRQ(extnum)); |
55 | break; | 55 | break; |
56 | default: | 56 | default: |
57 | break; | 57 | break; |
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c index 6a352414a358..72812a9439ac 100644 --- a/arch/mn10300/unit-asb2305/unit-init.c +++ b/arch/mn10300/unit-asb2305/unit-init.c | |||
@@ -52,7 +52,7 @@ void __init unit_init_IRQ(void) | |||
52 | switch (GET_XIRQ_TRIGGER(extnum)) { | 52 | switch (GET_XIRQ_TRIGGER(extnum)) { |
53 | case XIRQ_TRIGGER_HILEVEL: | 53 | case XIRQ_TRIGGER_HILEVEL: |
54 | case XIRQ_TRIGGER_LOWLEVEL: | 54 | case XIRQ_TRIGGER_LOWLEVEL: |
55 | set_irq_handler(XIRQ2IRQ(extnum), handle_level_irq); | 55 | set_intr_postackable(XIRQ2IRQ(extnum)); |
56 | break; | 56 | break; |
57 | default: | 57 | default: |
58 | break; | 58 | break; |
diff --git a/arch/powerpc/boot/dts/holly.dts b/arch/powerpc/boot/dts/holly.dts index f87fe7b9ced9..c6e11ebecebb 100644 --- a/arch/powerpc/boot/dts/holly.dts +++ b/arch/powerpc/boot/dts/holly.dts | |||
@@ -133,61 +133,61 @@ | |||
133 | reg = <0x00007400 0x00000400>; | 133 | reg = <0x00007400 0x00000400>; |
134 | big-endian; | 134 | big-endian; |
135 | }; | 135 | }; |
136 | }; | ||
136 | 137 | ||
137 | pci@1000 { | 138 | pci@c0001000 { |
138 | device_type = "pci"; | 139 | device_type = "pci"; |
139 | compatible = "tsi109-pci", "tsi108-pci"; | 140 | compatible = "tsi109-pci", "tsi108-pci"; |
140 | #interrupt-cells = <1>; | 141 | #interrupt-cells = <1>; |
141 | #size-cells = <2>; | 142 | #size-cells = <2>; |
142 | #address-cells = <3>; | 143 | #address-cells = <3>; |
143 | reg = <0x00001000 0x00001000>; | 144 | reg = <0xc0001000 0x00001000>; |
144 | bus-range = <0x0 0x0>; | 145 | bus-range = <0x0 0x0>; |
145 | /*----------------------------------------------------+ | 146 | /*----------------------------------------------------+ |
146 | | PCI memory range. | 147 | | PCI memory range. |
147 | | 01 denotes I/O space | 148 | | 01 denotes I/O space |
148 | | 02 denotes 32-bit memory space | 149 | | 02 denotes 32-bit memory space |
149 | +----------------------------------------------------*/ | 150 | +----------------------------------------------------*/ |
150 | ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 | 151 | ranges = <0x02000000 0x00000000 0x40000000 0x40000000 0x00000000 0x10000000 |
151 | 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; | 152 | 0x01000000 0x00000000 0x00000000 0x7e000000 0x00000000 0x00010000>; |
152 | clock-frequency = <133333332>; | 153 | clock-frequency = <133333332>; |
153 | interrupt-parent = <&MPIC>; | 154 | interrupt-parent = <&MPIC>; |
155 | interrupts = <0x17 0x2>; | ||
156 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | ||
157 | /*----------------------------------------------------+ | ||
158 | | The INTA, INTB, INTC, INTD are shared. | ||
159 | +----------------------------------------------------*/ | ||
160 | interrupt-map = < | ||
161 | 0x800 0x0 0x0 0x1 &RT0 0x24 0x0 | ||
162 | 0x800 0x0 0x0 0x2 &RT0 0x25 0x0 | ||
163 | 0x800 0x0 0x0 0x3 &RT0 0x26 0x0 | ||
164 | 0x800 0x0 0x0 0x4 &RT0 0x27 0x0 | ||
165 | |||
166 | 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0 | ||
167 | 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0 | ||
168 | 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0 | ||
169 | 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0 | ||
170 | |||
171 | 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0 | ||
172 | 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0 | ||
173 | 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0 | ||
174 | 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0 | ||
175 | |||
176 | 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0 | ||
177 | 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0 | ||
178 | 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0 | ||
179 | 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0 | ||
180 | >; | ||
181 | |||
182 | RT0: router@1180 { | ||
183 | device_type = "pic-router"; | ||
184 | interrupt-controller; | ||
185 | big-endian; | ||
186 | clock-frequency = <0>; | ||
187 | #address-cells = <0>; | ||
188 | #interrupt-cells = <2>; | ||
154 | interrupts = <0x17 0x2>; | 189 | interrupts = <0x17 0x2>; |
155 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | 190 | interrupt-parent = <&MPIC>; |
156 | /*----------------------------------------------------+ | ||
157 | | The INTA, INTB, INTC, INTD are shared. | ||
158 | +----------------------------------------------------*/ | ||
159 | interrupt-map = < | ||
160 | 0x800 0x0 0x0 0x1 &RT0 0x24 0x0 | ||
161 | 0x800 0x0 0x0 0x2 &RT0 0x25 0x0 | ||
162 | 0x800 0x0 0x0 0x3 &RT0 0x26 0x0 | ||
163 | 0x800 0x0 0x0 0x4 &RT0 0x27 0x0 | ||
164 | |||
165 | 0x1000 0x0 0x0 0x1 &RT0 0x25 0x0 | ||
166 | 0x1000 0x0 0x0 0x2 &RT0 0x26 0x0 | ||
167 | 0x1000 0x0 0x0 0x3 &RT0 0x27 0x0 | ||
168 | 0x1000 0x0 0x0 0x4 &RT0 0x24 0x0 | ||
169 | |||
170 | 0x1800 0x0 0x0 0x1 &RT0 0x26 0x0 | ||
171 | 0x1800 0x0 0x0 0x2 &RT0 0x27 0x0 | ||
172 | 0x1800 0x0 0x0 0x3 &RT0 0x24 0x0 | ||
173 | 0x1800 0x0 0x0 0x4 &RT0 0x25 0x0 | ||
174 | |||
175 | 0x2000 0x0 0x0 0x1 &RT0 0x27 0x0 | ||
176 | 0x2000 0x0 0x0 0x2 &RT0 0x24 0x0 | ||
177 | 0x2000 0x0 0x0 0x3 &RT0 0x25 0x0 | ||
178 | 0x2000 0x0 0x0 0x4 &RT0 0x26 0x0 | ||
179 | >; | ||
180 | |||
181 | RT0: router@1180 { | ||
182 | device_type = "pic-router"; | ||
183 | interrupt-controller; | ||
184 | big-endian; | ||
185 | clock-frequency = <0>; | ||
186 | #address-cells = <0>; | ||
187 | #interrupt-cells = <2>; | ||
188 | interrupts = <0x17 0x2>; | ||
189 | interrupt-parent = <&MPIC>; | ||
190 | }; | ||
191 | }; | 191 | }; |
192 | }; | 192 | }; |
193 | 193 | ||
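For the relocated pci@c0001000 node, the interrupt-map rows above encode the usual rotating INTA..INTD hookup: the child unit address is masked with interrupt-map-mask (0xf800 keeps only the device-number bits of the devfn cell), the pin is masked with 0x7, and the matching row names the MPIC input. A stand-alone sketch of that lookup is below, with the parent specifier trimmed to just the MPIC input number for brevity; the example device/pin in main() is invented.

#include <stdio.h>
#include <stdint.h>

/* One row of the holly.dts interrupt-map: child unit address (hi cell),
 * child pin, and the MPIC input it routes to. */
struct imap_row { uint32_t child_hi; uint32_t pin; uint32_t mpic_input; };

static const struct imap_row map[] = {
        { 0x0800, 1, 0x24 }, { 0x0800, 2, 0x25 }, { 0x0800, 3, 0x26 }, { 0x0800, 4, 0x27 },
        { 0x1000, 1, 0x25 }, { 0x1000, 2, 0x26 }, { 0x1000, 3, 0x27 }, { 0x1000, 4, 0x24 },
        { 0x1800, 1, 0x26 }, { 0x1800, 2, 0x27 }, { 0x1800, 3, 0x24 }, { 0x1800, 4, 0x25 },
        { 0x2000, 1, 0x27 }, { 0x2000, 2, 0x24 }, { 0x2000, 3, 0x25 }, { 0x2000, 4, 0x26 },
};

/* interrupt-map-mask = <0xf800 0 0 7>: mask the unit address down to the
 * device number bits and the pin down to its low bits before matching. */
static int lookup(uint32_t devfn_hi, uint32_t pin)
{
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if ((devfn_hi & 0xf800) == map[i].child_hi &&
                    (pin & 7) == map[i].pin)
                        return (int)map[i].mpic_input;
        return -1;
}

int main(void)
{
        /* device 2, function 0, INTB (pin 2): expect MPIC input 0x26 */
        printf("0x%x\n", lookup(2 << 11, 2));
        return 0;
}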
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index d308a9f70f1b..31982d05d81a 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -34,11 +34,7 @@ | |||
34 | #include <asm/smp.h> | 34 | #include <asm/smp.h> |
35 | 35 | ||
36 | #ifdef CONFIG_HOTPLUG_CPU | 36 | #ifdef CONFIG_HOTPLUG_CPU |
37 | /* this is used for software suspend, and that shuts down | 37 | #define cpu_should_die() cpu_is_offline(smp_processor_id()) |
38 | * CPUs even while the system is still booting... */ | ||
39 | #define cpu_should_die() (cpu_is_offline(smp_processor_id()) && \ | ||
40 | (system_state == SYSTEM_RUNNING \ | ||
41 | || system_state == SYSTEM_BOOTING)) | ||
42 | #else | 38 | #else |
43 | #define cpu_should_die() 0 | 39 | #define cpu_should_die() 0 |
44 | #endif | 40 | #endif |
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index b4fdf2f2743c..fe8f71dd0b3f 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -347,9 +347,8 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
347 | linux_regs->msr |= MSR_SE; | 347 | linux_regs->msr |= MSR_SE; |
348 | #endif | 348 | #endif |
349 | kgdb_single_step = 1; | 349 | kgdb_single_step = 1; |
350 | if (kgdb_contthread) | 350 | atomic_set(&kgdb_cpu_doing_single_step, |
351 | atomic_set(&kgdb_cpu_doing_single_step, | 351 | raw_smp_processor_id()); |
352 | raw_smp_processor_id()); | ||
353 | } | 352 | } |
354 | return 0; | 353 | return 0; |
355 | } | 354 | } |
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c index ef74a0763ec1..8c619963becc 100644 --- a/arch/powerpc/platforms/fsl_uli1575.c +++ b/arch/powerpc/platforms/fsl_uli1575.c | |||
@@ -219,11 +219,21 @@ static void __devinit quirk_final_uli5249(struct pci_dev *dev) | |||
219 | int i; | 219 | int i; |
220 | u8 *dummy; | 220 | u8 *dummy; |
221 | struct pci_bus *bus = dev->bus; | 221 | struct pci_bus *bus = dev->bus; |
222 | resource_size_t end = 0; | ||
223 | |||
224 | for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCES+3; i++) { | ||
225 | unsigned long flags = pci_resource_flags(dev, i); | ||
226 | if ((flags & (IORESOURCE_MEM|IORESOURCE_PREFETCH)) == IORESOURCE_MEM) | ||
227 | end = pci_resource_end(dev, i); | ||
228 | } | ||
222 | 229 | ||
223 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { | 230 | for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { |
224 | if ((bus->resource[i]) && | 231 | if ((bus->resource[i]) && |
225 | (bus->resource[i]->flags & IORESOURCE_MEM)) { | 232 | (bus->resource[i]->flags & IORESOURCE_MEM)) { |
226 | dummy = ioremap(bus->resource[i]->end - 3, 0x4); | 233 | if (bus->resource[i]->end == end) |
234 | dummy = ioremap(bus->resource[i]->start, 0x4); | ||
235 | else | ||
236 | dummy = ioremap(bus->resource[i]->end - 3, 0x4); | ||
227 | if (dummy) { | 237 | if (dummy) { |
228 | in_8(dummy); | 238 | in_8(dummy); |
229 | iounmap(dummy); | 239 | iounmap(dummy); |
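In the uli5249 quirk above, the new loop records the end of the bridge's non-prefetchable memory window; the flags test reads as "the MEM bit set and the PREFETCH bit clear". A stand-alone illustration of that masking idiom follows; the bit values are placeholders, the real IORESOURCE_* constants live in include/linux/ioport.h.

#include <stdio.h>

/* Placeholder bits for illustration only, not the real IORESOURCE_* values. */
#define RES_MEM      0x1
#define RES_PREFETCH 0x2

/* "memory, and not prefetchable": mask both bits in, then require that
 * only the MEM bit survives. */
static int is_nonprefetch_mem(unsigned long flags)
{
        return (flags & (RES_MEM | RES_PREFETCH)) == RES_MEM;
}

int main(void)
{
        printf("MEM only       -> %d\n", is_nonprefetch_mem(RES_MEM));
        printf("MEM | PREFETCH -> %d\n", is_nonprefetch_mem(RES_MEM | RES_PREFETCH));
        printf("neither        -> %d\n", is_nonprefetch_mem(0));
        return 0;
}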
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ca114fe46ffb..06acb1a18bbc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -169,6 +169,8 @@ void init_cpu_timer(void) | |||
169 | 169 | ||
170 | static void clock_comparator_interrupt(__u16 code) | 170 | static void clock_comparator_interrupt(__u16 code) |
171 | { | 171 | { |
172 | if (S390_lowcore.clock_comparator == -1ULL) | ||
173 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
172 | } | 174 | } |
173 | 175 | ||
174 | static void etr_timing_alert(struct etr_irq_parm *); | 176 | static void etr_timing_alert(struct etr_irq_parm *); |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index fc6ab6094df8..0953cee05efc 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -1,14 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/lib/delay.c | ||
3 | * Precise Delay Loops for S390 | 2 | * Precise Delay Loops for S390 |
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999,2008 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * | ||
9 | * Derived from "arch/i386/lib/delay.c" | ||
10 | * Copyright (C) 1993 Linus Torvalds | ||
11 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
12 | */ | 7 | */ |
13 | 8 | ||
14 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
@@ -29,30 +24,31 @@ void __delay(unsigned long loops) | |||
29 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); | 24 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); |
30 | } | 25 | } |
31 | 26 | ||
32 | /* | 27 | static void __udelay_disabled(unsigned long usecs) |
33 | * Waits for 'usecs' microseconds using the TOD clock comparator. | ||
34 | */ | ||
35 | void __udelay(unsigned long usecs) | ||
36 | { | 28 | { |
37 | u64 end, time, old_cc = 0; | 29 | unsigned long mask, cr0, cr0_saved; |
38 | unsigned long flags, cr0, mask, dummy; | 30 | u64 clock_saved; |
39 | int irq_context; | ||
40 | 31 | ||
41 | irq_context = in_interrupt(); | 32 | clock_saved = local_tick_disable(); |
42 | if (!irq_context) | 33 | set_clock_comparator(get_clock() + ((u64) usecs << 12)); |
43 | local_bh_disable(); | 34 | __ctl_store(cr0_saved, 0, 0); |
44 | local_irq_save(flags); | 35 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; |
45 | if (raw_irqs_disabled_flags(flags)) { | 36 | __ctl_load(cr0 , 0, 0); |
46 | old_cc = local_tick_disable(); | 37 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; |
47 | S390_lowcore.clock_comparator = -1ULL; | 38 | trace_hardirqs_on(); |
48 | __ctl_store(cr0, 0, 0); | 39 | __load_psw_mask(mask); |
49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | 40 | local_irq_disable(); |
50 | __ctl_load(dummy , 0, 0); | 41 | __ctl_load(cr0_saved, 0, 0); |
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | 42 | local_tick_enable(clock_saved); |
52 | } else | 43 | set_clock_comparator(S390_lowcore.clock_comparator); |
53 | mask = psw_kernel_bits | PSW_MASK_WAIT | | 44 | } |
54 | PSW_MASK_EXT | PSW_MASK_IO; | ||
55 | 45 | ||
46 | static void __udelay_enabled(unsigned long usecs) | ||
47 | { | ||
48 | unsigned long mask; | ||
49 | u64 end, time; | ||
50 | |||
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO; | ||
56 | end = get_clock() + ((u64) usecs << 12); | 52 | end = get_clock() + ((u64) usecs << 12); |
57 | do { | 53 | do { |
58 | time = end < S390_lowcore.clock_comparator ? | 54 | time = end < S390_lowcore.clock_comparator ? |
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs) | |||
62 | __load_psw_mask(mask); | 58 | __load_psw_mask(mask); |
63 | local_irq_disable(); | 59 | local_irq_disable(); |
64 | } while (get_clock() < end); | 60 | } while (get_clock() < end); |
61 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
62 | } | ||
65 | 63 | ||
66 | if (raw_irqs_disabled_flags(flags)) { | 64 | /* |
67 | __ctl_load(cr0, 0, 0); | 65 | * Waits for 'usecs' microseconds using the TOD clock comparator. |
68 | local_tick_enable(old_cc); | 66 | */ |
67 | void __udelay(unsigned long usecs) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | preempt_disable(); | ||
72 | local_irq_save(flags); | ||
73 | if (in_irq()) { | ||
74 | __udelay_disabled(usecs); | ||
75 | goto out; | ||
76 | } | ||
77 | if (in_softirq()) { | ||
78 | if (raw_irqs_disabled_flags(flags)) | ||
79 | __udelay_disabled(usecs); | ||
80 | else | ||
81 | __udelay_enabled(usecs); | ||
82 | goto out; | ||
69 | } | 83 | } |
70 | if (!irq_context) | 84 | if (raw_irqs_disabled_flags(flags)) { |
85 | local_bh_disable(); | ||
86 | __udelay_disabled(usecs); | ||
71 | _local_bh_enable(); | 87 | _local_bh_enable(); |
72 | set_clock_comparator(S390_lowcore.clock_comparator); | 88 | goto out; |
89 | } | ||
90 | __udelay_enabled(usecs); | ||
91 | out: | ||
73 | local_irq_restore(flags); | 92 | local_irq_restore(flags); |
93 | preempt_enable(); | ||
74 | } | 94 | } |
diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c index f845f150f565..100ebd527499 100644 --- a/arch/sparc64/kernel/of_device.c +++ b/arch/sparc64/kernel/of_device.c | |||
@@ -169,7 +169,7 @@ static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long fla | |||
169 | 169 | ||
170 | static int of_bus_pci_match(struct device_node *np) | 170 | static int of_bus_pci_match(struct device_node *np) |
171 | { | 171 | { |
172 | if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { | 172 | if (!strcmp(np->name, "pci")) { |
173 | const char *model = of_get_property(np, "model", NULL); | 173 | const char *model = of_get_property(np, "model", NULL); |
174 | 174 | ||
175 | if (model && !strcmp(model, "SUNW,simba")) | 175 | if (model && !strcmp(model, "SUNW,simba")) |
@@ -200,7 +200,7 @@ static int of_bus_simba_match(struct device_node *np) | |||
200 | /* Treat PCI busses lacking ranges property just like | 200 | /* Treat PCI busses lacking ranges property just like |
201 | * simba. | 201 | * simba. |
202 | */ | 202 | */ |
203 | if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) { | 203 | if (!strcmp(np->name, "pci")) { |
204 | if (!of_find_property(np, "ranges", NULL)) | 204 | if (!of_find_property(np, "ranges", NULL)) |
205 | return 1; | 205 | return 1; |
206 | } | 206 | } |
@@ -429,7 +429,7 @@ static int __init use_1to1_mapping(struct device_node *pp) | |||
429 | * it lacks a ranges property, and this will include | 429 | * it lacks a ranges property, and this will include |
430 | * cases like Simba. | 430 | * cases like Simba. |
431 | */ | 431 | */ |
432 | if (!strcmp(pp->type, "pci") || !strcmp(pp->type, "pciex")) | 432 | if (!strcmp(pp->name, "pci")) |
433 | return 0; | 433 | return 0; |
434 | 434 | ||
435 | return 1; | 435 | return 1; |
@@ -714,8 +714,7 @@ static unsigned int __init build_one_device_irq(struct of_device *op, | |||
714 | break; | 714 | break; |
715 | } | 715 | } |
716 | } else { | 716 | } else { |
717 | if (!strcmp(pp->type, "pci") || | 717 | if (!strcmp(pp->name, "pci")) { |
718 | !strcmp(pp->type, "pciex")) { | ||
719 | unsigned int this_orig_irq = irq; | 718 | unsigned int this_orig_irq = irq; |
720 | 719 | ||
721 | irq = pci_irq_swizzle(dp, pp, irq); | 720 | irq = pci_irq_swizzle(dp, pp, irq); |
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c index 55096195458f..80dad76f8b81 100644 --- a/arch/sparc64/kernel/pci.c +++ b/arch/sparc64/kernel/pci.c | |||
@@ -425,7 +425,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, | |||
425 | dev->current_state = 4; /* unknown power state */ | 425 | dev->current_state = 4; /* unknown power state */ |
426 | dev->error_state = pci_channel_io_normal; | 426 | dev->error_state = pci_channel_io_normal; |
427 | 427 | ||
428 | if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { | 428 | if (!strcmp(node->name, "pci")) { |
429 | /* a PCI-PCI bridge */ | 429 | /* a PCI-PCI bridge */ |
430 | dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; | 430 | dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; |
431 | dev->rom_base_reg = PCI_ROM_ADDRESS1; | 431 | dev->rom_base_reg = PCI_ROM_ADDRESS1; |
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index a1310c52fc0c..857e492c571e 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c | |||
@@ -492,7 +492,7 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) | |||
492 | continue; | 492 | continue; |
493 | } | 493 | } |
494 | sh_symtab = sec_symtab->symtab; | 494 | sh_symtab = sec_symtab->symtab; |
495 | sym_strtab = sec->link->strtab; | 495 | sym_strtab = sec_symtab->link->strtab; |
496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { | 496 | for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { |
497 | Elf32_Rel *rel; | 497 | Elf32_Rel *rel; |
498 | Elf32_Sym *sym; | 498 | Elf32_Sym *sym; |
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 69b4d060b21c..042fdc27bc92 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -101,10 +101,10 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) | |||
101 | */ | 101 | */ |
102 | static int iommu_completion_wait(struct amd_iommu *iommu) | 102 | static int iommu_completion_wait(struct amd_iommu *iommu) |
103 | { | 103 | { |
104 | int ret, ready = 0; | 104 | int ret = 0, ready = 0; |
105 | unsigned status = 0; | 105 | unsigned status = 0; |
106 | struct iommu_cmd cmd; | 106 | struct iommu_cmd cmd; |
107 | unsigned long i = 0; | 107 | unsigned long flags, i = 0; |
108 | 108 | ||
109 | memset(&cmd, 0, sizeof(cmd)); | 109 | memset(&cmd, 0, sizeof(cmd)); |
110 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; | 110 | cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; |
@@ -112,10 +112,12 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
112 | 112 | ||
113 | iommu->need_sync = 0; | 113 | iommu->need_sync = 0; |
114 | 114 | ||
115 | ret = iommu_queue_command(iommu, &cmd); | 115 | spin_lock_irqsave(&iommu->lock, flags); |
116 | |||
117 | ret = __iommu_queue_command(iommu, &cmd); | ||
116 | 118 | ||
117 | if (ret) | 119 | if (ret) |
118 | return ret; | 120 | goto out; |
119 | 121 | ||
120 | while (!ready && (i < EXIT_LOOP_COUNT)) { | 122 | while (!ready && (i < EXIT_LOOP_COUNT)) { |
121 | ++i; | 123 | ++i; |
@@ -130,6 +132,8 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
130 | 132 | ||
131 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) | 133 | if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit())) |
132 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); | 134 | printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n"); |
135 | out: | ||
136 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
133 | 137 | ||
134 | return 0; | 138 | return 0; |
135 | } | 139 | } |
@@ -140,6 +144,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu) | |||
140 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | 144 | static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) |
141 | { | 145 | { |
142 | struct iommu_cmd cmd; | 146 | struct iommu_cmd cmd; |
147 | int ret; | ||
143 | 148 | ||
144 | BUG_ON(iommu == NULL); | 149 | BUG_ON(iommu == NULL); |
145 | 150 | ||
@@ -147,9 +152,11 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid) | |||
147 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); | 152 | CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); |
148 | cmd.data[0] = devid; | 153 | cmd.data[0] = devid; |
149 | 154 | ||
155 | ret = iommu_queue_command(iommu, &cmd); | ||
156 | |||
150 | iommu->need_sync = 1; | 157 | iommu->need_sync = 1; |
151 | 158 | ||
152 | return iommu_queue_command(iommu, &cmd); | 159 | return ret; |
153 | } | 160 | } |
154 | 161 | ||
155 | /* | 162 | /* |
@@ -159,6 +166,7 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
159 | u64 address, u16 domid, int pde, int s) | 166 | u64 address, u16 domid, int pde, int s) |
160 | { | 167 | { |
161 | struct iommu_cmd cmd; | 168 | struct iommu_cmd cmd; |
169 | int ret; | ||
162 | 170 | ||
163 | memset(&cmd, 0, sizeof(cmd)); | 171 | memset(&cmd, 0, sizeof(cmd)); |
164 | address &= PAGE_MASK; | 172 | address &= PAGE_MASK; |
@@ -171,9 +179,11 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, | |||
171 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ | 179 | if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ |
172 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; | 180 | cmd.data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; |
173 | 181 | ||
182 | ret = iommu_queue_command(iommu, &cmd); | ||
183 | |||
174 | iommu->need_sync = 1; | 184 | iommu->need_sync = 1; |
175 | 185 | ||
176 | return iommu_queue_command(iommu, &cmd); | 186 | return ret; |
177 | } | 187 | } |
178 | 188 | ||
179 | /* | 189 | /* |
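The amd_iommu change above follows the common locked/unlocked pairing: iommu_completion_wait() now takes iommu->lock itself and calls the double-underscore __iommu_queue_command(), so queueing the COMPL_WAIT command and polling for it happen inside one critical section, while the other callers queue their command before flagging need_sync. The stand-alone sketch below mimics that pairing with a pthread mutex standing in for the spinlock; the names and the integer "command" are invented for illustration.

#include <pthread.h>
#include <stdio.h>

/* __queue_command() assumes the caller already holds queue_lock;
 * queue_command() is the self-locking wrapper. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int last_cmd;
static int completed;

static int __queue_command(int cmd)             /* caller holds queue_lock */
{
        last_cmd = cmd;
        completed = 1;                          /* pretend hardware is instant */
        return 0;
}

static int queue_command(int cmd)               /* takes the lock itself */
{
        int ret;

        pthread_mutex_lock(&queue_lock);
        ret = __queue_command(cmd);
        pthread_mutex_unlock(&queue_lock);
        return ret;
}

static int completion_wait(void)
{
        int ret;

        pthread_mutex_lock(&queue_lock);
        ret = __queue_command(42);              /* queue the wait command...    */
        if (ret)
                goto out;
        while (!completed)                      /* ...and poll, still locked,   */
                ;                               /* so no other command slips in */
out:
        pthread_mutex_unlock(&queue_lock);
        return ret;
}

int main(void)
{
        queue_command(1);
        printf("completion_wait -> %d\n", completion_wait());
        return 0;
}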
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c index 9ee24e6bc4b0..732d1f4e10ee 100644 --- a/arch/x86/kernel/apm_32.c +++ b/arch/x86/kernel/apm_32.c | |||
@@ -234,6 +234,7 @@ | |||
234 | #include <asm/uaccess.h> | 234 | #include <asm/uaccess.h> |
235 | #include <asm/desc.h> | 235 | #include <asm/desc.h> |
236 | #include <asm/i8253.h> | 236 | #include <asm/i8253.h> |
237 | #include <asm/olpc.h> | ||
237 | #include <asm/paravirt.h> | 238 | #include <asm/paravirt.h> |
238 | #include <asm/reboot.h> | 239 | #include <asm/reboot.h> |
239 | 240 | ||
@@ -2217,7 +2218,7 @@ static int __init apm_init(void) | |||
2217 | 2218 | ||
2218 | dmi_check_system(apm_dmi_table); | 2219 | dmi_check_system(apm_dmi_table); |
2219 | 2220 | ||
2220 | if (apm_info.bios.version == 0 || paravirt_enabled()) { | 2221 | if (apm_info.bios.version == 0 || paravirt_enabled() || machine_is_olpc()) { |
2221 | printk(KERN_INFO "apm: BIOS not found.\n"); | 2222 | printk(KERN_INFO "apm: BIOS not found.\n"); |
2222 | return -ENODEV; | 2223 | return -ENODEV; |
2223 | } | 2224 | } |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index b117d7f8a564..885c8265e6b5 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -834,7 +834,7 @@ static int __init enable_mtrr_cleanup_setup(char *str) | |||
834 | enable_mtrr_cleanup = 1; | 834 | enable_mtrr_cleanup = 1; |
835 | return 0; | 835 | return 0; |
836 | } | 836 | } |
837 | early_param("enble_mtrr_cleanup", enable_mtrr_cleanup_setup); | 837 | early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup); |
838 | 838 | ||
839 | struct var_mtrr_state { | 839 | struct var_mtrr_state { |
840 | unsigned long range_startk; | 840 | unsigned long range_startk; |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index f47f0eb886b8..8282a2139681 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -69,6 +69,9 @@ static int gdb_x86vector = -1; | |||
69 | */ | 69 | */ |
70 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 70 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) |
71 | { | 71 | { |
72 | #ifndef CONFIG_X86_32 | ||
73 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
74 | #endif | ||
72 | gdb_regs[GDB_AX] = regs->ax; | 75 | gdb_regs[GDB_AX] = regs->ax; |
73 | gdb_regs[GDB_BX] = regs->bx; | 76 | gdb_regs[GDB_BX] = regs->bx; |
74 | gdb_regs[GDB_CX] = regs->cx; | 77 | gdb_regs[GDB_CX] = regs->cx; |
@@ -76,9 +79,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
76 | gdb_regs[GDB_SI] = regs->si; | 79 | gdb_regs[GDB_SI] = regs->si; |
77 | gdb_regs[GDB_DI] = regs->di; | 80 | gdb_regs[GDB_DI] = regs->di; |
78 | gdb_regs[GDB_BP] = regs->bp; | 81 | gdb_regs[GDB_BP] = regs->bp; |
79 | gdb_regs[GDB_PS] = regs->flags; | ||
80 | gdb_regs[GDB_PC] = regs->ip; | 82 | gdb_regs[GDB_PC] = regs->ip; |
81 | #ifdef CONFIG_X86_32 | 83 | #ifdef CONFIG_X86_32 |
84 | gdb_regs[GDB_PS] = regs->flags; | ||
82 | gdb_regs[GDB_DS] = regs->ds; | 85 | gdb_regs[GDB_DS] = regs->ds; |
83 | gdb_regs[GDB_ES] = regs->es; | 86 | gdb_regs[GDB_ES] = regs->es; |
84 | gdb_regs[GDB_CS] = regs->cs; | 87 | gdb_regs[GDB_CS] = regs->cs; |
@@ -94,6 +97,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
94 | gdb_regs[GDB_R13] = regs->r13; | 97 | gdb_regs[GDB_R13] = regs->r13; |
95 | gdb_regs[GDB_R14] = regs->r14; | 98 | gdb_regs[GDB_R14] = regs->r14; |
96 | gdb_regs[GDB_R15] = regs->r15; | 99 | gdb_regs[GDB_R15] = regs->r15; |
100 | gdb_regs32[GDB_PS] = regs->flags; | ||
101 | gdb_regs32[GDB_CS] = regs->cs; | ||
102 | gdb_regs32[GDB_SS] = regs->ss; | ||
97 | #endif | 103 | #endif |
98 | gdb_regs[GDB_SP] = regs->sp; | 104 | gdb_regs[GDB_SP] = regs->sp; |
99 | } | 105 | } |
@@ -112,6 +118,9 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
112 | */ | 118 | */ |
113 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | 119 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) |
114 | { | 120 | { |
121 | #ifndef CONFIG_X86_32 | ||
122 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
123 | #endif | ||
115 | gdb_regs[GDB_AX] = 0; | 124 | gdb_regs[GDB_AX] = 0; |
116 | gdb_regs[GDB_BX] = 0; | 125 | gdb_regs[GDB_BX] = 0; |
117 | gdb_regs[GDB_CX] = 0; | 126 | gdb_regs[GDB_CX] = 0; |
@@ -129,8 +138,10 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
129 | gdb_regs[GDB_FS] = 0xFFFF; | 138 | gdb_regs[GDB_FS] = 0xFFFF; |
130 | gdb_regs[GDB_GS] = 0xFFFF; | 139 | gdb_regs[GDB_GS] = 0xFFFF; |
131 | #else | 140 | #else |
132 | gdb_regs[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); | 141 | gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); |
133 | gdb_regs[GDB_PC] = 0; | 142 | gdb_regs32[GDB_CS] = __KERNEL_CS; |
143 | gdb_regs32[GDB_SS] = __KERNEL_DS; | ||
144 | gdb_regs[GDB_PC] = p->thread.ip; | ||
134 | gdb_regs[GDB_R8] = 0; | 145 | gdb_regs[GDB_R8] = 0; |
135 | gdb_regs[GDB_R9] = 0; | 146 | gdb_regs[GDB_R9] = 0; |
136 | gdb_regs[GDB_R10] = 0; | 147 | gdb_regs[GDB_R10] = 0; |
@@ -153,6 +164,9 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
153 | */ | 164 | */ |
154 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 165 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) |
155 | { | 166 | { |
167 | #ifndef CONFIG_X86_32 | ||
168 | u32 *gdb_regs32 = (u32 *)gdb_regs; | ||
169 | #endif | ||
156 | regs->ax = gdb_regs[GDB_AX]; | 170 | regs->ax = gdb_regs[GDB_AX]; |
157 | regs->bx = gdb_regs[GDB_BX]; | 171 | regs->bx = gdb_regs[GDB_BX]; |
158 | regs->cx = gdb_regs[GDB_CX]; | 172 | regs->cx = gdb_regs[GDB_CX]; |
@@ -160,9 +174,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
160 | regs->si = gdb_regs[GDB_SI]; | 174 | regs->si = gdb_regs[GDB_SI]; |
161 | regs->di = gdb_regs[GDB_DI]; | 175 | regs->di = gdb_regs[GDB_DI]; |
162 | regs->bp = gdb_regs[GDB_BP]; | 176 | regs->bp = gdb_regs[GDB_BP]; |
163 | regs->flags = gdb_regs[GDB_PS]; | ||
164 | regs->ip = gdb_regs[GDB_PC]; | 177 | regs->ip = gdb_regs[GDB_PC]; |
165 | #ifdef CONFIG_X86_32 | 178 | #ifdef CONFIG_X86_32 |
179 | regs->flags = gdb_regs[GDB_PS]; | ||
166 | regs->ds = gdb_regs[GDB_DS]; | 180 | regs->ds = gdb_regs[GDB_DS]; |
167 | regs->es = gdb_regs[GDB_ES]; | 181 | regs->es = gdb_regs[GDB_ES]; |
168 | regs->cs = gdb_regs[GDB_CS]; | 182 | regs->cs = gdb_regs[GDB_CS]; |
@@ -175,6 +189,9 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
175 | regs->r13 = gdb_regs[GDB_R13]; | 189 | regs->r13 = gdb_regs[GDB_R13]; |
176 | regs->r14 = gdb_regs[GDB_R14]; | 190 | regs->r14 = gdb_regs[GDB_R14]; |
177 | regs->r15 = gdb_regs[GDB_R15]; | 191 | regs->r15 = gdb_regs[GDB_R15]; |
192 | regs->flags = gdb_regs32[GDB_PS]; | ||
193 | regs->cs = gdb_regs32[GDB_CS]; | ||
194 | regs->ss = gdb_regs32[GDB_SS]; | ||
178 | #endif | 195 | #endif |
179 | } | 196 | } |
180 | 197 | ||
@@ -378,10 +395,8 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, | |||
378 | if (remcomInBuffer[0] == 's') { | 395 | if (remcomInBuffer[0] == 's') { |
379 | linux_regs->flags |= X86_EFLAGS_TF; | 396 | linux_regs->flags |= X86_EFLAGS_TF; |
380 | kgdb_single_step = 1; | 397 | kgdb_single_step = 1; |
381 | if (kgdb_contthread) { | 398 | atomic_set(&kgdb_cpu_doing_single_step, |
382 | atomic_set(&kgdb_cpu_doing_single_step, | 399 | raw_smp_processor_id()); |
383 | raw_smp_processor_id()); | ||
384 | } | ||
385 | } | 400 | } |
386 | 401 | ||
387 | get_debugreg(dr6, 6); | 402 | get_debugreg(dr6, 6); |
@@ -466,9 +481,15 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd) | |||
466 | 481 | ||
467 | case DIE_DEBUG: | 482 | case DIE_DEBUG: |
468 | if (atomic_read(&kgdb_cpu_doing_single_step) == | 483 | if (atomic_read(&kgdb_cpu_doing_single_step) == |
469 | raw_smp_processor_id() && | 484 | raw_smp_processor_id()) { |
470 | user_mode(regs)) | 485 | if (user_mode(regs)) |
471 | return single_step_cont(regs, args); | 486 | return single_step_cont(regs, args); |
487 | break; | ||
488 | } else if (test_thread_flag(TIF_SINGLESTEP)) | ||
489 | /* This means a user thread is single stepping | ||
490 | * a system call which should be ignored | ||
491 | */ | ||
492 | return NOTIFY_DONE; | ||
472 | /* fall through */ | 493 | /* fall through */ |
473 | default: | 494 | default: |
474 | if (user_mode(regs)) | 495 | if (user_mode(regs)) |
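The kgdb changes above hinge on GDB's x86-64 register file keeping eflags, cs and ss as 32-bit entries, so the 64-bit code views the same buffer through a u32 pointer (gdb_regs32) for those three slots while the general-purpose registers stay 64-bit. A stand-alone sketch of that aliasing follows; the slot indices are invented for illustration, the real GDB_PS/GDB_CS/GDB_SS values come from asm/kgdb.h, and the kernel builds with -fno-strict-aliasing, which is what keeps this kind of cast safe there.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NR_U64_REGS  4                     /* illustrative count of 64-bit regs */
#define SLOT_PS      (NR_U64_REGS * 2)     /* first 32-bit slot after them      */
#define SLOT_CS      (SLOT_PS + 1)
#define SLOT_SS      (SLOT_PS + 2)

int main(void)
{
        uint64_t gdb_regs[NR_U64_REGS + 2];         /* room for three u32 slots */
        uint32_t *gdb_regs32 = (uint32_t *)gdb_regs;

        memset(gdb_regs, 0, sizeof(gdb_regs));
        gdb_regs[0] = 0x1122334455667788ULL;        /* a 64-bit GP register */
        gdb_regs32[SLOT_PS] = 0x202;                /* eflags, 32-bit slot  */
        gdb_regs32[SLOT_CS] = 0x10;                 /* cs, 32-bit slot      */
        gdb_regs32[SLOT_SS] = 0x18;                 /* ss, 32-bit slot      */

        printf("reg0=%#llx flags=%#x cs=%#x ss=%#x\n",
               (unsigned long long)gdb_regs[0], gdb_regs32[SLOT_PS],
               gdb_regs32[SLOT_CS], gdb_regs32[SLOT_SS]);
        return 0;
}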
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 7fc4d5b0a6a0..876e91890777 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -246,6 +246,14 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
246 | return 1; | 246 | return 1; |
247 | } | 247 | } |
248 | 248 | ||
249 | static cpumask_t c1e_mask = CPU_MASK_NONE; | ||
250 | static int c1e_detected; | ||
251 | |||
252 | void c1e_remove_cpu(int cpu) | ||
253 | { | ||
254 | cpu_clear(cpu, c1e_mask); | ||
255 | } | ||
256 | |||
249 | /* | 257 | /* |
250 | * C1E aware idle routine. We check for C1E active in the interrupt | 258 | * C1E aware idle routine. We check for C1E active in the interrupt |
251 | * pending message MSR. If we detect C1E, then we handle it the same | 259 | * pending message MSR. If we detect C1E, then we handle it the same |
@@ -253,9 +261,6 @@ static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | |||
253 | */ | 261 | */ |
254 | static void c1e_idle(void) | 262 | static void c1e_idle(void) |
255 | { | 263 | { |
256 | static cpumask_t c1e_mask = CPU_MASK_NONE; | ||
257 | static int c1e_detected; | ||
258 | |||
259 | if (need_resched()) | 264 | if (need_resched()) |
260 | return; | 265 | return; |
261 | 266 | ||
@@ -265,8 +270,10 @@ static void c1e_idle(void) | |||
265 | rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); | 270 | rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi); |
266 | if (lo & K8_INTP_C1E_ACTIVE_MASK) { | 271 | if (lo & K8_INTP_C1E_ACTIVE_MASK) { |
267 | c1e_detected = 1; | 272 | c1e_detected = 1; |
268 | mark_tsc_unstable("TSC halt in C1E"); | 273 | if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) |
269 | printk(KERN_INFO "System has C1E enabled\n"); | 274 | mark_tsc_unstable("TSC halt in AMD C1E"); |
275 | printk(KERN_INFO "System has AMD C1E enabled\n"); | ||
276 | set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E); | ||
270 | } | 277 | } |
271 | } | 278 | } |
272 | 279 | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 3b7a1ddcc0bc..31f40b24bf5d 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <asm/tlbflush.h> | 55 | #include <asm/tlbflush.h> |
56 | #include <asm/cpu.h> | 56 | #include <asm/cpu.h> |
57 | #include <asm/kdebug.h> | 57 | #include <asm/kdebug.h> |
58 | #include <asm/idle.h> | ||
58 | 59 | ||
59 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); | 60 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
60 | 61 | ||
@@ -88,6 +89,7 @@ static void cpu_exit_clear(void) | |||
88 | cpu_clear(cpu, cpu_callin_map); | 89 | cpu_clear(cpu, cpu_callin_map); |
89 | 90 | ||
90 | numa_remove_cpu(cpu); | 91 | numa_remove_cpu(cpu); |
92 | c1e_remove_cpu(cpu); | ||
91 | } | 93 | } |
92 | 94 | ||
93 | /* We don't actually take CPU down, just spin without interrupts. */ | 95 | /* We don't actually take CPU down, just spin without interrupts. */ |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 71553b664e2a..e12e0e4dd256 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
@@ -93,6 +93,8 @@ DECLARE_PER_CPU(int, cpu_state); | |||
93 | static inline void play_dead(void) | 93 | static inline void play_dead(void) |
94 | { | 94 | { |
95 | idle_task_exit(); | 95 | idle_task_exit(); |
96 | c1e_remove_cpu(raw_smp_processor_id()); | ||
97 | |||
96 | mb(); | 98 | mb(); |
97 | /* Ack it */ | 99 | /* Ack it */ |
98 | __get_cpu_var(cpu_state) = CPU_DEAD; | 100 | __get_cpu_var(cpu_state) = CPU_DEAD; |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 6ca515d6db54..edfb09f30479 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -235,7 +235,7 @@ static void vmi_write_ldt_entry(struct desc_struct *dt, int entry, | |||
235 | const void *desc) | 235 | const void *desc) |
236 | { | 236 | { |
237 | u32 *ldt_entry = (u32 *)desc; | 237 | u32 *ldt_entry = (u32 *)desc; |
238 | vmi_ops.write_idt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); | 238 | vmi_ops.write_ldt_entry(dt, entry, ldt_entry[0], ldt_entry[1]); |
239 | } | 239 | } |
240 | 240 | ||
241 | static void vmi_load_sp0(struct tss_struct *tss, | 241 | static void vmi_load_sp0(struct tss_struct *tss, |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 0c029e8959c7..7766d36983fc 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
@@ -61,7 +61,7 @@ static void vsmp_irq_enable(void) | |||
61 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | 61 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); |
62 | } | 62 | } |
63 | 63 | ||
64 | static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, | 64 | static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, |
65 | unsigned long addr, unsigned len) | 65 | unsigned long addr, unsigned len) |
66 | { | 66 | { |
67 | switch (type) { | 67 | switch (type) { |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 0227694f7dab..8a5f1614a3d5 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -295,10 +295,12 @@ static void nmi_cpu_shutdown(void *dummy) | |||
295 | 295 | ||
296 | static void nmi_shutdown(void) | 296 | static void nmi_shutdown(void) |
297 | { | 297 | { |
298 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); | 298 | struct op_msrs *msrs; |
299 | |||
299 | nmi_enabled = 0; | 300 | nmi_enabled = 0; |
300 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); | 301 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
301 | unregister_die_notifier(&profile_exceptions_nb); | 302 | unregister_die_notifier(&profile_exceptions_nb); |
303 | msrs = &get_cpu_var(cpu_msrs); | ||
302 | model->shutdown(msrs); | 304 | model->shutdown(msrs); |
303 | free_msrs(); | 305 | free_msrs(); |
304 | put_cpu_var(cpu_msrs); | 306 | put_cpu_var(cpu_msrs); |
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c index 0a5f6b2114c5..d672cfe7ca59 100644 --- a/drivers/accessibility/braille/braille_console.c +++ b/drivers/accessibility/braille/braille_console.c | |||
@@ -376,6 +376,8 @@ int braille_register_console(struct console *console, int index, | |||
376 | console->flags |= CON_ENABLED; | 376 | console->flags |= CON_ENABLED; |
377 | console->index = index; | 377 | console->index = index; |
378 | braille_co = console; | 378 | braille_co = console; |
379 | register_keyboard_notifier(&keyboard_notifier_block); | ||
380 | register_vt_notifier(&vt_notifier_block); | ||
379 | return 0; | 381 | return 0; |
380 | } | 382 | } |
381 | 383 | ||
@@ -383,15 +385,8 @@ int braille_unregister_console(struct console *console) | |||
383 | { | 385 | { |
384 | if (braille_co != console) | 386 | if (braille_co != console) |
385 | return -EINVAL; | 387 | return -EINVAL; |
388 | unregister_keyboard_notifier(&keyboard_notifier_block); | ||
389 | unregister_vt_notifier(&vt_notifier_block); | ||
386 | braille_co = NULL; | 390 | braille_co = NULL; |
387 | return 0; | 391 | return 0; |
388 | } | 392 | } |
389 | |||
390 | static int __init braille_init(void) | ||
391 | { | ||
392 | register_keyboard_notifier(&keyboard_notifier_block); | ||
393 | register_vt_notifier(&vt_notifier_block); | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | console_initcall(braille_init); | ||
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 084109507c9f..8dd3336efd7e 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
165 | "firmware_node"); | 165 | "firmware_node"); |
166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
167 | "physical_node"); | 167 | "physical_node"); |
168 | if (acpi_dev->wakeup.flags.valid) | 168 | if (acpi_dev->wakeup.flags.valid) { |
169 | device_set_wakeup_capable(dev, true); | 169 | device_set_wakeup_capable(dev, true); |
170 | device_set_wakeup_enable(dev, | ||
171 | acpi_dev->wakeup.state.enabled); | ||
172 | } | ||
170 | } | 173 | } |
171 | 174 | ||
172 | return 0; | 175 | return 0; |
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 4ebbba2b6b19..bf5b04de02d1 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | static void physical_device_enable_wakeup(struct acpi_device *adev) | ||
381 | { | ||
382 | struct device *dev = acpi_get_physical_device(adev->handle); | ||
383 | |||
384 | if (dev && device_can_wakeup(dev)) | ||
385 | device_set_wakeup_enable(dev, adev->wakeup.state.enabled); | ||
386 | } | ||
387 | |||
380 | static ssize_t | 388 | static ssize_t |
381 | acpi_system_write_wakeup_device(struct file *file, | 389 | acpi_system_write_wakeup_device(struct file *file, |
382 | const char __user * buffer, | 390 | const char __user * buffer, |
@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
411 | } | 419 | } |
412 | } | 420 | } |
413 | if (found_dev) { | 421 | if (found_dev) { |
422 | physical_device_enable_wakeup(found_dev); | ||
414 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 423 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
415 | struct acpi_device *dev = container_of(node, | 424 | struct acpi_device *dev = container_of(node, |
416 | struct | 425 | struct |
@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
428 | dev->pnp.bus_id, found_dev->pnp.bus_id); | 437 | dev->pnp.bus_id, found_dev->pnp.bus_id); |
429 | dev->wakeup.state.enabled = | 438 | dev->wakeup.state.enabled = |
430 | found_dev->wakeup.state.enabled; | 439 | found_dev->wakeup.state.enabled; |
440 | physical_device_enable_wakeup(dev); | ||
431 | } | 441 | } |
432 | } | 442 | } |
433 | } | 443 | } |
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 1e1f3f3757ae..14601dc05e41 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c | |||
@@ -309,6 +309,8 @@ static void nv_nf2_freeze(struct ata_port *ap); | |||
309 | static void nv_nf2_thaw(struct ata_port *ap); | 309 | static void nv_nf2_thaw(struct ata_port *ap); |
310 | static void nv_ck804_freeze(struct ata_port *ap); | 310 | static void nv_ck804_freeze(struct ata_port *ap); |
311 | static void nv_ck804_thaw(struct ata_port *ap); | 311 | static void nv_ck804_thaw(struct ata_port *ap); |
312 | static int nv_hardreset(struct ata_link *link, unsigned int *class, | ||
313 | unsigned long deadline); | ||
312 | static int nv_adma_slave_config(struct scsi_device *sdev); | 314 | static int nv_adma_slave_config(struct scsi_device *sdev); |
313 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); | 315 | static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); |
314 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); | 316 | static void nv_adma_qc_prep(struct ata_queued_cmd *qc); |
@@ -403,28 +405,45 @@ static struct scsi_host_template nv_swncq_sht = { | |||
403 | .slave_configure = nv_swncq_slave_config, | 405 | .slave_configure = nv_swncq_slave_config, |
404 | }; | 406 | }; |
405 | 407 | ||
406 | static struct ata_port_operations nv_generic_ops = { | 408 | /* OSDL bz3352 reports that some nv controllers can't determine device |
409 | * signature reliably and nv_hardreset is implemented to work around | ||
410 | * the problem. This was reported on nf3 and it's unclear whether any | ||
411 | * other controllers are affected. However, the workaround has been | ||
412 | * applied to all variants and there isn't much to gain by trying to | ||
413 | * find out exactly which ones are affected at this point especially | ||
414 | * because NV has moved over to ahci for newer controllers. | ||
415 | */ | ||
416 | static struct ata_port_operations nv_common_ops = { | ||
407 | .inherits = &ata_bmdma_port_ops, | 417 | .inherits = &ata_bmdma_port_ops, |
408 | .hardreset = ATA_OP_NULL, | 418 | .hardreset = nv_hardreset, |
409 | .scr_read = nv_scr_read, | 419 | .scr_read = nv_scr_read, |
410 | .scr_write = nv_scr_write, | 420 | .scr_write = nv_scr_write, |
411 | }; | 421 | }; |
412 | 422 | ||
423 | /* OSDL bz11195 reports that link doesn't come online after hardreset | ||
424 | * on generic nv's and there have been several other similar reports | ||
425 | * on linux-ide. Disable hardreset for generic nv's. | ||
426 | */ | ||
427 | static struct ata_port_operations nv_generic_ops = { | ||
428 | .inherits = &nv_common_ops, | ||
429 | .hardreset = ATA_OP_NULL, | ||
430 | }; | ||
431 | |||
413 | static struct ata_port_operations nv_nf2_ops = { | 432 | static struct ata_port_operations nv_nf2_ops = { |
414 | .inherits = &nv_generic_ops, | 433 | .inherits = &nv_common_ops, |
415 | .freeze = nv_nf2_freeze, | 434 | .freeze = nv_nf2_freeze, |
416 | .thaw = nv_nf2_thaw, | 435 | .thaw = nv_nf2_thaw, |
417 | }; | 436 | }; |
418 | 437 | ||
419 | static struct ata_port_operations nv_ck804_ops = { | 438 | static struct ata_port_operations nv_ck804_ops = { |
420 | .inherits = &nv_generic_ops, | 439 | .inherits = &nv_common_ops, |
421 | .freeze = nv_ck804_freeze, | 440 | .freeze = nv_ck804_freeze, |
422 | .thaw = nv_ck804_thaw, | 441 | .thaw = nv_ck804_thaw, |
423 | .host_stop = nv_ck804_host_stop, | 442 | .host_stop = nv_ck804_host_stop, |
424 | }; | 443 | }; |
425 | 444 | ||
426 | static struct ata_port_operations nv_adma_ops = { | 445 | static struct ata_port_operations nv_adma_ops = { |
427 | .inherits = &nv_generic_ops, | 446 | .inherits = &nv_common_ops, |
428 | 447 | ||
429 | .check_atapi_dma = nv_adma_check_atapi_dma, | 448 | .check_atapi_dma = nv_adma_check_atapi_dma, |
430 | .sff_tf_read = nv_adma_tf_read, | 449 | .sff_tf_read = nv_adma_tf_read, |
@@ -448,7 +467,7 @@ static struct ata_port_operations nv_adma_ops = { | |||
448 | }; | 467 | }; |
449 | 468 | ||
450 | static struct ata_port_operations nv_swncq_ops = { | 469 | static struct ata_port_operations nv_swncq_ops = { |
451 | .inherits = &nv_generic_ops, | 470 | .inherits = &nv_common_ops, |
452 | 471 | ||
453 | .qc_defer = ata_std_qc_defer, | 472 | .qc_defer = ata_std_qc_defer, |
454 | .qc_prep = nv_swncq_qc_prep, | 473 | .qc_prep = nv_swncq_qc_prep, |
@@ -1586,6 +1605,21 @@ static void nv_mcp55_thaw(struct ata_port *ap) | |||
1586 | ata_sff_thaw(ap); | 1605 | ata_sff_thaw(ap); |
1587 | } | 1606 | } |
1588 | 1607 | ||
1608 | static int nv_hardreset(struct ata_link *link, unsigned int *class, | ||
1609 | unsigned long deadline) | ||
1610 | { | ||
1611 | int rc; | ||
1612 | |||
1613 | /* SATA hardreset fails to retrieve proper device signature on | ||
1614 | * some controllers. Request follow up SRST. For more info, | ||
1615 | * see http://bugzilla.kernel.org/show_bug.cgi?id=3352 | ||
1616 | */ | ||
1617 | rc = sata_sff_hardreset(link, class, deadline); | ||
1618 | if (rc) | ||
1619 | return rc; | ||
1620 | return -EAGAIN; | ||
1621 | } | ||
1622 | |||
1589 | static void nv_adma_error_handler(struct ata_port *ap) | 1623 | static void nv_adma_error_handler(struct ata_port *ap) |
1590 | { | 1624 | { |
1591 | struct nv_adma_port_priv *pp = ap->private_data; | 1625 | struct nv_adma_port_priv *pp = ap->private_data; |
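The sata_nv rework above introduces nv_common_ops carrying the nv_hardreset() workaround (do the hardreset, then return -EAGAIN so EH follows up with a softreset to get a usable signature), while nv_generic_ops inherits from it but puts ATA_OP_NULL in .hardreset to disable hardreset on the generic flavours. Roughly speaking, libata resolves .inherits by filling empty method slots from the parent ops and treating ATA_OP_NULL as "explicitly none"; the stand-alone sketch below mimics that resolution with simplified types, so the ops structs and return values here are illustrative only.

#include <stdio.h>
#include <stdint.h>

typedef int (*reset_fn)(void);

#define OP_NULL ((reset_fn)(uintptr_t)-1)   /* "explicitly no method" sentinel */

struct port_ops {
        const struct port_ops *inherits;
        reset_fn hardreset;
};

static int common_hardreset(void)
{
        return -11;     /* e.g. -EAGAIN: ask EH to follow up with a softreset */
}

static const struct port_ops common_ops  = { .hardreset = common_hardreset };
static struct port_ops nf2_ops           = { .inherits = &common_ops };
static struct port_ops generic_ops       = { .inherits = &common_ops,
                                             .hardreset = OP_NULL };

/* Fill unset slots from the parent, then turn the sentinel back into NULL. */
static void finalize(struct port_ops *ops)
{
        if (!ops->hardreset && ops->inherits)
                ops->hardreset = ops->inherits->hardreset;
        if (ops->hardreset == OP_NULL)
                ops->hardreset = NULL;
}

int main(void)
{
        finalize(&nf2_ops);
        finalize(&generic_ops);
        printf("nf2:     hardreset %s\n", nf2_ops.hardreset ? "inherited" : "disabled");
        printf("generic: hardreset %s\n", generic_ops.hardreset ? "inherited" : "disabled");
        return 0;
}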
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 6a010681ecf3..29ae99817c60 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -104,6 +104,9 @@ static struct usb_device_id blacklist_table[] = { | |||
104 | /* Broadcom BCM2046 */ | 104 | /* Broadcom BCM2046 */ |
105 | { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, | 105 | { USB_DEVICE(0x0a5c, 0x2151), .driver_info = BTUSB_RESET }, |
106 | 106 | ||
107 | /* Apple MacBook Pro with Broadcom chip */ | ||
108 | { USB_DEVICE(0x05ac, 0x820f), .driver_info = BTUSB_RESET }, | ||
109 | |||
107 | /* IBM/Lenovo ThinkPad with Broadcom chip */ | 110 | /* IBM/Lenovo ThinkPad with Broadcom chip */ |
108 | { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, | 111 | { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, |
109 | { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, | 112 | { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_RESET | BTUSB_WRONG_SCO_MTU }, |
@@ -169,6 +172,7 @@ static struct usb_device_id blacklist_table[] = { | |||
169 | struct btusb_data { | 172 | struct btusb_data { |
170 | struct hci_dev *hdev; | 173 | struct hci_dev *hdev; |
171 | struct usb_device *udev; | 174 | struct usb_device *udev; |
175 | struct usb_interface *intf; | ||
172 | struct usb_interface *isoc; | 176 | struct usb_interface *isoc; |
173 | 177 | ||
174 | spinlock_t lock; | 178 | spinlock_t lock; |
@@ -516,7 +520,7 @@ static int btusb_open(struct hci_dev *hdev) | |||
516 | 520 | ||
517 | err = btusb_submit_intr_urb(hdev); | 521 | err = btusb_submit_intr_urb(hdev); |
518 | if (err < 0) { | 522 | if (err < 0) { |
519 | clear_bit(BTUSB_INTR_RUNNING, &hdev->flags); | 523 | clear_bit(BTUSB_INTR_RUNNING, &data->flags); |
520 | clear_bit(HCI_RUNNING, &hdev->flags); | 524 | clear_bit(HCI_RUNNING, &hdev->flags); |
521 | } | 525 | } |
522 | 526 | ||
@@ -532,8 +536,10 @@ static int btusb_close(struct hci_dev *hdev) | |||
532 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) | 536 | if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) |
533 | return 0; | 537 | return 0; |
534 | 538 | ||
539 | cancel_work_sync(&data->work); | ||
540 | |||
535 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | 541 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); |
536 | usb_kill_anchored_urbs(&data->intr_anchor); | 542 | usb_kill_anchored_urbs(&data->isoc_anchor); |
537 | 543 | ||
538 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); | 544 | clear_bit(BTUSB_BULK_RUNNING, &data->flags); |
539 | usb_kill_anchored_urbs(&data->bulk_anchor); | 545 | usb_kill_anchored_urbs(&data->bulk_anchor); |
@@ -821,6 +827,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
821 | } | 827 | } |
822 | 828 | ||
823 | data->udev = interface_to_usbdev(intf); | 829 | data->udev = interface_to_usbdev(intf); |
830 | data->intf = intf; | ||
824 | 831 | ||
825 | spin_lock_init(&data->lock); | 832 | spin_lock_init(&data->lock); |
826 | 833 | ||
@@ -889,7 +896,7 @@ static int btusb_probe(struct usb_interface *intf, | |||
889 | 896 | ||
890 | if (data->isoc) { | 897 | if (data->isoc) { |
891 | err = usb_driver_claim_interface(&btusb_driver, | 898 | err = usb_driver_claim_interface(&btusb_driver, |
892 | data->isoc, NULL); | 899 | data->isoc, data); |
893 | if (err < 0) { | 900 | if (err < 0) { |
894 | hci_free_dev(hdev); | 901 | hci_free_dev(hdev); |
895 | kfree(data); | 902 | kfree(data); |
@@ -921,13 +928,22 @@ static void btusb_disconnect(struct usb_interface *intf) | |||
921 | 928 | ||
922 | hdev = data->hdev; | 929 | hdev = data->hdev; |
923 | 930 | ||
924 | if (data->isoc) | 931 | __hci_dev_hold(hdev); |
925 | usb_driver_release_interface(&btusb_driver, data->isoc); | ||
926 | 932 | ||
927 | usb_set_intfdata(intf, NULL); | 933 | usb_set_intfdata(data->intf, NULL); |
934 | |||
935 | if (data->isoc) | ||
936 | usb_set_intfdata(data->isoc, NULL); | ||
928 | 937 | ||
929 | hci_unregister_dev(hdev); | 938 | hci_unregister_dev(hdev); |
930 | 939 | ||
940 | if (intf == data->isoc) | ||
941 | usb_driver_release_interface(&btusb_driver, data->intf); | ||
942 | else if (data->isoc) | ||
943 | usb_driver_release_interface(&btusb_driver, data->isoc); | ||
944 | |||
945 | __hci_dev_put(hdev); | ||
946 | |||
931 | hci_free_dev(hdev); | 947 | hci_free_dev(hdev); |
932 | } | 948 | } |
933 | 949 | ||
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index daeb8f766971..e4dce8709541 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -695,13 +695,23 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) | |||
695 | { | 695 | { |
696 | struct tty_driver *p, *res = NULL; | 696 | struct tty_driver *p, *res = NULL; |
697 | int tty_line = 0; | 697 | int tty_line = 0; |
698 | int len; | ||
698 | char *str; | 699 | char *str; |
699 | 700 | ||
701 | for (str = name; *str; str++) | ||
702 | if ((*str >= '0' && *str <= '9') || *str == ',') | ||
703 | break; | ||
704 | if (!*str) | ||
705 | return NULL; | ||
706 | |||
707 | len = str - name; | ||
708 | tty_line = simple_strtoul(str, &str, 10); | ||
709 | |||
700 | mutex_lock(&tty_mutex); | 710 | mutex_lock(&tty_mutex); |
701 | /* Search through the tty devices to look for a match */ | 711 | /* Search through the tty devices to look for a match */ |
702 | list_for_each_entry(p, &tty_drivers, tty_drivers) { | 712 | list_for_each_entry(p, &tty_drivers, tty_drivers) { |
703 | str = name + strlen(p->name); | 713 | if (strncmp(name, p->name, len) != 0) |
704 | tty_line = simple_strtoul(str, &str, 10); | 714 | continue; |
705 | if (*str == ',') | 715 | if (*str == ',') |
706 | str++; | 716 | str++; |
707 | if (*str == '\0') | 717 | if (*str == '\0') |
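The tty_find_polling_driver() change above stops re-parsing the console name inside the driver loop: it first scans for the first digit or ',', rejects names with no line number, remembers the prefix length, and parses the number once. A standalone sketch of that parsing step, assuming nothing beyond the hunk itself — split_console_name() and the sample string are illustrative, and strtoul() stands in for the kernel's simple_strtoul():

#include <stdio.h>
#include <stdlib.h>

/* Split e.g. "ttyS0" into a driver-name prefix length and a line number,
 * mirroring the scan-for-digit-or-comma loop added in the hunk above. */
static int split_console_name(const char *name, int *prefix_len,
			      unsigned long *line)
{
	const char *str;

	for (str = name; *str; str++)
		if ((*str >= '0' && *str <= '9') || *str == ',')
			break;
	if (!*str)
		return -1;		/* no line number given at all */

	*prefix_len = str - name;	/* e.g. 4 for "ttyS" */
	*line = strtoul(str, NULL, 10);
	return 0;
}

int main(void)
{
	int len;
	unsigned long line;

	if (split_console_name("ttyS0", &len, &line) == 0)
		printf("prefix length %d, line %lu\n", len, line);
	return 0;
}

With the prefix length known up front, the loop can match drivers with a plain strncmp() instead of assuming that the first strlen(p->name) characters of the input belong to the driver name.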
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 94df91771243..0778d99aea7c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
364 | int i; | 364 | int i; |
365 | 365 | ||
366 | status_block = dma_readl(dw, RAW.BLOCK); | 366 | status_block = dma_readl(dw, RAW.BLOCK); |
367 | status_xfer = dma_readl(dw, RAW.BLOCK); | 367 | status_xfer = dma_readl(dw, RAW.XFER); |
368 | status_err = dma_readl(dw, RAW.ERROR); | 368 | status_err = dma_readl(dw, RAW.ERROR); |
369 | 369 | ||
370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | 370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", |
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 22f6d5c00d80..0e7b1c6724aa 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c | |||
@@ -180,7 +180,7 @@ static const struct i2c_algorithm i2c_powermac_algorithm = { | |||
180 | }; | 180 | }; |
181 | 181 | ||
182 | 182 | ||
183 | static int i2c_powermac_remove(struct platform_device *dev) | 183 | static int __devexit i2c_powermac_remove(struct platform_device *dev) |
184 | { | 184 | { |
185 | struct i2c_adapter *adapter = platform_get_drvdata(dev); | 185 | struct i2c_adapter *adapter = platform_get_drvdata(dev); |
186 | struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter); | 186 | struct pmac_i2c_bus *bus = i2c_get_adapdata(adapter); |
@@ -200,7 +200,7 @@ static int i2c_powermac_remove(struct platform_device *dev) | |||
200 | } | 200 | } |
201 | 201 | ||
202 | 202 | ||
203 | static int __devexit i2c_powermac_probe(struct platform_device *dev) | 203 | static int __devinit i2c_powermac_probe(struct platform_device *dev) |
204 | { | 204 | { |
205 | struct pmac_i2c_bus *bus = dev->dev.platform_data; | 205 | struct pmac_i2c_bus *bus = dev->dev.platform_data; |
206 | struct device_node *parent = NULL; | 206 | struct device_node *parent = NULL; |
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index af4491fa7e34..307d976c9b69 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c | |||
@@ -583,8 +583,10 @@ static int __init i2c_dev_init(void) | |||
583 | goto out; | 583 | goto out; |
584 | 584 | ||
585 | i2c_dev_class = class_create(THIS_MODULE, "i2c-dev"); | 585 | i2c_dev_class = class_create(THIS_MODULE, "i2c-dev"); |
586 | if (IS_ERR(i2c_dev_class)) | 586 | if (IS_ERR(i2c_dev_class)) { |
587 | res = PTR_ERR(i2c_dev_class); | ||
587 | goto out_unreg_chrdev; | 588 | goto out_unreg_chrdev; |
589 | } | ||
588 | 590 | ||
589 | res = i2c_add_driver(&i2cdev_driver); | 591 | res = i2c_add_driver(&i2cdev_driver); |
590 | if (res) | 592 | if (res) |
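The i2c-dev fix above matters because class_create() reports failure as an ERR_PTR()-encoded pointer rather than NULL, so the error code has to be pulled out with PTR_ERR() before jumping to the unwind label; otherwise the function returns whatever happened to be left in res. A minimal sketch of that idiom in module form — example_class and example_init()/example_exit() are illustrative names, not part of the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_init(void)
{
	int res = 0;

	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class)) {
		/* Propagate the real error instead of a stale 'res'. */
		res = PTR_ERR(example_class);
	}
	return res;
}

static void __exit example_exit(void)
{
	/* Only reached if example_init() succeeded. */
	class_destroy(example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");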
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index fc735ab08ff4..8e93a797c93d 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -292,6 +292,20 @@ config IDE_GENERIC | |||
292 | tristate "generic/default IDE chipset support" | 292 | tristate "generic/default IDE chipset support" |
293 | depends on ALPHA || X86 || IA64 || M32R || MIPS | 293 | depends on ALPHA || X86 || IA64 || M32R || MIPS |
294 | help | 294 | help |
295 | This is the generic IDE driver. This driver attaches to the | ||
296 | fixed legacy ports (e.g. on PCs 0x1f0/0x170, 0x1e8/0x168 and | ||
297 | so on). Please note that if this driver is built into the | ||
298 | kernel or loaded before other ATA (IDE or libata) drivers | ||
299 | and the controller is located at legacy ports, this driver | ||
300 | may grab those ports and thus can prevent the controller | ||
301 | specific driver from attaching. | ||
302 | |||
303 | Also, currently, IDE generic doesn't allow IRQ sharing, | ||
304 | meaning that the IRQs it grabs won't be available to other | ||
305 | controllers sharing those IRQs, which usually makes drivers | ||
306 | for those controllers fail. Generally, it's not a good idea | ||
307 | to load the IDE generic driver on modern systems. | ||
308 | |||
295 | If unsure, say N. | 309 | If unsure, say N. |
296 | 310 | ||
297 | config BLK_DEV_PLATFORM | 311 | config BLK_DEV_PLATFORM |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 1bce84b56630..3833189144ed 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -2338,7 +2338,7 @@ static void idetape_get_inquiry_results(ide_drive_t *drive) | |||
2338 | { | 2338 | { |
2339 | idetape_tape_t *tape = drive->driver_data; | 2339 | idetape_tape_t *tape = drive->driver_data; |
2340 | struct ide_atapi_pc pc; | 2340 | struct ide_atapi_pc pc; |
2341 | char fw_rev[6], vendor_id[10], product_id[18]; | 2341 | char fw_rev[4], vendor_id[8], product_id[16]; |
2342 | 2342 | ||
2343 | idetape_create_inquiry_cmd(&pc); | 2343 | idetape_create_inquiry_cmd(&pc); |
2344 | if (idetape_queue_pc_tail(drive, &pc)) { | 2344 | if (idetape_queue_pc_tail(drive, &pc)) { |
@@ -2350,11 +2350,11 @@ static void idetape_get_inquiry_results(ide_drive_t *drive) | |||
2350 | memcpy(product_id, &pc.buf[16], 16); | 2350 | memcpy(product_id, &pc.buf[16], 16); |
2351 | memcpy(fw_rev, &pc.buf[32], 4); | 2351 | memcpy(fw_rev, &pc.buf[32], 4); |
2352 | 2352 | ||
2353 | ide_fixstring(vendor_id, 10, 0); | 2353 | ide_fixstring(vendor_id, 8, 0); |
2354 | ide_fixstring(product_id, 18, 0); | 2354 | ide_fixstring(product_id, 16, 0); |
2355 | ide_fixstring(fw_rev, 6, 0); | 2355 | ide_fixstring(fw_rev, 4, 0); |
2356 | 2356 | ||
2357 | printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n", | 2357 | printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n", |
2358 | drive->name, tape->name, vendor_id, product_id, fw_rev); | 2358 | drive->name, tape->name, vendor_id, product_id, fw_rev); |
2359 | } | 2359 | } |
2360 | 2360 | ||
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c index badf79fc9e3a..39c9ee995857 100644 --- a/drivers/ide/mips/swarm.c +++ b/drivers/ide/mips/swarm.c | |||
@@ -107,6 +107,7 @@ static int __devinit swarm_ide_probe(struct device *dev) | |||
107 | 107 | ||
108 | base = ioremap(offset, size); | 108 | base = ioremap(offset, size); |
109 | 109 | ||
110 | memset(&hw, 0, sizeof(hw)); | ||
110 | for (i = 0; i <= 7; i++) | 111 | for (i = 0; i <= 7; i++) |
111 | hw.io_ports_array[i] = | 112 | hw.io_ports_array[i] = |
112 | (unsigned long)(base + ((0x1f0 + i) << 5)); | 113 | (unsigned long)(base + ((0x1f0 + i) << 5)); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 1b1df5cc4113..e9ca3cb57d52 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -404,7 +404,7 @@ static void path_rec_completion(int status, | |||
404 | struct net_device *dev = path->dev; | 404 | struct net_device *dev = path->dev; |
405 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 405 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
406 | struct ipoib_ah *ah = NULL; | 406 | struct ipoib_ah *ah = NULL; |
407 | struct ipoib_ah *old_ah; | 407 | struct ipoib_ah *old_ah = NULL; |
408 | struct ipoib_neigh *neigh, *tn; | 408 | struct ipoib_neigh *neigh, *tn; |
409 | struct sk_buff_head skqueue; | 409 | struct sk_buff_head skqueue; |
410 | struct sk_buff *skb; | 410 | struct sk_buff *skb; |
@@ -428,12 +428,12 @@ static void path_rec_completion(int status, | |||
428 | 428 | ||
429 | spin_lock_irqsave(&priv->lock, flags); | 429 | spin_lock_irqsave(&priv->lock, flags); |
430 | 430 | ||
431 | old_ah = path->ah; | ||
432 | path->ah = ah; | ||
433 | |||
434 | if (ah) { | 431 | if (ah) { |
435 | path->pathrec = *pathrec; | 432 | path->pathrec = *pathrec; |
436 | 433 | ||
434 | old_ah = path->ah; | ||
435 | path->ah = ah; | ||
436 | |||
437 | ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", | 437 | ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", |
438 | ah, be16_to_cpu(pathrec->dlid), pathrec->sl); | 438 | ah, be16_to_cpu(pathrec->dlid), pathrec->sl); |
439 | 439 | ||
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 18f4d7f6ce6d..2998a6ac9ae4 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c | |||
@@ -351,8 +351,9 @@ static int report_tp_state(struct bcm5974 *dev, int size) | |||
351 | #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 | 351 | #define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 |
352 | #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 | 352 | #define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 |
353 | #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 | 353 | #define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 |
354 | #define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 | ||
354 | 355 | ||
355 | static int bcm5974_wellspring_mode(struct bcm5974 *dev) | 356 | static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) |
356 | { | 357 | { |
357 | char *data = kmalloc(8, GFP_KERNEL); | 358 | char *data = kmalloc(8, GFP_KERNEL); |
358 | int retval = 0, size; | 359 | int retval = 0, size; |
@@ -377,7 +378,9 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev) | |||
377 | } | 378 | } |
378 | 379 | ||
379 | /* apply the mode switch */ | 380 | /* apply the mode switch */ |
380 | data[0] = BCM5974_WELLSPRING_MODE_VENDOR_VALUE; | 381 | data[0] = on ? |
382 | BCM5974_WELLSPRING_MODE_VENDOR_VALUE : | ||
383 | BCM5974_WELLSPRING_MODE_NORMAL_VALUE; | ||
381 | 384 | ||
382 | /* write configuration */ | 385 | /* write configuration */ |
383 | size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), | 386 | size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), |
@@ -392,7 +395,8 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev) | |||
392 | goto out; | 395 | goto out; |
393 | } | 396 | } |
394 | 397 | ||
395 | dprintk(2, "bcm5974: switched to wellspring mode.\n"); | 398 | dprintk(2, "bcm5974: switched to %s mode.\n", |
399 | on ? "wellspring" : "normal"); | ||
396 | 400 | ||
397 | out: | 401 | out: |
398 | kfree(data); | 402 | kfree(data); |
@@ -481,7 +485,7 @@ exit: | |||
481 | */ | 485 | */ |
482 | static int bcm5974_start_traffic(struct bcm5974 *dev) | 486 | static int bcm5974_start_traffic(struct bcm5974 *dev) |
483 | { | 487 | { |
484 | if (bcm5974_wellspring_mode(dev)) { | 488 | if (bcm5974_wellspring_mode(dev, true)) { |
485 | dprintk(1, "bcm5974: mode switch failed\n"); | 489 | dprintk(1, "bcm5974: mode switch failed\n"); |
486 | goto error; | 490 | goto error; |
487 | } | 491 | } |
@@ -504,6 +508,7 @@ static void bcm5974_pause_traffic(struct bcm5974 *dev) | |||
504 | { | 508 | { |
505 | usb_kill_urb(dev->tp_urb); | 509 | usb_kill_urb(dev->tp_urb); |
506 | usb_kill_urb(dev->bt_urb); | 510 | usb_kill_urb(dev->bt_urb); |
511 | bcm5974_wellspring_mode(dev, false); | ||
507 | } | 512 | } |
508 | 513 | ||
509 | /* | 514 | /* |
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c index bf44f9d68342..c8b7e8a45c4d 100644 --- a/drivers/input/touchscreen/jornada720_ts.c +++ b/drivers/input/touchscreen/jornada720_ts.c | |||
@@ -119,8 +119,8 @@ static int __devinit jornada720_ts_probe(struct platform_device *pdev) | |||
119 | input_dev->id.bustype = BUS_HOST; | 119 | input_dev->id.bustype = BUS_HOST; |
120 | input_dev->dev.parent = &pdev->dev; | 120 | input_dev->dev.parent = &pdev->dev; |
121 | 121 | ||
122 | input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); | 122 | input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); |
123 | input_dev->keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); | 123 | input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); |
124 | input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); | 124 | input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0); |
125 | input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); | 125 | input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0); |
126 | 126 | ||
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c index be0e12144b8b..34935155c1c0 100644 --- a/drivers/leds/leds-fsg.c +++ b/drivers/leds/leds-fsg.c | |||
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
161 | { | 161 | { |
162 | int ret; | 162 | int ret; |
163 | 163 | ||
164 | /* Map the LED chip select address space */ | ||
165 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
166 | if (!latch_address) { | ||
167 | ret = -ENOMEM; | ||
168 | goto failremap; | ||
169 | } | ||
170 | |||
171 | latch_value = 0xffff; | ||
172 | *latch_address = latch_value; | ||
173 | |||
164 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); | 174 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); |
165 | if (ret < 0) | 175 | if (ret < 0) |
166 | goto failwlan; | 176 | goto failwlan; |
@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
185 | if (ret < 0) | 195 | if (ret < 0) |
186 | goto failring; | 196 | goto failring; |
187 | 197 | ||
188 | /* Map the LED chip select address space */ | ||
189 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
190 | if (!latch_address) { | ||
191 | ret = -ENOMEM; | ||
192 | goto failremap; | ||
193 | } | ||
194 | |||
195 | latch_value = 0xffff; | ||
196 | *latch_address = latch_value; | ||
197 | |||
198 | return ret; | 198 | return ret; |
199 | 199 | ||
200 | failremap: | ||
201 | led_classdev_unregister(&fsg_ring_led); | ||
202 | failring: | 200 | failring: |
203 | led_classdev_unregister(&fsg_sync_led); | 201 | led_classdev_unregister(&fsg_sync_led); |
204 | failsync: | 202 | failsync: |
@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
210 | failwan: | 208 | failwan: |
211 | led_classdev_unregister(&fsg_wlan_led); | 209 | led_classdev_unregister(&fsg_wlan_led); |
212 | failwlan: | 210 | failwlan: |
211 | iounmap(latch_address); | ||
212 | failremap: | ||
213 | 213 | ||
214 | return ret; | 214 | return ret; |
215 | } | 215 | } |
216 | 216 | ||
217 | static int fsg_led_remove(struct platform_device *pdev) | 217 | static int fsg_led_remove(struct platform_device *pdev) |
218 | { | 218 | { |
219 | iounmap(latch_address); | ||
220 | |||
221 | led_classdev_unregister(&fsg_wlan_led); | 219 | led_classdev_unregister(&fsg_wlan_led); |
222 | led_classdev_unregister(&fsg_wan_led); | 220 | led_classdev_unregister(&fsg_wan_led); |
223 | led_classdev_unregister(&fsg_sata_led); | 221 | led_classdev_unregister(&fsg_sata_led); |
@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev) | |||
225 | led_classdev_unregister(&fsg_sync_led); | 223 | led_classdev_unregister(&fsg_sync_led); |
226 | led_classdev_unregister(&fsg_ring_led); | 224 | led_classdev_unregister(&fsg_ring_led); |
227 | 225 | ||
226 | iounmap(latch_address); | ||
227 | |||
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 146c06972863..f508729123b5 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c | |||
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
248 | const struct i2c_device_id *id) | 248 | const struct i2c_device_id *id) |
249 | { | 249 | { |
250 | struct pca955x_led *pca955x; | 250 | struct pca955x_led *pca955x; |
251 | int i; | ||
252 | int err = -ENODEV; | ||
253 | struct pca955x_chipdef *chip; | 251 | struct pca955x_chipdef *chip; |
254 | struct i2c_adapter *adapter; | 252 | struct i2c_adapter *adapter; |
255 | struct led_platform_data *pdata; | 253 | struct led_platform_data *pdata; |
254 | int i, err; | ||
256 | 255 | ||
257 | chip = &pca955x_chipdefs[id->driver_data]; | 256 | chip = &pca955x_chipdefs[id->driver_data]; |
258 | adapter = to_i2c_adapter(client->dev.parent); | 257 | adapter = to_i2c_adapter(client->dev.parent); |
@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
282 | } | 281 | } |
283 | } | 282 | } |
284 | 283 | ||
284 | pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL); | ||
285 | if (!pca955x) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | i2c_set_clientdata(client, pca955x); | ||
289 | |||
285 | for (i = 0; i < chip->bits; i++) { | 290 | for (i = 0; i < chip->bits; i++) { |
286 | pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL); | 291 | pca955x[i].chipdef = chip; |
287 | if (!pca955x) { | 292 | pca955x[i].client = client; |
288 | err = -ENOMEM; | 293 | pca955x[i].led_num = i; |
289 | goto exit; | ||
290 | } | ||
291 | 294 | ||
292 | pca955x->chipdef = chip; | ||
293 | pca955x->client = client; | ||
294 | pca955x->led_num = i; | ||
295 | /* Platform data can specify LED names and default triggers */ | 295 | /* Platform data can specify LED names and default triggers */ |
296 | if (pdata) { | 296 | if (pdata) { |
297 | if (pdata->leds[i].name) | 297 | if (pdata->leds[i].name) |
298 | snprintf(pca955x->name, 32, "pca955x:%s", | 298 | snprintf(pca955x[i].name, |
299 | pdata->leds[i].name); | 299 | sizeof(pca955x[i].name), "pca955x:%s", |
300 | pdata->leds[i].name); | ||
300 | if (pdata->leds[i].default_trigger) | 301 | if (pdata->leds[i].default_trigger) |
301 | pca955x->led_cdev.default_trigger = | 302 | pca955x[i].led_cdev.default_trigger = |
302 | pdata->leds[i].default_trigger; | 303 | pdata->leds[i].default_trigger; |
303 | } else { | 304 | } else { |
304 | snprintf(pca955x->name, 32, "pca955x:%d", i); | 305 | snprintf(pca955x[i].name, sizeof(pca955x[i].name), |
306 | "pca955x:%d", i); | ||
305 | } | 307 | } |
306 | spin_lock_init(&pca955x->lock); | ||
307 | 308 | ||
308 | pca955x->led_cdev.name = pca955x->name; | 309 | spin_lock_init(&pca955x[i].lock); |
309 | pca955x->led_cdev.brightness_set = | ||
310 | pca955x_led_set; | ||
311 | 310 | ||
312 | /* | 311 | pca955x[i].led_cdev.name = pca955x[i].name; |
313 | * Client data is a pointer to the _first_ pca955x_led | 312 | pca955x[i].led_cdev.brightness_set = pca955x_led_set; |
314 | * struct | ||
315 | */ | ||
316 | if (i == 0) | ||
317 | i2c_set_clientdata(client, pca955x); | ||
318 | 313 | ||
319 | INIT_WORK(&(pca955x->work), pca955x_led_work); | 314 | INIT_WORK(&pca955x[i].work, pca955x_led_work); |
320 | 315 | ||
321 | led_classdev_register(&client->dev, &(pca955x->led_cdev)); | 316 | err = led_classdev_register(&client->dev, &pca955x[i].led_cdev); |
317 | if (err < 0) | ||
318 | goto exit; | ||
322 | } | 319 | } |
323 | 320 | ||
324 | /* Turn off LEDs */ | 321 | /* Turn off LEDs */ |
@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
336 | pca955x_write_psc(client, 1, 0); | 333 | pca955x_write_psc(client, 1, 0); |
337 | 334 | ||
338 | return 0; | 335 | return 0; |
336 | |||
339 | exit: | 337 | exit: |
338 | while (i--) { | ||
339 | led_classdev_unregister(&pca955x[i].led_cdev); | ||
340 | cancel_work_sync(&pca955x[i].work); | ||
341 | } | ||
342 | |||
343 | kfree(pca955x); | ||
344 | i2c_set_clientdata(client, NULL); | ||
345 | |||
340 | return err; | 346 | return err; |
341 | } | 347 | } |
342 | 348 | ||
343 | static int __devexit pca955x_remove(struct i2c_client *client) | 349 | static int __devexit pca955x_remove(struct i2c_client *client) |
344 | { | 350 | { |
345 | struct pca955x_led *pca955x = i2c_get_clientdata(client); | 351 | struct pca955x_led *pca955x = i2c_get_clientdata(client); |
346 | int leds = pca955x->chipdef->bits; | ||
347 | int i; | 352 | int i; |
348 | 353 | ||
349 | for (i = 0; i < leds; i++) { | 354 | for (i = 0; i < pca955x->chipdef->bits; i++) { |
350 | led_classdev_unregister(&(pca955x->led_cdev)); | 355 | led_classdev_unregister(&pca955x[i].led_cdev); |
351 | cancel_work_sync(&(pca955x->work)); | 356 | cancel_work_sync(&pca955x[i].work); |
352 | kfree(pca955x); | ||
353 | pca955x = pca955x + 1; | ||
354 | } | 357 | } |
355 | 358 | ||
359 | kfree(pca955x); | ||
360 | i2c_set_clientdata(client, NULL); | ||
361 | |||
356 | return 0; | 362 | return 0; |
357 | } | 363 | } |
358 | 364 | ||
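The pca955x rework above replaces the per-LED kzalloc() calls with a single array allocation and adds proper unwinding when led_classdev_register() fails part-way through. A condensed sketch of that allocate-once / partial-unwind idiom, assuming a hypothetical example_led structure and register_leds() helper (the real driver also fills in names, triggers and the brightness callback, omitted here):

#include <linux/device.h>
#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_led {
	struct led_classdev led_cdev;
	struct work_struct work;
};

static int register_leds(struct device *dev, struct example_led **out, int nr)
{
	struct example_led *leds;
	int i, err;

	/* One allocation covers every LED on the chip. */
	leds = kzalloc(sizeof(*leds) * nr, GFP_KERNEL);
	if (!leds)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		err = led_classdev_register(dev, &leds[i].led_cdev);
		if (err < 0)
			goto unwind;
	}
	*out = leds;
	return 0;

unwind:
	/* Unregister only the LEDs that were actually registered. */
	while (i--)
		led_classdev_unregister(&leds[i].led_cdev);
	kfree(leds);
	return err;
}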
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 71dd65aa31b6..c2fcf28b4c70 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -63,6 +63,7 @@ struct multipath { | |||
63 | 63 | ||
64 | const char *hw_handler_name; | 64 | const char *hw_handler_name; |
65 | struct work_struct activate_path; | 65 | struct work_struct activate_path; |
66 | struct pgpath *pgpath_to_activate; | ||
66 | unsigned nr_priority_groups; | 67 | unsigned nr_priority_groups; |
67 | struct list_head priority_groups; | 68 | struct list_head priority_groups; |
68 | unsigned pg_init_required; /* pg_init needs calling? */ | 69 | unsigned pg_init_required; /* pg_init needs calling? */ |
@@ -146,6 +147,7 @@ static struct priority_group *alloc_priority_group(void) | |||
146 | 147 | ||
147 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | 148 | static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) |
148 | { | 149 | { |
150 | unsigned long flags; | ||
149 | struct pgpath *pgpath, *tmp; | 151 | struct pgpath *pgpath, *tmp; |
150 | struct multipath *m = ti->private; | 152 | struct multipath *m = ti->private; |
151 | 153 | ||
@@ -154,6 +156,10 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti) | |||
154 | if (m->hw_handler_name) | 156 | if (m->hw_handler_name) |
155 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); | 157 | scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev)); |
156 | dm_put_device(ti, pgpath->path.dev); | 158 | dm_put_device(ti, pgpath->path.dev); |
159 | spin_lock_irqsave(&m->lock, flags); | ||
160 | if (m->pgpath_to_activate == pgpath) | ||
161 | m->pgpath_to_activate = NULL; | ||
162 | spin_unlock_irqrestore(&m->lock, flags); | ||
157 | free_pgpath(pgpath); | 163 | free_pgpath(pgpath); |
158 | } | 164 | } |
159 | } | 165 | } |
@@ -421,6 +427,7 @@ static void process_queued_ios(struct work_struct *work) | |||
421 | __choose_pgpath(m); | 427 | __choose_pgpath(m); |
422 | 428 | ||
423 | pgpath = m->current_pgpath; | 429 | pgpath = m->current_pgpath; |
430 | m->pgpath_to_activate = m->current_pgpath; | ||
424 | 431 | ||
425 | if ((pgpath && !m->queue_io) || | 432 | if ((pgpath && !m->queue_io) || |
426 | (!pgpath && !m->queue_if_no_path)) | 433 | (!pgpath && !m->queue_if_no_path)) |
@@ -1093,8 +1100,15 @@ static void activate_path(struct work_struct *work) | |||
1093 | int ret; | 1100 | int ret; |
1094 | struct multipath *m = | 1101 | struct multipath *m = |
1095 | container_of(work, struct multipath, activate_path); | 1102 | container_of(work, struct multipath, activate_path); |
1096 | struct dm_path *path = &m->current_pgpath->path; | 1103 | struct dm_path *path; |
1104 | unsigned long flags; | ||
1097 | 1105 | ||
1106 | spin_lock_irqsave(&m->lock, flags); | ||
1107 | path = &m->pgpath_to_activate->path; | ||
1108 | m->pgpath_to_activate = NULL; | ||
1109 | spin_unlock_irqrestore(&m->lock, flags); | ||
1110 | if (!path) | ||
1111 | return; | ||
1098 | ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); | 1112 | ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev)); |
1099 | pg_init_done(path, ret); | 1113 | pg_init_done(path, ret); |
1100 | } | 1114 | } |
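The dm-mpath change above decouples the work handler from m->current_pgpath: process_queued_ios() records which path should be activated while holding m->lock, free_pgpaths() clears that record when the path is torn down, and activate_path() takes its own snapshot before touching anything. A simplified restatement of that pattern, using the types defined in dm-mpath.c — this sketch checks the snapshotted pointer itself before taking the address of its embedded dm_path and folds the pg_init_done() call into one line; it is not the literal patch:

static void activate_path_sketch(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, activate_path);
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	pgpath = m->pgpath_to_activate;		/* snapshot under the lock */
	m->pgpath_to_activate = NULL;
	spin_unlock_irqrestore(&m->lock, flags);

	if (!pgpath)				/* the path was freed meanwhile */
		return;

	pg_init_done(&pgpath->path,
		     scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev)));
}

The point of the snapshot is that the handler never dereferences a pgpath that free_pgpaths() may have released between the work being queued and it actually running.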
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index bca448e11878..ace998ce59f6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -837,12 +837,14 @@ static int dm_merge_bvec(struct request_queue *q, | |||
837 | struct dm_table *map = dm_get_table(md); | 837 | struct dm_table *map = dm_get_table(md); |
838 | struct dm_target *ti; | 838 | struct dm_target *ti; |
839 | sector_t max_sectors; | 839 | sector_t max_sectors; |
840 | int max_size; | 840 | int max_size = 0; |
841 | 841 | ||
842 | if (unlikely(!map)) | 842 | if (unlikely(!map)) |
843 | return 0; | 843 | goto out; |
844 | 844 | ||
845 | ti = dm_table_find_target(map, bvm->bi_sector); | 845 | ti = dm_table_find_target(map, bvm->bi_sector); |
846 | if (!dm_target_is_valid(ti)) | ||
847 | goto out_table; | ||
846 | 848 | ||
847 | /* | 849 | /* |
848 | * Find maximum amount of I/O that won't need splitting | 850 | * Find maximum amount of I/O that won't need splitting |
@@ -861,14 +863,16 @@ static int dm_merge_bvec(struct request_queue *q, | |||
861 | if (max_size && ti->type->merge) | 863 | if (max_size && ti->type->merge) |
862 | max_size = ti->type->merge(ti, bvm, biovec, max_size); | 864 | max_size = ti->type->merge(ti, bvm, biovec, max_size); |
863 | 865 | ||
866 | out_table: | ||
867 | dm_table_put(map); | ||
868 | |||
869 | out: | ||
864 | /* | 870 | /* |
865 | * Always allow an entire first page | 871 | * Always allow an entire first page |
866 | */ | 872 | */ |
867 | if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) | 873 | if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT)) |
868 | max_size = biovec->bv_len; | 874 | max_size = biovec->bv_len; |
869 | 875 | ||
870 | dm_table_put(map); | ||
871 | |||
872 | return max_size; | 876 | return max_size; |
873 | } | 877 | } |
874 | 878 | ||
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 10c44d3fe01a..68dc8d9eb24e 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -21,7 +21,7 @@ config MFD_SM501 | |||
21 | 21 | ||
22 | config MFD_SM501_GPIO | 22 | config MFD_SM501_GPIO |
23 | bool "Export GPIO via GPIO layer" | 23 | bool "Export GPIO via GPIO layer" |
24 | depends on MFD_SM501 && HAVE_GPIO_LIB | 24 | depends on MFD_SM501 && GPIOLIB |
25 | ---help--- | 25 | ---help--- |
26 | This option uses the gpio library layer to export the 64 GPIO | 26 | This option uses the gpio library layer to export the 64 GPIO |
27 | lines on the SM501. The platform data is used to supply the | 27 | lines on the SM501. The platform data is used to supply the |
@@ -29,7 +29,7 @@ config MFD_SM501_GPIO | |||
29 | 29 | ||
30 | config MFD_ASIC3 | 30 | config MFD_ASIC3 |
31 | bool "Support for Compaq ASIC3" | 31 | bool "Support for Compaq ASIC3" |
32 | depends on GENERIC_HARDIRQS && HAVE_GPIO_LIB && ARM | 32 | depends on GENERIC_HARDIRQS && GPIOLIB && ARM |
33 | ---help--- | 33 | ---help--- |
34 | This driver supports the ASIC3 multifunction chip found on many | 34 | This driver supports the ASIC3 multifunction chip found on many |
35 | PDAs (mainly iPAQ and HTC based ones) | 35 | PDAs (mainly iPAQ and HTC based ones) |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index bc2a807f210d..ba5aa2008273 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -312,7 +312,6 @@ static int __init asic3_irq_probe(struct platform_device *pdev) | |||
312 | struct asic3 *asic = platform_get_drvdata(pdev); | 312 | struct asic3 *asic = platform_get_drvdata(pdev); |
313 | unsigned long clksel = 0; | 313 | unsigned long clksel = 0; |
314 | unsigned int irq, irq_base; | 314 | unsigned int irq, irq_base; |
315 | int map_size; | ||
316 | int ret; | 315 | int ret; |
317 | 316 | ||
318 | ret = platform_get_irq(pdev, 0); | 317 | ret = platform_get_irq(pdev, 0); |
@@ -534,6 +533,7 @@ static int __init asic3_probe(struct platform_device *pdev) | |||
534 | struct asic3 *asic; | 533 | struct asic3 *asic; |
535 | struct resource *mem; | 534 | struct resource *mem; |
536 | unsigned long clksel; | 535 | unsigned long clksel; |
536 | int map_size; | ||
537 | int ret = 0; | 537 | int ret = 0; |
538 | 538 | ||
539 | asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); | 539 | asic = kzalloc(sizeof(struct asic3), GFP_KERNEL); |
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index ea8d7a3490d9..1ce21d4c8608 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -114,6 +114,17 @@ config MMC_ATMELMCI | |||
114 | 114 | ||
115 | If unsure, say N. | 115 | If unsure, say N. |
116 | 116 | ||
117 | config MMC_ATMELMCI_DMA | ||
118 | bool "Atmel MCI DMA support (EXPERIMENTAL)" | ||
119 | depends on MMC_ATMELMCI && DMA_ENGINE && EXPERIMENTAL | ||
120 | help | ||
121 | Say Y here to have the Atmel MCI driver use a DMA engine to | ||
122 | do data transfers and thus increase the throughput and | ||
123 | reduce the CPU utilization. Note that this is highly | ||
124 | experimental and may cause the driver to lock up. | ||
125 | |||
126 | If unsure, say N. | ||
127 | |||
117 | config MMC_IMX | 128 | config MMC_IMX |
118 | tristate "Motorola i.MX Multimedia Card Interface support" | 129 | tristate "Motorola i.MX Multimedia Card Interface support" |
119 | depends on ARCH_IMX | 130 | depends on ARCH_IMX |
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index 26bd80e65031..b58364ed6bba 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h | |||
@@ -25,8 +25,10 @@ | |||
25 | #define MCI_SDCR 0x000c /* SD Card / SDIO */ | 25 | #define MCI_SDCR 0x000c /* SD Card / SDIO */ |
26 | # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ | 26 | # define MCI_SDCSEL_SLOT_A ( 0 << 0) /* Select SD slot A */ |
27 | # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */ | 27 | # define MCI_SDCSEL_SLOT_B ( 1 << 0) /* Select SD slot B */ |
28 | # define MCI_SDCBUS_1BIT ( 0 << 7) /* 1-bit data bus */ | 28 | # define MCI_SDCSEL_MASK ( 3 << 0) |
29 | # define MCI_SDCBUS_4BIT ( 1 << 7) /* 4-bit data bus */ | 29 | # define MCI_SDCBUS_1BIT ( 0 << 6) /* 1-bit data bus */ |
30 | # define MCI_SDCBUS_4BIT ( 2 << 6) /* 4-bit data bus */ | ||
31 | # define MCI_SDCBUS_MASK ( 3 << 6) | ||
30 | #define MCI_ARGR 0x0010 /* Command Argument */ | 32 | #define MCI_ARGR 0x0010 /* Command Argument */ |
31 | #define MCI_CMDR 0x0014 /* Command */ | 33 | #define MCI_CMDR 0x0014 /* Command */ |
32 | # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ | 34 | # define MCI_CMDR_CMDNB(x) ((x) << 0) /* Command Opcode */ |
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 917035e16da4..7a3f2436b011 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/dmaengine.h> | ||
15 | #include <linux/dma-mapping.h> | ||
14 | #include <linux/err.h> | 16 | #include <linux/err.h> |
15 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
16 | #include <linux/init.h> | 18 | #include <linux/init.h> |
@@ -33,64 +35,178 @@ | |||
33 | #include "atmel-mci-regs.h" | 35 | #include "atmel-mci-regs.h" |
34 | 36 | ||
35 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) | 37 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) |
38 | #define ATMCI_DMA_THRESHOLD 16 | ||
36 | 39 | ||
37 | enum { | 40 | enum { |
38 | EVENT_CMD_COMPLETE = 0, | 41 | EVENT_CMD_COMPLETE = 0, |
39 | EVENT_DATA_ERROR, | ||
40 | EVENT_DATA_COMPLETE, | ||
41 | EVENT_STOP_SENT, | ||
42 | EVENT_STOP_COMPLETE, | ||
43 | EVENT_XFER_COMPLETE, | 42 | EVENT_XFER_COMPLETE, |
43 | EVENT_DATA_COMPLETE, | ||
44 | EVENT_DATA_ERROR, | ||
45 | }; | ||
46 | |||
47 | enum atmel_mci_state { | ||
48 | STATE_IDLE = 0, | ||
49 | STATE_SENDING_CMD, | ||
50 | STATE_SENDING_DATA, | ||
51 | STATE_DATA_BUSY, | ||
52 | STATE_SENDING_STOP, | ||
53 | STATE_DATA_ERROR, | ||
54 | }; | ||
55 | |||
56 | struct atmel_mci_dma { | ||
57 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
58 | struct dma_client client; | ||
59 | struct dma_chan *chan; | ||
60 | struct dma_async_tx_descriptor *data_desc; | ||
61 | #endif | ||
44 | }; | 62 | }; |
45 | 63 | ||
64 | /** | ||
65 | * struct atmel_mci - MMC controller state shared between all slots | ||
66 | * @lock: Spinlock protecting the queue and associated data. | ||
67 | * @regs: Pointer to MMIO registers. | ||
68 | * @sg: Scatterlist entry currently being processed by PIO code, if any. | ||
69 | * @pio_offset: Offset into the current scatterlist entry. | ||
70 | * @cur_slot: The slot which is currently using the controller. | ||
71 | * @mrq: The request currently being processed on @cur_slot, | ||
72 | * or NULL if the controller is idle. | ||
73 | * @cmd: The command currently being sent to the card, or NULL. | ||
74 | * @data: The data currently being transferred, or NULL if no data | ||
75 | * transfer is in progress. | ||
76 | * @dma: DMA client state. | ||
77 | * @data_chan: DMA channel being used for the current data transfer. | ||
78 | * @cmd_status: Snapshot of SR taken upon completion of the current | ||
79 | * command. Only valid when EVENT_CMD_COMPLETE is pending. | ||
80 | * @data_status: Snapshot of SR taken upon completion of the current | ||
81 | * data transfer. Only valid when EVENT_DATA_COMPLETE or | ||
82 | * EVENT_DATA_ERROR is pending. | ||
83 | * @stop_cmdr: Value to be loaded into CMDR when the stop command is | ||
84 | * to be sent. | ||
85 | * @tasklet: Tasklet running the request state machine. | ||
86 | * @pending_events: Bitmask of events flagged by the interrupt handler | ||
87 | * to be processed by the tasklet. | ||
88 | * @completed_events: Bitmask of events which the state machine has | ||
89 | * processed. | ||
90 | * @state: Tasklet state. | ||
91 | * @queue: List of slots waiting for access to the controller. | ||
92 | * @need_clock_update: Update the clock rate before the next request. | ||
93 | * @need_reset: Reset controller before next request. | ||
94 | * @mode_reg: Value of the MR register. | ||
95 | * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus | ||
96 | * rate and timeout calculations. | ||
97 | * @mapbase: Physical address of the MMIO registers. | ||
98 | * @mck: The peripheral bus clock hooked up to the MMC controller. | ||
99 | * @pdev: Platform device associated with the MMC controller. | ||
100 | * @slot: Slots sharing this MMC controller. | ||
101 | * | ||
102 | * Locking | ||
103 | * ======= | ||
104 | * | ||
105 | * @lock is a softirq-safe spinlock protecting @queue as well as | ||
106 | * @cur_slot, @mrq and @state. These must always be updated | ||
107 | * at the same time while holding @lock. | ||
108 | * | ||
109 | * @lock also protects mode_reg and need_clock_update since these are | ||
110 | * used to synchronize mode register updates with the queue | ||
111 | * processing. | ||
112 | * | ||
113 | * The @mrq field of struct atmel_mci_slot is also protected by @lock, | ||
114 | * and must always be written at the same time as the slot is added to | ||
115 | * @queue. | ||
116 | * | ||
117 | * @pending_events and @completed_events are accessed using atomic bit | ||
118 | * operations, so they don't need any locking. | ||
119 | * | ||
120 | * None of the fields touched by the interrupt handler need any | ||
121 | * locking. However, ordering is important: Before EVENT_DATA_ERROR or | ||
122 | * EVENT_DATA_COMPLETE is set in @pending_events, all data-related | ||
123 | * interrupts must be disabled and @data_status updated with a | ||
124 | * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the | ||
125 | * CMDRDY interrupt must be disabled and @cmd_status updated with a | ||
126 | * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the | ||
127 | * bytes_xfered field of @data must be written. This is ensured by | ||
128 | * using barriers. | ||
129 | */ | ||
46 | struct atmel_mci { | 130 | struct atmel_mci { |
47 | struct mmc_host *mmc; | 131 | spinlock_t lock; |
48 | void __iomem *regs; | 132 | void __iomem *regs; |
49 | 133 | ||
50 | struct scatterlist *sg; | 134 | struct scatterlist *sg; |
51 | unsigned int pio_offset; | 135 | unsigned int pio_offset; |
52 | 136 | ||
137 | struct atmel_mci_slot *cur_slot; | ||
53 | struct mmc_request *mrq; | 138 | struct mmc_request *mrq; |
54 | struct mmc_command *cmd; | 139 | struct mmc_command *cmd; |
55 | struct mmc_data *data; | 140 | struct mmc_data *data; |
56 | 141 | ||
142 | struct atmel_mci_dma dma; | ||
143 | struct dma_chan *data_chan; | ||
144 | |||
57 | u32 cmd_status; | 145 | u32 cmd_status; |
58 | u32 data_status; | 146 | u32 data_status; |
59 | u32 stop_status; | ||
60 | u32 stop_cmdr; | 147 | u32 stop_cmdr; |
61 | 148 | ||
62 | u32 mode_reg; | ||
63 | u32 sdc_reg; | ||
64 | |||
65 | struct tasklet_struct tasklet; | 149 | struct tasklet_struct tasklet; |
66 | unsigned long pending_events; | 150 | unsigned long pending_events; |
67 | unsigned long completed_events; | 151 | unsigned long completed_events; |
152 | enum atmel_mci_state state; | ||
153 | struct list_head queue; | ||
68 | 154 | ||
69 | int present; | 155 | bool need_clock_update; |
70 | int detect_pin; | 156 | bool need_reset; |
71 | int wp_pin; | 157 | u32 mode_reg; |
72 | |||
73 | /* For detect pin debouncing */ | ||
74 | struct timer_list detect_timer; | ||
75 | |||
76 | unsigned long bus_hz; | 158 | unsigned long bus_hz; |
77 | unsigned long mapbase; | 159 | unsigned long mapbase; |
78 | struct clk *mck; | 160 | struct clk *mck; |
79 | struct platform_device *pdev; | 161 | struct platform_device *pdev; |
162 | |||
163 | struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; | ||
164 | }; | ||
165 | |||
166 | /** | ||
167 | * struct atmel_mci_slot - MMC slot state | ||
168 | * @mmc: The mmc_host representing this slot. | ||
169 | * @host: The MMC controller this slot is using. | ||
170 | * @sdc_reg: Value of SDCR to be written before using this slot. | ||
171 | * @mrq: mmc_request currently being processed or waiting to be | ||
172 | * processed, or NULL when the slot is idle. | ||
173 | * @queue_node: List node for placing this node in the @queue list of | ||
174 | * &struct atmel_mci. | ||
175 | * @clock: Clock rate configured by set_ios(). Protected by host->lock. | ||
176 | * @flags: Random state bits associated with the slot. | ||
177 | * @detect_pin: GPIO pin used for card detection, or negative if not | ||
178 | * available. | ||
179 | * @wp_pin: GPIO pin used for card write protect sensing, or negative | ||
180 | * if not available. | ||
181 | * @detect_timer: Timer used for debouncing @detect_pin interrupts. | ||
182 | */ | ||
183 | struct atmel_mci_slot { | ||
184 | struct mmc_host *mmc; | ||
185 | struct atmel_mci *host; | ||
186 | |||
187 | u32 sdc_reg; | ||
188 | |||
189 | struct mmc_request *mrq; | ||
190 | struct list_head queue_node; | ||
191 | |||
192 | unsigned int clock; | ||
193 | unsigned long flags; | ||
194 | #define ATMCI_CARD_PRESENT 0 | ||
195 | #define ATMCI_CARD_NEED_INIT 1 | ||
196 | #define ATMCI_SHUTDOWN 2 | ||
197 | |||
198 | int detect_pin; | ||
199 | int wp_pin; | ||
200 | |||
201 | struct timer_list detect_timer; | ||
80 | }; | 202 | }; |
81 | 203 | ||
82 | #define atmci_is_completed(host, event) \ | ||
83 | test_bit(event, &host->completed_events) | ||
84 | #define atmci_test_and_clear_pending(host, event) \ | 204 | #define atmci_test_and_clear_pending(host, event) \ |
85 | test_and_clear_bit(event, &host->pending_events) | 205 | test_and_clear_bit(event, &host->pending_events) |
86 | #define atmci_test_and_set_completed(host, event) \ | ||
87 | test_and_set_bit(event, &host->completed_events) | ||
88 | #define atmci_set_completed(host, event) \ | 206 | #define atmci_set_completed(host, event) \ |
89 | set_bit(event, &host->completed_events) | 207 | set_bit(event, &host->completed_events) |
90 | #define atmci_set_pending(host, event) \ | 208 | #define atmci_set_pending(host, event) \ |
91 | set_bit(event, &host->pending_events) | 209 | set_bit(event, &host->pending_events) |
92 | #define atmci_clear_pending(host, event) \ | ||
93 | clear_bit(event, &host->pending_events) | ||
94 | 210 | ||
95 | /* | 211 | /* |
96 | * The debugfs stuff below is mostly optimized away when | 212 | * The debugfs stuff below is mostly optimized away when |
@@ -98,14 +214,15 @@ struct atmel_mci { | |||
98 | */ | 214 | */ |
99 | static int atmci_req_show(struct seq_file *s, void *v) | 215 | static int atmci_req_show(struct seq_file *s, void *v) |
100 | { | 216 | { |
101 | struct atmel_mci *host = s->private; | 217 | struct atmel_mci_slot *slot = s->private; |
102 | struct mmc_request *mrq = host->mrq; | 218 | struct mmc_request *mrq; |
103 | struct mmc_command *cmd; | 219 | struct mmc_command *cmd; |
104 | struct mmc_command *stop; | 220 | struct mmc_command *stop; |
105 | struct mmc_data *data; | 221 | struct mmc_data *data; |
106 | 222 | ||
107 | /* Make sure we get a consistent snapshot */ | 223 | /* Make sure we get a consistent snapshot */ |
108 | spin_lock_irq(&host->mmc->lock); | 224 | spin_lock_bh(&slot->host->lock); |
225 | mrq = slot->mrq; | ||
109 | 226 | ||
110 | if (mrq) { | 227 | if (mrq) { |
111 | cmd = mrq->cmd; | 228 | cmd = mrq->cmd; |
@@ -130,7 +247,7 @@ static int atmci_req_show(struct seq_file *s, void *v) | |||
130 | stop->resp[2], stop->error); | 247 | stop->resp[2], stop->error); |
131 | } | 248 | } |
132 | 249 | ||
133 | spin_unlock_irq(&host->mmc->lock); | 250 | spin_unlock_bh(&slot->host->lock); |
134 | 251 | ||
135 | return 0; | 252 | return 0; |
136 | } | 253 | } |
@@ -193,12 +310,16 @@ static int atmci_regs_show(struct seq_file *s, void *v) | |||
193 | if (!buf) | 310 | if (!buf) |
194 | return -ENOMEM; | 311 | return -ENOMEM; |
195 | 312 | ||
196 | /* Grab a more or less consistent snapshot */ | 313 | /* |
197 | spin_lock_irq(&host->mmc->lock); | 314 | * Grab a more or less consistent snapshot. Note that we're |
315 | * not disabling interrupts, so IMR and SR may not be | ||
316 | * consistent. | ||
317 | */ | ||
318 | spin_lock_bh(&host->lock); | ||
198 | clk_enable(host->mck); | 319 | clk_enable(host->mck); |
199 | memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); | 320 | memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); |
200 | clk_disable(host->mck); | 321 | clk_disable(host->mck); |
201 | spin_unlock_irq(&host->mmc->lock); | 322 | spin_unlock_bh(&host->lock); |
202 | 323 | ||
203 | seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", | 324 | seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", |
204 | buf[MCI_MR / 4], | 325 | buf[MCI_MR / 4], |
@@ -236,13 +357,13 @@ static const struct file_operations atmci_regs_fops = { | |||
236 | .release = single_release, | 357 | .release = single_release, |
237 | }; | 358 | }; |
238 | 359 | ||
239 | static void atmci_init_debugfs(struct atmel_mci *host) | 360 | static void atmci_init_debugfs(struct atmel_mci_slot *slot) |
240 | { | 361 | { |
241 | struct mmc_host *mmc; | 362 | struct mmc_host *mmc = slot->mmc; |
242 | struct dentry *root; | 363 | struct atmel_mci *host = slot->host; |
243 | struct dentry *node; | 364 | struct dentry *root; |
365 | struct dentry *node; | ||
244 | 366 | ||
245 | mmc = host->mmc; | ||
246 | root = mmc->debugfs_root; | 367 | root = mmc->debugfs_root; |
247 | if (!root) | 368 | if (!root) |
248 | return; | 369 | return; |
@@ -254,7 +375,11 @@ static void atmci_init_debugfs(struct atmel_mci *host) | |||
254 | if (!node) | 375 | if (!node) |
255 | goto err; | 376 | goto err; |
256 | 377 | ||
257 | node = debugfs_create_file("req", S_IRUSR, root, host, &atmci_req_fops); | 378 | node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops); |
379 | if (!node) | ||
380 | goto err; | ||
381 | |||
382 | node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); | ||
258 | if (!node) | 383 | if (!node) |
259 | goto err; | 384 | goto err; |
260 | 385 | ||
@@ -271,25 +396,7 @@ static void atmci_init_debugfs(struct atmel_mci *host) | |||
271 | return; | 396 | return; |
272 | 397 | ||
273 | err: | 398 | err: |
274 | dev_err(&host->pdev->dev, | 399 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); |
275 | "failed to initialize debugfs for controller\n"); | ||
276 | } | ||
277 | |||
278 | static void atmci_enable(struct atmel_mci *host) | ||
279 | { | ||
280 | clk_enable(host->mck); | ||
281 | mci_writel(host, CR, MCI_CR_MCIEN); | ||
282 | mci_writel(host, MR, host->mode_reg); | ||
283 | mci_writel(host, SDCR, host->sdc_reg); | ||
284 | } | ||
285 | |||
286 | static void atmci_disable(struct atmel_mci *host) | ||
287 | { | ||
288 | mci_writel(host, CR, MCI_CR_SWRST); | ||
289 | |||
290 | /* Stall until write is complete, then disable the bus clock */ | ||
291 | mci_readl(host, SR); | ||
292 | clk_disable(host->mck); | ||
293 | } | 400 | } |
294 | 401 | ||
295 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, | 402 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, |
@@ -299,7 +406,7 @@ static inline unsigned int ns_to_clocks(struct atmel_mci *host, | |||
299 | } | 406 | } |
300 | 407 | ||
301 | static void atmci_set_timeout(struct atmel_mci *host, | 408 | static void atmci_set_timeout(struct atmel_mci *host, |
302 | struct mmc_data *data) | 409 | struct atmel_mci_slot *slot, struct mmc_data *data) |
303 | { | 410 | { |
304 | static unsigned dtomul_to_shift[] = { | 411 | static unsigned dtomul_to_shift[] = { |
305 | 0, 4, 7, 8, 10, 12, 16, 20 | 412 | 0, 4, 7, 8, 10, 12, 16, 20 |
@@ -322,7 +429,7 @@ static void atmci_set_timeout(struct atmel_mci *host, | |||
322 | dtocyc = 15; | 429 | dtocyc = 15; |
323 | } | 430 | } |
324 | 431 | ||
325 | dev_vdbg(&host->mmc->class_dev, "setting timeout to %u cycles\n", | 432 | dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", |
326 | dtocyc << dtomul_to_shift[dtomul]); | 433 | dtocyc << dtomul_to_shift[dtomul]); |
327 | mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); | 434 | mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); |
328 | } | 435 | } |
@@ -375,15 +482,12 @@ static u32 atmci_prepare_command(struct mmc_host *mmc, | |||
375 | } | 482 | } |
376 | 483 | ||
377 | static void atmci_start_command(struct atmel_mci *host, | 484 | static void atmci_start_command(struct atmel_mci *host, |
378 | struct mmc_command *cmd, | 485 | struct mmc_command *cmd, u32 cmd_flags) |
379 | u32 cmd_flags) | ||
380 | { | 486 | { |
381 | /* Must read host->cmd after testing event flags */ | ||
382 | smp_rmb(); | ||
383 | WARN_ON(host->cmd); | 487 | WARN_ON(host->cmd); |
384 | host->cmd = cmd; | 488 | host->cmd = cmd; |
385 | 489 | ||
386 | dev_vdbg(&host->mmc->class_dev, | 490 | dev_vdbg(&host->pdev->dev, |
387 | "start command: ARGR=0x%08x CMDR=0x%08x\n", | 491 | "start command: ARGR=0x%08x CMDR=0x%08x\n", |
388 | cmd->arg, cmd_flags); | 492 | cmd->arg, cmd_flags); |
389 | 493 | ||
@@ -391,34 +495,157 @@ static void atmci_start_command(struct atmel_mci *host, | |||
391 | mci_writel(host, CMDR, cmd_flags); | 495 | mci_writel(host, CMDR, cmd_flags); |
392 | } | 496 | } |
393 | 497 | ||
394 | static void send_stop_cmd(struct mmc_host *mmc, struct mmc_data *data) | 498 | static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) |
395 | { | 499 | { |
396 | struct atmel_mci *host = mmc_priv(mmc); | ||
397 | |||
398 | atmci_start_command(host, data->stop, host->stop_cmdr); | 500 | atmci_start_command(host, data->stop, host->stop_cmdr); |
399 | mci_writel(host, IER, MCI_CMDRDY); | 501 | mci_writel(host, IER, MCI_CMDRDY); |
400 | } | 502 | } |
401 | 503 | ||
402 | static void atmci_request_end(struct mmc_host *mmc, struct mmc_request *mrq) | 504 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
505 | static void atmci_dma_cleanup(struct atmel_mci *host) | ||
403 | { | 506 | { |
404 | struct atmel_mci *host = mmc_priv(mmc); | 507 | struct mmc_data *data = host->data; |
405 | 508 | ||
406 | WARN_ON(host->cmd || host->data); | 509 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, |
407 | host->mrq = NULL; | 510 | ((data->flags & MMC_DATA_WRITE) |
511 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | ||
512 | } | ||
513 | |||
514 | static void atmci_stop_dma(struct atmel_mci *host) | ||
515 | { | ||
516 | struct dma_chan *chan = host->data_chan; | ||
517 | |||
518 | if (chan) { | ||
519 | chan->device->device_terminate_all(chan); | ||
520 | atmci_dma_cleanup(host); | ||
521 | } else { | ||
522 | /* Data transfer was stopped by the interrupt handler */ | ||
523 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | ||
524 | mci_writel(host, IER, MCI_NOTBUSY); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | /* This function is called by the DMA driver from tasklet context. */ | ||
529 | static void atmci_dma_complete(void *arg) | ||
530 | { | ||
531 | struct atmel_mci *host = arg; | ||
532 | struct mmc_data *data = host->data; | ||
533 | |||
534 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); | ||
535 | |||
536 | atmci_dma_cleanup(host); | ||
537 | |||
538 | /* | ||
539 | * If the card was removed, data will be NULL. No point trying | ||
540 | * to send the stop command or waiting for NBUSY in this case. | ||
541 | */ | ||
542 | if (data) { | ||
543 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | ||
544 | tasklet_schedule(&host->tasklet); | ||
545 | |||
546 | /* | ||
547 | * Regardless of what the documentation says, we have | ||
548 | * to wait for NOTBUSY even after block read | ||
549 | * operations. | ||
550 | * | ||
551 | * When the DMA transfer is complete, the controller | ||
552 | * may still be reading the CRC from the card, i.e. | ||
553 | * the data transfer is still in progress and we | ||
554 | * haven't seen all the potential error bits yet. | ||
555 | * | ||
556 | * The interrupt handler will schedule a different | ||
557 | * tasklet to finish things up when the data transfer | ||
558 | * is completely done. | ||
559 | * | ||
560 | * We may not complete the mmc request here anyway | ||
561 | * because the mmc layer may call back and cause us to | ||
562 | * violate the "don't submit new operations from the | ||
563 | * completion callback" rule of the dma engine | ||
564 | * framework. | ||
565 | */ | ||
566 | mci_writel(host, IER, MCI_NOTBUSY); | ||
567 | } | ||
568 | } | ||
569 | |||
570 | static int | ||
571 | atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | ||
572 | { | ||
573 | struct dma_chan *chan; | ||
574 | struct dma_async_tx_descriptor *desc; | ||
575 | struct scatterlist *sg; | ||
576 | unsigned int i; | ||
577 | enum dma_data_direction direction; | ||
578 | |||
579 | /* | ||
580 | * We don't do DMA on "complex" transfers, i.e. with | ||
581 | * non-word-aligned buffers or lengths. Also, we don't bother | ||
582 | * with all the DMA setup overhead for short transfers. | ||
583 | */ | ||
584 | if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) | ||
585 | return -EINVAL; | ||
586 | if (data->blksz & 3) | ||
587 | return -EINVAL; | ||
588 | |||
589 | for_each_sg(data->sg, sg, data->sg_len, i) { | ||
590 | if (sg->offset & 3 || sg->length & 3) | ||
591 | return -EINVAL; | ||
592 | } | ||
593 | |||
594 | /* If we don't have a channel, we can't do DMA */ | ||
595 | chan = host->dma.chan; | ||
596 | if (chan) { | ||
597 | dma_chan_get(chan); | ||
598 | host->data_chan = chan; | ||
599 | } | ||
600 | |||
601 | if (!chan) | ||
602 | return -ENODEV; | ||
603 | |||
604 | if (data->flags & MMC_DATA_READ) | ||
605 | direction = DMA_FROM_DEVICE; | ||
606 | else | ||
607 | direction = DMA_TO_DEVICE; | ||
608 | |||
609 | desc = chan->device->device_prep_slave_sg(chan, | ||
610 | data->sg, data->sg_len, direction, | ||
611 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
612 | if (!desc) | ||
613 | return -ENOMEM; | ||
408 | 614 | ||
409 | atmci_disable(host); | 615 | host->dma.data_desc = desc; |
616 | desc->callback = atmci_dma_complete; | ||
617 | desc->callback_param = host; | ||
618 | desc->tx_submit(desc); | ||
410 | 619 | ||
411 | mmc_request_done(mmc, mrq); | 620 | /* Go! */ |
621 | chan->device->device_issue_pending(chan); | ||
622 | |||
623 | return 0; | ||
624 | } | ||
625 | |||
626 | #else /* CONFIG_MMC_ATMELMCI_DMA */ | ||
627 | |||
628 | static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) | ||
629 | { | ||
630 | return -ENOSYS; | ||
412 | } | 631 | } |
413 | 632 | ||
633 | static void atmci_stop_dma(struct atmel_mci *host) | ||
634 | { | ||
635 | /* Data transfer was stopped by the interrupt handler */ | ||
636 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | ||
637 | mci_writel(host, IER, MCI_NOTBUSY); | ||
638 | } | ||
639 | |||
640 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
641 | |||
414 | /* | 642 | /* |
415 | * Returns a mask of interrupt flags to be enabled after the whole | 643 | * Returns a mask of interrupt flags to be enabled after the whole |
416 | * request has been prepared. | 644 | * request has been prepared. |
417 | */ | 645 | */ |
418 | static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) | 646 | static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) |
419 | { | 647 | { |
420 | struct atmel_mci *host = mmc_priv(mmc); | 648 | u32 iflags; |
421 | u32 iflags; | ||
422 | 649 | ||
423 | data->error = -EINPROGRESS; | 650 | data->error = -EINPROGRESS; |
424 | 651 | ||
@@ -426,75 +653,89 @@ static u32 atmci_submit_data(struct mmc_host *mmc, struct mmc_data *data) | |||
426 | host->sg = NULL; | 653 | host->sg = NULL; |
427 | host->data = data; | 654 | host->data = data; |
428 | 655 | ||
429 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) | ||
430 | | MCI_BLKLEN(data->blksz)); | ||
431 | dev_vdbg(&mmc->class_dev, "BLKR=0x%08x\n", | ||
432 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); | ||
433 | |||
434 | iflags = ATMCI_DATA_ERROR_FLAGS; | 656 | iflags = ATMCI_DATA_ERROR_FLAGS; |
435 | host->sg = data->sg; | 657 | if (atmci_submit_data_dma(host, data)) { |
436 | host->pio_offset = 0; | 658 | host->data_chan = NULL; |
437 | if (data->flags & MMC_DATA_READ) | 659 | |
438 | iflags |= MCI_RXRDY; | 660 | /* |
439 | else | 661 | * Errata: MMC data write operation with less than 12 |
440 | iflags |= MCI_TXRDY; | 662 | * bytes is impossible. |
663 | * | ||
664 | * Errata: MCI Transmit Data Register (TDR) FIFO | ||
665 | * corruption when length is not multiple of 4. | ||
666 | */ | ||
667 | if (data->blocks * data->blksz < 12 | ||
668 | || (data->blocks * data->blksz) & 3) | ||
669 | host->need_reset = true; | ||
670 | |||
671 | host->sg = data->sg; | ||
672 | host->pio_offset = 0; | ||
673 | if (data->flags & MMC_DATA_READ) | ||
674 | iflags |= MCI_RXRDY; | ||
675 | else | ||
676 | iflags |= MCI_TXRDY; | ||
677 | } | ||
441 | 678 | ||
442 | return iflags; | 679 | return iflags; |
443 | } | 680 | } |
444 | 681 | ||
445 | static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | 682 | static void atmci_start_request(struct atmel_mci *host, |
683 | struct atmel_mci_slot *slot) | ||
446 | { | 684 | { |
447 | struct atmel_mci *host = mmc_priv(mmc); | 685 | struct mmc_request *mrq; |
448 | struct mmc_data *data; | ||
449 | struct mmc_command *cmd; | 686 | struct mmc_command *cmd; |
687 | struct mmc_data *data; | ||
450 | u32 iflags; | 688 | u32 iflags; |
451 | u32 cmdflags = 0; | 689 | u32 cmdflags; |
452 | |||
453 | iflags = mci_readl(host, IMR); | ||
454 | if (iflags) | ||
455 | dev_warn(&mmc->class_dev, "WARNING: IMR=0x%08x\n", | ||
456 | mci_readl(host, IMR)); | ||
457 | |||
458 | WARN_ON(host->mrq != NULL); | ||
459 | |||
460 | /* | ||
461 | * We may "know" the card is gone even though there's still an | ||
462 | * electrical connection. If so, we really need to communicate | ||
463 | * this to the MMC core since there won't be any more | ||
464 | * interrupts as the card is completely removed. Otherwise, | ||
465 | * the MMC core might believe the card is still there even | ||
466 | * though the card was just removed very slowly. | ||
467 | */ | ||
468 | if (!host->present) { | ||
469 | mrq->cmd->error = -ENOMEDIUM; | ||
470 | mmc_request_done(mmc, mrq); | ||
471 | return; | ||
472 | } | ||
473 | 690 | ||
691 | mrq = slot->mrq; | ||
692 | host->cur_slot = slot; | ||
474 | host->mrq = mrq; | 693 | host->mrq = mrq; |
694 | |||
475 | host->pending_events = 0; | 695 | host->pending_events = 0; |
476 | host->completed_events = 0; | 696 | host->completed_events = 0; |
697 | host->data_status = 0; | ||
477 | 698 | ||
478 | atmci_enable(host); | 699 | if (host->need_reset) { |
700 | mci_writel(host, CR, MCI_CR_SWRST); | ||
701 | mci_writel(host, CR, MCI_CR_MCIEN); | ||
702 | mci_writel(host, MR, host->mode_reg); | ||
703 | host->need_reset = false; | ||
704 | } | ||
705 | mci_writel(host, SDCR, slot->sdc_reg); | ||
479 | 706 | ||
480 | /* We don't support multiple blocks of weird lengths. */ | 707 | iflags = mci_readl(host, IMR); |
708 | if (iflags) | ||
709 | dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", | ||
710 | iflags); | ||
711 | |||
712 | if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { | ||
713 | /* Send init sequence (74 clock cycles) */ | ||
714 | mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); | ||
715 | while (!(mci_readl(host, SR) & MCI_CMDRDY)) | ||
716 | cpu_relax(); | ||
717 | } | ||
481 | data = mrq->data; | 718 | data = mrq->data; |
482 | if (data) { | 719 | if (data) { |
483 | if (data->blocks > 1 && data->blksz & 3) | 720 | atmci_set_timeout(host, slot, data); |
484 | goto fail; | 721 | |
485 | atmci_set_timeout(host, data); | 722 | /* Must set block count/size before sending command */ |
723 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) | ||
724 | | MCI_BLKLEN(data->blksz)); | ||
725 | dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", | ||
726 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); | ||
486 | } | 727 | } |
487 | 728 | ||
488 | iflags = MCI_CMDRDY; | 729 | iflags = MCI_CMDRDY; |
489 | cmd = mrq->cmd; | 730 | cmd = mrq->cmd; |
490 | cmdflags = atmci_prepare_command(mmc, cmd); | 731 | cmdflags = atmci_prepare_command(slot->mmc, cmd); |
491 | atmci_start_command(host, cmd, cmdflags); | 732 | atmci_start_command(host, cmd, cmdflags); |
492 | 733 | ||
493 | if (data) | 734 | if (data) |
494 | iflags |= atmci_submit_data(mmc, data); | 735 | iflags |= atmci_submit_data(host, data); |
495 | 736 | ||
496 | if (mrq->stop) { | 737 | if (mrq->stop) { |
497 | host->stop_cmdr = atmci_prepare_command(mmc, mrq->stop); | 738 | host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); |
498 | host->stop_cmdr |= MCI_CMDR_STOP_XFER; | 739 | host->stop_cmdr |= MCI_CMDR_STOP_XFER; |
499 | if (!(data->flags & MMC_DATA_WRITE)) | 740 | if (!(data->flags & MMC_DATA_WRITE)) |
500 | host->stop_cmdr |= MCI_CMDR_TRDIR_READ; | 741 | host->stop_cmdr |= MCI_CMDR_TRDIR_READ; |
@@ -511,59 +752,156 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | |||
511 | * prepared yet.) | 752 | * prepared yet.) |
512 | */ | 753 | */ |
513 | mci_writel(host, IER, iflags); | 754 | mci_writel(host, IER, iflags); |
755 | } | ||
514 | 756 | ||
515 | return; | 757 | static void atmci_queue_request(struct atmel_mci *host, |
758 | struct atmel_mci_slot *slot, struct mmc_request *mrq) | ||
759 | { | ||
760 | dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", | ||
761 | host->state); | ||
762 | |||
763 | spin_lock_bh(&host->lock); | ||
764 | slot->mrq = mrq; | ||
765 | if (host->state == STATE_IDLE) { | ||
766 | host->state = STATE_SENDING_CMD; | ||
767 | atmci_start_request(host, slot); | ||
768 | } else { | ||
769 | list_add_tail(&slot->queue_node, &host->queue); | ||
770 | } | ||
771 | spin_unlock_bh(&host->lock); | ||
772 | } | ||
516 | 773 | ||
517 | fail: | 774 | static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
518 | atmci_disable(host); | 775 | { |
519 | host->mrq = NULL; | 776 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
520 | mrq->cmd->error = -EINVAL; | 777 | struct atmel_mci *host = slot->host; |
521 | mmc_request_done(mmc, mrq); | 778 | struct mmc_data *data; |
779 | |||
780 | WARN_ON(slot->mrq); | ||
781 | |||
782 | /* | ||
783 | * We may "know" the card is gone even though there's still an | ||
784 | * electrical connection. If so, we really need to communicate | ||
785 | * this to the MMC core since there won't be any more | ||
786 | * interrupts as the card is completely removed. Otherwise, | ||
787 | * the MMC core might believe the card is still there even | ||
788 | * though the card was just removed very slowly. | ||
789 | */ | ||
790 | if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { | ||
791 | mrq->cmd->error = -ENOMEDIUM; | ||
792 | mmc_request_done(mmc, mrq); | ||
793 | return; | ||
794 | } | ||
795 | |||
796 | /* We don't support multiple blocks of weird lengths. */ | ||
797 | data = mrq->data; | ||
798 | if (data && data->blocks > 1 && data->blksz & 3) { | ||
799 | mrq->cmd->error = -EINVAL; | ||
800 | mmc_request_done(mmc, mrq); | ||
801 | } | ||
802 | |||
803 | atmci_queue_request(host, slot, mrq); | ||
522 | } | 804 | } |
523 | 805 | ||
524 | static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 806 | static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
525 | { | 807 | { |
526 | struct atmel_mci *host = mmc_priv(mmc); | 808 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
809 | struct atmel_mci *host = slot->host; | ||
810 | unsigned int i; | ||
811 | |||
812 | slot->sdc_reg &= ~MCI_SDCBUS_MASK; | ||
813 | switch (ios->bus_width) { | ||
814 | case MMC_BUS_WIDTH_1: | ||
815 | slot->sdc_reg |= MCI_SDCBUS_1BIT; | ||
816 | break; | ||
817 | case MMC_BUS_WIDTH_4: | ||
818 | slot->sdc_reg |= MCI_SDCBUS_4BIT; | ||
819 | break; | ||
820 | } | ||
527 | 821 | ||
528 | if (ios->clock) { | 822 | if (ios->clock) { |
823 | unsigned int clock_min = ~0U; | ||
529 | u32 clkdiv; | 824 | u32 clkdiv; |
530 | 825 | ||
531 | /* Set clock rate */ | 826 | spin_lock_bh(&host->lock); |
532 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * ios->clock) - 1; | 827 | if (!host->mode_reg) { |
828 | clk_enable(host->mck); | ||
829 | mci_writel(host, CR, MCI_CR_SWRST); | ||
830 | mci_writel(host, CR, MCI_CR_MCIEN); | ||
831 | } | ||
832 | |||
833 | /* | ||
834 | * Use mirror of ios->clock to prevent race with mmc | ||
835 | * core ios update when finding the minimum. | ||
836 | */ | ||
837 | slot->clock = ios->clock; | ||
838 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | ||
839 | if (host->slot[i] && host->slot[i]->clock | ||
840 | && host->slot[i]->clock < clock_min) | ||
841 | clock_min = host->slot[i]->clock; | ||
842 | } | ||
843 | |||
844 | /* Calculate clock divider */ | ||
845 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; | ||
533 | if (clkdiv > 255) { | 846 | if (clkdiv > 255) { |
534 | dev_warn(&mmc->class_dev, | 847 | dev_warn(&mmc->class_dev, |
535 | "clock %u too slow; using %lu\n", | 848 | "clock %u too slow; using %lu\n", |
536 | ios->clock, host->bus_hz / (2 * 256)); | 849 | clock_min, host->bus_hz / (2 * 256)); |
537 | clkdiv = 255; | 850 | clkdiv = 255; |
538 | } | 851 | } |
539 | 852 | ||
853 | /* | ||
854 | * WRPROOF and RDPROOF prevent overruns/underruns by | ||
855 | * stopping the clock when the FIFO is full/empty. | ||
856 | * This state is not expected to last for long. | ||
857 | */ | ||
540 | host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF | 858 | host->mode_reg = MCI_MR_CLKDIV(clkdiv) | MCI_MR_WRPROOF |
541 | | MCI_MR_RDPROOF; | 859 | | MCI_MR_RDPROOF; |
542 | } | ||
543 | 860 | ||
544 | switch (ios->bus_width) { | 861 | if (list_empty(&host->queue)) |
545 | case MMC_BUS_WIDTH_1: | 862 | mci_writel(host, MR, host->mode_reg); |
546 | host->sdc_reg = 0; | 863 | else |
547 | break; | 864 | host->need_clock_update = true; |
548 | case MMC_BUS_WIDTH_4: | 865 | |
549 | host->sdc_reg = MCI_SDCBUS_4BIT; | 866 | spin_unlock_bh(&host->lock); |
550 | break; | 867 | } else { |
868 | bool any_slot_active = false; | ||
869 | |||
870 | spin_lock_bh(&host->lock); | ||
871 | slot->clock = 0; | ||
872 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | ||
873 | if (host->slot[i] && host->slot[i]->clock) { | ||
874 | any_slot_active = true; | ||
875 | break; | ||
876 | } | ||
877 | } | ||
878 | if (!any_slot_active) { | ||
879 | mci_writel(host, CR, MCI_CR_MCIDIS); | ||
880 | if (host->mode_reg) { | ||
881 | mci_readl(host, MR); | ||
882 | clk_disable(host->mck); | ||
883 | } | ||
884 | host->mode_reg = 0; | ||
885 | } | ||
886 | spin_unlock_bh(&host->lock); | ||
551 | } | 887 | } |
552 | 888 | ||
553 | switch (ios->power_mode) { | 889 | switch (ios->power_mode) { |
554 | case MMC_POWER_ON: | 890 | case MMC_POWER_UP: |
555 | /* Send init sequence (74 clock cycles) */ | 891 | set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); |
556 | atmci_enable(host); | ||
557 | mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); | ||
558 | while (!(mci_readl(host, SR) & MCI_CMDRDY)) | ||
559 | cpu_relax(); | ||
560 | atmci_disable(host); | ||
561 | break; | 892 | break; |
562 | default: | 893 | default: |
563 | /* | 894 | /* |
564 | * TODO: None of the currently available AVR32-based | 895 | * TODO: None of the currently available AVR32-based |
565 | * boards allow MMC power to be turned off. Implement | 896 | * boards allow MMC power to be turned off. Implement |
566 | * power control when this can be tested properly. | 897 | * power control when this can be tested properly. |
898 | * | ||
899 | * We also need to hook this into the clock management | ||
900 | * somehow so that newly inserted cards aren't | ||
901 | * subjected to a fast clock before we have a chance | ||
902 | * to figure out what the maximum rate is. Currently, | ||
903 | * there's no way to avoid this, and there never will | ||
904 | * be for boards that don't support power control. | ||
567 | */ | 905 | */ |
568 | break; | 906 | break; |
569 | } | 907 | } |
@@ -571,31 +909,82 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
571 | 909 | ||
572 | static int atmci_get_ro(struct mmc_host *mmc) | 910 | static int atmci_get_ro(struct mmc_host *mmc) |
573 | { | 911 | { |
574 | int read_only = 0; | 912 | int read_only = -ENOSYS; |
575 | struct atmel_mci *host = mmc_priv(mmc); | 913 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
576 | 914 | ||
577 | if (gpio_is_valid(host->wp_pin)) { | 915 | if (gpio_is_valid(slot->wp_pin)) { |
578 | read_only = gpio_get_value(host->wp_pin); | 916 | read_only = gpio_get_value(slot->wp_pin); |
579 | dev_dbg(&mmc->class_dev, "card is %s\n", | 917 | dev_dbg(&mmc->class_dev, "card is %s\n", |
580 | read_only ? "read-only" : "read-write"); | 918 | read_only ? "read-only" : "read-write"); |
581 | } else { | ||
582 | dev_dbg(&mmc->class_dev, | ||
583 | "no pin for checking read-only switch." | ||
584 | " Assuming write-enable.\n"); | ||
585 | } | 919 | } |
586 | 920 | ||
587 | return read_only; | 921 | return read_only; |
588 | } | 922 | } |
589 | 923 | ||
590 | static struct mmc_host_ops atmci_ops = { | 924 | static int atmci_get_cd(struct mmc_host *mmc) |
925 | { | ||
926 | int present = -ENOSYS; | ||
927 | struct atmel_mci_slot *slot = mmc_priv(mmc); | ||
928 | |||
929 | if (gpio_is_valid(slot->detect_pin)) { | ||
930 | present = !gpio_get_value(slot->detect_pin); | ||
931 | dev_dbg(&mmc->class_dev, "card is %spresent\n", | ||
932 | present ? "" : "not "); | ||
933 | } | ||
934 | |||
935 | return present; | ||
936 | } | ||
937 | |||
938 | static const struct mmc_host_ops atmci_ops = { | ||
591 | .request = atmci_request, | 939 | .request = atmci_request, |
592 | .set_ios = atmci_set_ios, | 940 | .set_ios = atmci_set_ios, |
593 | .get_ro = atmci_get_ro, | 941 | .get_ro = atmci_get_ro, |
942 | .get_cd = atmci_get_cd, | ||
594 | }; | 943 | }; |
595 | 944 | ||
945 | /* Called with host->lock held */ | ||
946 | static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) | ||
947 | __releases(&host->lock) | ||
948 | __acquires(&host->lock) | ||
949 | { | ||
950 | struct atmel_mci_slot *slot = NULL; | ||
951 | struct mmc_host *prev_mmc = host->cur_slot->mmc; | ||
952 | |||
953 | WARN_ON(host->cmd || host->data); | ||
954 | |||
955 | /* | ||
956 | * Update the MMC clock rate if necessary. This may be | ||
957 | * necessary if set_ios() is called when a different slot is | ||
958 | * busy transferring data. | ||
959 | */ | ||
960 | if (host->need_clock_update) | ||
961 | mci_writel(host, MR, host->mode_reg); | ||
962 | |||
963 | host->cur_slot->mrq = NULL; | ||
964 | host->mrq = NULL; | ||
965 | if (!list_empty(&host->queue)) { | ||
966 | slot = list_entry(host->queue.next, | ||
967 | struct atmel_mci_slot, queue_node); | ||
968 | list_del(&slot->queue_node); | ||
969 | dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", | ||
970 | mmc_hostname(slot->mmc)); | ||
971 | host->state = STATE_SENDING_CMD; | ||
972 | atmci_start_request(host, slot); | ||
973 | } else { | ||
974 | dev_vdbg(&host->pdev->dev, "list empty\n"); | ||
975 | host->state = STATE_IDLE; | ||
976 | } | ||
977 | |||
978 | spin_unlock(&host->lock); | ||
979 | mmc_request_done(prev_mmc, mrq); | ||
980 | spin_lock(&host->lock); | ||
981 | } | ||
982 | |||
596 | static void atmci_command_complete(struct atmel_mci *host, | 983 | static void atmci_command_complete(struct atmel_mci *host, |
597 | struct mmc_command *cmd, u32 status) | 984 | struct mmc_command *cmd) |
598 | { | 985 | { |
986 | u32 status = host->cmd_status; | ||
987 | |||
599 | /* Read the response from the card (up to 16 bytes) */ | 988 | /* Read the response from the card (up to 16 bytes) */ |
600 | cmd->resp[0] = mci_readl(host, RSPR); | 989 | cmd->resp[0] = mci_readl(host, RSPR); |
601 | cmd->resp[1] = mci_readl(host, RSPR); | 990 | cmd->resp[1] = mci_readl(host, RSPR); |
@@ -612,11 +1001,12 @@ static void atmci_command_complete(struct atmel_mci *host, | |||
612 | cmd->error = 0; | 1001 | cmd->error = 0; |
613 | 1002 | ||
614 | if (cmd->error) { | 1003 | if (cmd->error) { |
615 | dev_dbg(&host->mmc->class_dev, | 1004 | dev_dbg(&host->pdev->dev, |
616 | "command error: status=0x%08x\n", status); | 1005 | "command error: status=0x%08x\n", status); |
617 | 1006 | ||
618 | if (cmd->data) { | 1007 | if (cmd->data) { |
619 | host->data = NULL; | 1008 | host->data = NULL; |
1009 | atmci_stop_dma(host); | ||
620 | mci_writel(host, IDR, MCI_NOTBUSY | 1010 | mci_writel(host, IDR, MCI_NOTBUSY |
621 | | MCI_TXRDY | MCI_RXRDY | 1011 | | MCI_TXRDY | MCI_RXRDY |
622 | | ATMCI_DATA_ERROR_FLAGS); | 1012 | | ATMCI_DATA_ERROR_FLAGS); |
@@ -626,146 +1016,222 @@ static void atmci_command_complete(struct atmel_mci *host, | |||
626 | 1016 | ||
627 | static void atmci_detect_change(unsigned long data) | 1017 | static void atmci_detect_change(unsigned long data) |
628 | { | 1018 | { |
629 | struct atmel_mci *host = (struct atmel_mci *)data; | 1019 | struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; |
630 | struct mmc_request *mrq = host->mrq; | 1020 | bool present; |
631 | int present; | 1021 | bool present_old; |
632 | 1022 | ||
633 | /* | 1023 | /* |
634 | * atmci_remove() sets detect_pin to -1 before freeing the | 1024 | * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before |
635 | * interrupt. We must not re-enable the interrupt if it has | 1025 | * freeing the interrupt. We must not re-enable the interrupt |
636 | * been freed. | 1026 | * if it has been freed, and if we're shutting down, it |
1027 | * doesn't really matter whether the card is present or not. | ||
637 | */ | 1028 | */ |
638 | smp_rmb(); | 1029 | smp_rmb(); |
639 | if (!gpio_is_valid(host->detect_pin)) | 1030 | if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) |
640 | return; | 1031 | return; |
641 | 1032 | ||
642 | enable_irq(gpio_to_irq(host->detect_pin)); | 1033 | enable_irq(gpio_to_irq(slot->detect_pin)); |
643 | present = !gpio_get_value(host->detect_pin); | 1034 | present = !gpio_get_value(slot->detect_pin); |
1035 | present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); | ||
1036 | |||
1037 | dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", | ||
1038 | present, present_old); | ||
644 | 1039 | ||
645 | dev_vdbg(&host->pdev->dev, "detect change: %d (was %d)\n", | 1040 | if (present != present_old) { |
646 | present, host->present); | 1041 | struct atmel_mci *host = slot->host; |
1042 | struct mmc_request *mrq; | ||
647 | 1043 | ||
648 | if (present != host->present) { | 1044 | dev_dbg(&slot->mmc->class_dev, "card %s\n", |
649 | dev_dbg(&host->mmc->class_dev, "card %s\n", | ||
650 | present ? "inserted" : "removed"); | 1045 | present ? "inserted" : "removed"); |
651 | host->present = present; | ||
652 | 1046 | ||
653 | /* Reset controller if card is gone */ | 1047 | spin_lock(&host->lock); |
654 | if (!present) { | 1048 | |
655 | mci_writel(host, CR, MCI_CR_SWRST); | 1049 | if (!present) |
656 | mci_writel(host, IDR, ~0UL); | 1050 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); |
657 | mci_writel(host, CR, MCI_CR_MCIEN); | 1051 | else |
658 | } | 1052 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); |
659 | 1053 | ||
660 | /* Clean up queue if present */ | 1054 | /* Clean up queue if present */ |
1055 | mrq = slot->mrq; | ||
661 | if (mrq) { | 1056 | if (mrq) { |
662 | /* | 1057 | if (mrq == host->mrq) { |
663 | * Reset controller to terminate any ongoing | 1058 | /* |
664 | * commands or data transfers. | 1059 | * Reset controller to terminate any ongoing |
665 | */ | 1060 | * commands or data transfers. |
666 | mci_writel(host, CR, MCI_CR_SWRST); | 1061 | */ |
1062 | mci_writel(host, CR, MCI_CR_SWRST); | ||
1063 | mci_writel(host, CR, MCI_CR_MCIEN); | ||
1064 | mci_writel(host, MR, host->mode_reg); | ||
667 | 1065 | ||
668 | if (!atmci_is_completed(host, EVENT_CMD_COMPLETE)) | ||
669 | mrq->cmd->error = -ENOMEDIUM; | ||
670 | |||
671 | if (mrq->data && !atmci_is_completed(host, | ||
672 | EVENT_DATA_COMPLETE)) { | ||
673 | host->data = NULL; | 1066 | host->data = NULL; |
674 | mrq->data->error = -ENOMEDIUM; | 1067 | host->cmd = NULL; |
1068 | |||
1069 | switch (host->state) { | ||
1070 | case STATE_IDLE: | ||
1071 | break; | ||
1072 | case STATE_SENDING_CMD: | ||
1073 | mrq->cmd->error = -ENOMEDIUM; | ||
1074 | if (!mrq->data) | ||
1075 | break; | ||
1076 | /* fall through */ | ||
1077 | case STATE_SENDING_DATA: | ||
1078 | mrq->data->error = -ENOMEDIUM; | ||
1079 | atmci_stop_dma(host); | ||
1080 | break; | ||
1081 | case STATE_DATA_BUSY: | ||
1082 | case STATE_DATA_ERROR: | ||
1083 | if (mrq->data->error == -EINPROGRESS) | ||
1084 | mrq->data->error = -ENOMEDIUM; | ||
1085 | if (!mrq->stop) | ||
1086 | break; | ||
1087 | /* fall through */ | ||
1088 | case STATE_SENDING_STOP: | ||
1089 | mrq->stop->error = -ENOMEDIUM; | ||
1090 | break; | ||
1091 | } | ||
1092 | |||
1093 | atmci_request_end(host, mrq); | ||
1094 | } else { | ||
1095 | list_del(&slot->queue_node); | ||
1096 | mrq->cmd->error = -ENOMEDIUM; | ||
1097 | if (mrq->data) | ||
1098 | mrq->data->error = -ENOMEDIUM; | ||
1099 | if (mrq->stop) | ||
1100 | mrq->stop->error = -ENOMEDIUM; | ||
1101 | |||
1102 | spin_unlock(&host->lock); | ||
1103 | mmc_request_done(slot->mmc, mrq); | ||
1104 | spin_lock(&host->lock); | ||
675 | } | 1105 | } |
676 | if (mrq->stop && !atmci_is_completed(host, | ||
677 | EVENT_STOP_COMPLETE)) | ||
678 | mrq->stop->error = -ENOMEDIUM; | ||
679 | |||
680 | host->cmd = NULL; | ||
681 | atmci_request_end(host->mmc, mrq); | ||
682 | } | 1106 | } |
1107 | spin_unlock(&host->lock); | ||
683 | 1108 | ||
684 | mmc_detect_change(host->mmc, 0); | 1109 | mmc_detect_change(slot->mmc, 0); |
685 | } | 1110 | } |
686 | } | 1111 | } |
687 | 1112 | ||
688 | static void atmci_tasklet_func(unsigned long priv) | 1113 | static void atmci_tasklet_func(unsigned long priv) |
689 | { | 1114 | { |
690 | struct mmc_host *mmc = (struct mmc_host *)priv; | 1115 | struct atmel_mci *host = (struct atmel_mci *)priv; |
691 | struct atmel_mci *host = mmc_priv(mmc); | ||
692 | struct mmc_request *mrq = host->mrq; | 1116 | struct mmc_request *mrq = host->mrq; |
693 | struct mmc_data *data = host->data; | 1117 | struct mmc_data *data = host->data; |
1118 | struct mmc_command *cmd = host->cmd; | ||
1119 | enum atmel_mci_state state = host->state; | ||
1120 | enum atmel_mci_state prev_state; | ||
1121 | u32 status; | ||
1122 | |||
1123 | spin_lock(&host->lock); | ||
694 | 1124 | ||
695 | dev_vdbg(&mmc->class_dev, | 1125 | state = host->state; |
696 | "tasklet: pending/completed/mask %lx/%lx/%x\n", | 1126 | |
697 | host->pending_events, host->completed_events, | 1127 | dev_vdbg(&host->pdev->dev, |
1128 | "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", | ||
1129 | state, host->pending_events, host->completed_events, | ||
698 | mci_readl(host, IMR)); | 1130 | mci_readl(host, IMR)); |
699 | 1131 | ||
700 | if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) { | 1132 | do { |
701 | /* | 1133 | prev_state = state; |
702 | * host->cmd must be set to NULL before the interrupt | ||
703 | * handler sees EVENT_CMD_COMPLETE | ||
704 | */ | ||
705 | host->cmd = NULL; | ||
706 | smp_wmb(); | ||
707 | atmci_set_completed(host, EVENT_CMD_COMPLETE); | ||
708 | atmci_command_complete(host, mrq->cmd, host->cmd_status); | ||
709 | |||
710 | if (!mrq->cmd->error && mrq->stop | ||
711 | && atmci_is_completed(host, EVENT_XFER_COMPLETE) | ||
712 | && !atmci_test_and_set_completed(host, | ||
713 | EVENT_STOP_SENT)) | ||
714 | send_stop_cmd(host->mmc, mrq->data); | ||
715 | } | ||
716 | if (atmci_test_and_clear_pending(host, EVENT_STOP_COMPLETE)) { | ||
717 | /* | ||
718 | * host->cmd must be set to NULL before the interrupt | ||
719 | * handler sees EVENT_STOP_COMPLETE | ||
720 | */ | ||
721 | host->cmd = NULL; | ||
722 | smp_wmb(); | ||
723 | atmci_set_completed(host, EVENT_STOP_COMPLETE); | ||
724 | atmci_command_complete(host, mrq->stop, host->stop_status); | ||
725 | } | ||
726 | if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { | ||
727 | u32 status = host->data_status; | ||
728 | 1134 | ||
729 | dev_vdbg(&mmc->class_dev, "data error: status=%08x\n", status); | 1135 | switch (state) { |
1136 | case STATE_IDLE: | ||
1137 | break; | ||
730 | 1138 | ||
731 | atmci_set_completed(host, EVENT_DATA_ERROR); | 1139 | case STATE_SENDING_CMD: |
732 | atmci_set_completed(host, EVENT_DATA_COMPLETE); | 1140 | if (!atmci_test_and_clear_pending(host, |
1141 | EVENT_CMD_COMPLETE)) | ||
1142 | break; | ||
733 | 1143 | ||
734 | if (status & MCI_DTOE) { | 1144 | host->cmd = NULL; |
735 | dev_dbg(&mmc->class_dev, | 1145 | atmci_set_completed(host, EVENT_CMD_COMPLETE); |
736 | "data timeout error\n"); | 1146 | atmci_command_complete(host, mrq->cmd); |
737 | data->error = -ETIMEDOUT; | 1147 | if (!mrq->data || cmd->error) { |
738 | } else if (status & MCI_DCRCE) { | 1148 | atmci_request_end(host, host->mrq); |
739 | dev_dbg(&mmc->class_dev, "data CRC error\n"); | 1149 | goto unlock; |
740 | data->error = -EILSEQ; | 1150 | } |
741 | } else { | 1151 | |
742 | dev_dbg(&mmc->class_dev, | 1152 | prev_state = state = STATE_SENDING_DATA; |
743 | "data FIFO error (status=%08x)\n", | 1153 | /* fall through */ |
744 | status); | 1154 | |
745 | data->error = -EIO; | 1155 | case STATE_SENDING_DATA: |
746 | } | 1156 | if (atmci_test_and_clear_pending(host, |
1157 | EVENT_DATA_ERROR)) { | ||
1158 | atmci_stop_dma(host); | ||
1159 | if (data->stop) | ||
1160 | send_stop_cmd(host, data); | ||
1161 | state = STATE_DATA_ERROR; | ||
1162 | break; | ||
1163 | } | ||
747 | 1164 | ||
748 | if (host->present && data->stop | 1165 | if (!atmci_test_and_clear_pending(host, |
749 | && atmci_is_completed(host, EVENT_CMD_COMPLETE) | 1166 | EVENT_XFER_COMPLETE)) |
750 | && !atmci_test_and_set_completed( | 1167 | break; |
751 | host, EVENT_STOP_SENT)) | ||
752 | send_stop_cmd(host->mmc, data); | ||
753 | 1168 | ||
754 | host->data = NULL; | 1169 | atmci_set_completed(host, EVENT_XFER_COMPLETE); |
755 | } | 1170 | prev_state = state = STATE_DATA_BUSY; |
756 | if (atmci_test_and_clear_pending(host, EVENT_DATA_COMPLETE)) { | 1171 | /* fall through */ |
757 | atmci_set_completed(host, EVENT_DATA_COMPLETE); | 1172 | |
1173 | case STATE_DATA_BUSY: | ||
1174 | if (!atmci_test_and_clear_pending(host, | ||
1175 | EVENT_DATA_COMPLETE)) | ||
1176 | break; | ||
758 | 1177 | ||
759 | if (!atmci_is_completed(host, EVENT_DATA_ERROR)) { | 1178 | host->data = NULL; |
760 | data->bytes_xfered = data->blocks * data->blksz; | 1179 | atmci_set_completed(host, EVENT_DATA_COMPLETE); |
761 | data->error = 0; | 1180 | status = host->data_status; |
1181 | if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { | ||
1182 | if (status & MCI_DTOE) { | ||
1183 | dev_dbg(&host->pdev->dev, | ||
1184 | "data timeout error\n"); | ||
1185 | data->error = -ETIMEDOUT; | ||
1186 | } else if (status & MCI_DCRCE) { | ||
1187 | dev_dbg(&host->pdev->dev, | ||
1188 | "data CRC error\n"); | ||
1189 | data->error = -EILSEQ; | ||
1190 | } else { | ||
1191 | dev_dbg(&host->pdev->dev, | ||
1192 | "data FIFO error (status=%08x)\n", | ||
1193 | status); | ||
1194 | data->error = -EIO; | ||
1195 | } | ||
1196 | } else { | ||
1197 | data->bytes_xfered = data->blocks * data->blksz; | ||
1198 | data->error = 0; | ||
1199 | } | ||
1200 | |||
1201 | if (!data->stop) { | ||
1202 | atmci_request_end(host, host->mrq); | ||
1203 | goto unlock; | ||
1204 | } | ||
1205 | |||
1206 | prev_state = state = STATE_SENDING_STOP; | ||
1207 | if (!data->error) | ||
1208 | send_stop_cmd(host, data); | ||
1209 | /* fall through */ | ||
1210 | |||
1211 | case STATE_SENDING_STOP: | ||
1212 | if (!atmci_test_and_clear_pending(host, | ||
1213 | EVENT_CMD_COMPLETE)) | ||
1214 | break; | ||
1215 | |||
1216 | host->cmd = NULL; | ||
1217 | atmci_command_complete(host, mrq->stop); | ||
1218 | atmci_request_end(host, host->mrq); | ||
1219 | goto unlock; | ||
1220 | |||
1221 | case STATE_DATA_ERROR: | ||
1222 | if (!atmci_test_and_clear_pending(host, | ||
1223 | EVENT_XFER_COMPLETE)) | ||
1224 | break; | ||
1225 | |||
1226 | state = STATE_DATA_BUSY; | ||
1227 | break; | ||
762 | } | 1228 | } |
1229 | } while (state != prev_state); | ||
763 | 1230 | ||
764 | host->data = NULL; | 1231 | host->state = state; |
765 | } | ||
766 | 1232 | ||
767 | if (host->mrq && !host->cmd && !host->data) | 1233 | unlock: |
768 | atmci_request_end(mmc, host->mrq); | 1234 | spin_unlock(&host->lock); |
769 | } | 1235 | } |
770 | 1236 | ||
771 | static void atmci_read_data_pio(struct atmel_mci *host) | 1237 | static void atmci_read_data_pio(struct atmel_mci *host) |
@@ -787,6 +1253,7 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
787 | nbytes += 4; | 1253 | nbytes += 4; |
788 | 1254 | ||
789 | if (offset == sg->length) { | 1255 | if (offset == sg->length) { |
1256 | flush_dcache_page(sg_page(sg)); | ||
790 | host->sg = sg = sg_next(sg); | 1257 | host->sg = sg = sg_next(sg); |
791 | if (!sg) | 1258 | if (!sg) |
792 | goto done; | 1259 | goto done; |
@@ -815,9 +1282,11 @@ static void atmci_read_data_pio(struct atmel_mci *host) | |||
815 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY | 1282 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY |
816 | | ATMCI_DATA_ERROR_FLAGS)); | 1283 | | ATMCI_DATA_ERROR_FLAGS)); |
817 | host->data_status = status; | 1284 | host->data_status = status; |
1285 | data->bytes_xfered += nbytes; | ||
1286 | smp_wmb(); | ||
818 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1287 | atmci_set_pending(host, EVENT_DATA_ERROR); |
819 | tasklet_schedule(&host->tasklet); | 1288 | tasklet_schedule(&host->tasklet); |
820 | break; | 1289 | return; |
821 | } | 1290 | } |
822 | } while (status & MCI_RXRDY); | 1291 | } while (status & MCI_RXRDY); |
823 | 1292 | ||
@@ -830,10 +1299,8 @@ done: | |||
830 | mci_writel(host, IDR, MCI_RXRDY); | 1299 | mci_writel(host, IDR, MCI_RXRDY); |
831 | mci_writel(host, IER, MCI_NOTBUSY); | 1300 | mci_writel(host, IER, MCI_NOTBUSY); |
832 | data->bytes_xfered += nbytes; | 1301 | data->bytes_xfered += nbytes; |
833 | atmci_set_completed(host, EVENT_XFER_COMPLETE); | 1302 | smp_wmb(); |
834 | if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) | 1303 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
835 | && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) | ||
836 | send_stop_cmd(host->mmc, data); | ||
837 | } | 1304 | } |
838 | 1305 | ||
839 | static void atmci_write_data_pio(struct atmel_mci *host) | 1306 | static void atmci_write_data_pio(struct atmel_mci *host) |
@@ -886,9 +1353,11 @@ static void atmci_write_data_pio(struct atmel_mci *host) | |||
886 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY | 1353 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY |
887 | | ATMCI_DATA_ERROR_FLAGS)); | 1354 | | ATMCI_DATA_ERROR_FLAGS)); |
888 | host->data_status = status; | 1355 | host->data_status = status; |
1356 | data->bytes_xfered += nbytes; | ||
1357 | smp_wmb(); | ||
889 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1358 | atmci_set_pending(host, EVENT_DATA_ERROR); |
890 | tasklet_schedule(&host->tasklet); | 1359 | tasklet_schedule(&host->tasklet); |
891 | break; | 1360 | return; |
892 | } | 1361 | } |
893 | } while (status & MCI_TXRDY); | 1362 | } while (status & MCI_TXRDY); |
894 | 1363 | ||
@@ -901,38 +1370,26 @@ done: | |||
901 | mci_writel(host, IDR, MCI_TXRDY); | 1370 | mci_writel(host, IDR, MCI_TXRDY); |
902 | mci_writel(host, IER, MCI_NOTBUSY); | 1371 | mci_writel(host, IER, MCI_NOTBUSY); |
903 | data->bytes_xfered += nbytes; | 1372 | data->bytes_xfered += nbytes; |
904 | atmci_set_completed(host, EVENT_XFER_COMPLETE); | 1373 | smp_wmb(); |
905 | if (data->stop && atmci_is_completed(host, EVENT_CMD_COMPLETE) | 1374 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
906 | && !atmci_test_and_set_completed(host, EVENT_STOP_SENT)) | ||
907 | send_stop_cmd(host->mmc, data); | ||
908 | } | 1375 | } |
909 | 1376 | ||
910 | static void atmci_cmd_interrupt(struct mmc_host *mmc, u32 status) | 1377 | static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) |
911 | { | 1378 | { |
912 | struct atmel_mci *host = mmc_priv(mmc); | ||
913 | |||
914 | mci_writel(host, IDR, MCI_CMDRDY); | 1379 | mci_writel(host, IDR, MCI_CMDRDY); |
915 | 1380 | ||
916 | if (atmci_is_completed(host, EVENT_STOP_SENT)) { | 1381 | host->cmd_status = status; |
917 | host->stop_status = status; | 1382 | smp_wmb(); |
918 | atmci_set_pending(host, EVENT_STOP_COMPLETE); | 1383 | atmci_set_pending(host, EVENT_CMD_COMPLETE); |
919 | } else { | ||
920 | host->cmd_status = status; | ||
921 | atmci_set_pending(host, EVENT_CMD_COMPLETE); | ||
922 | } | ||
923 | |||
924 | tasklet_schedule(&host->tasklet); | 1384 | tasklet_schedule(&host->tasklet); |
925 | } | 1385 | } |
926 | 1386 | ||
927 | static irqreturn_t atmci_interrupt(int irq, void *dev_id) | 1387 | static irqreturn_t atmci_interrupt(int irq, void *dev_id) |
928 | { | 1388 | { |
929 | struct mmc_host *mmc = dev_id; | 1389 | struct atmel_mci *host = dev_id; |
930 | struct atmel_mci *host = mmc_priv(mmc); | ||
931 | u32 status, mask, pending; | 1390 | u32 status, mask, pending; |
932 | unsigned int pass_count = 0; | 1391 | unsigned int pass_count = 0; |
933 | 1392 | ||
934 | spin_lock(&mmc->lock); | ||
935 | |||
936 | do { | 1393 | do { |
937 | status = mci_readl(host, SR); | 1394 | status = mci_readl(host, SR); |
938 | mask = mci_readl(host, IMR); | 1395 | mask = mci_readl(host, IMR); |
@@ -944,13 +1401,18 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) | |||
944 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS | 1401 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS |
945 | | MCI_RXRDY | MCI_TXRDY); | 1402 | | MCI_RXRDY | MCI_TXRDY); |
946 | pending &= mci_readl(host, IMR); | 1403 | pending &= mci_readl(host, IMR); |
1404 | |||
947 | host->data_status = status; | 1405 | host->data_status = status; |
1406 | smp_wmb(); | ||
948 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1407 | atmci_set_pending(host, EVENT_DATA_ERROR); |
949 | tasklet_schedule(&host->tasklet); | 1408 | tasklet_schedule(&host->tasklet); |
950 | } | 1409 | } |
951 | if (pending & MCI_NOTBUSY) { | 1410 | if (pending & MCI_NOTBUSY) { |
952 | mci_writel(host, IDR, (MCI_NOTBUSY | 1411 | mci_writel(host, IDR, |
953 | | ATMCI_DATA_ERROR_FLAGS)); | 1412 | ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); |
1413 | if (!host->data_status) | ||
1414 | host->data_status = status; | ||
1415 | smp_wmb(); | ||
954 | atmci_set_pending(host, EVENT_DATA_COMPLETE); | 1416 | atmci_set_pending(host, EVENT_DATA_COMPLETE); |
955 | tasklet_schedule(&host->tasklet); | 1417 | tasklet_schedule(&host->tasklet); |
956 | } | 1418 | } |
@@ -960,18 +1422,15 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) | |||
960 | atmci_write_data_pio(host); | 1422 | atmci_write_data_pio(host); |
961 | 1423 | ||
962 | if (pending & MCI_CMDRDY) | 1424 | if (pending & MCI_CMDRDY) |
963 | atmci_cmd_interrupt(mmc, status); | 1425 | atmci_cmd_interrupt(host, status); |
964 | } while (pass_count++ < 5); | 1426 | } while (pass_count++ < 5); |
965 | 1427 | ||
966 | spin_unlock(&mmc->lock); | ||
967 | |||
968 | return pass_count ? IRQ_HANDLED : IRQ_NONE; | 1428 | return pass_count ? IRQ_HANDLED : IRQ_NONE; |
969 | } | 1429 | } |
970 | 1430 | ||
971 | static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) | 1431 | static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) |
972 | { | 1432 | { |
973 | struct mmc_host *mmc = dev_id; | 1433 | struct atmel_mci_slot *slot = dev_id; |
974 | struct atmel_mci *host = mmc_priv(mmc); | ||
975 | 1434 | ||
976 | /* | 1435 | /* |
977 | * Disable interrupts until the pin has stabilized and check | 1436 | * Disable interrupts until the pin has stabilized and check |
@@ -979,19 +1438,176 @@ static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) | |||
979 | * middle of the timer routine when this interrupt triggers. | 1438 | * middle of the timer routine when this interrupt triggers. |
980 | */ | 1439 | */ |
981 | disable_irq_nosync(irq); | 1440 | disable_irq_nosync(irq); |
982 | mod_timer(&host->detect_timer, jiffies + msecs_to_jiffies(20)); | 1441 | mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); |
983 | 1442 | ||
984 | return IRQ_HANDLED; | 1443 | return IRQ_HANDLED; |
985 | } | 1444 | } |
986 | 1445 | ||
1446 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1447 | |||
1448 | static inline struct atmel_mci * | ||
1449 | dma_client_to_atmel_mci(struct dma_client *client) | ||
1450 | { | ||
1451 | return container_of(client, struct atmel_mci, dma.client); | ||
1452 | } | ||
1453 | |||
1454 | static enum dma_state_client atmci_dma_event(struct dma_client *client, | ||
1455 | struct dma_chan *chan, enum dma_state state) | ||
1456 | { | ||
1457 | struct atmel_mci *host; | ||
1458 | enum dma_state_client ret = DMA_NAK; | ||
1459 | |||
1460 | host = dma_client_to_atmel_mci(client); | ||
1461 | |||
1462 | switch (state) { | ||
1463 | case DMA_RESOURCE_AVAILABLE: | ||
1464 | spin_lock_bh(&host->lock); | ||
1465 | if (!host->dma.chan) { | ||
1466 | host->dma.chan = chan; | ||
1467 | ret = DMA_ACK; | ||
1468 | } | ||
1469 | spin_unlock_bh(&host->lock); | ||
1470 | |||
1471 | if (ret == DMA_ACK) | ||
1472 | dev_info(&host->pdev->dev, | ||
1473 | "Using %s for DMA transfers\n", | ||
1474 | chan->dev.bus_id); | ||
1475 | break; | ||
1476 | |||
1477 | case DMA_RESOURCE_REMOVED: | ||
1478 | spin_lock_bh(&host->lock); | ||
1479 | if (host->dma.chan == chan) { | ||
1480 | host->dma.chan = NULL; | ||
1481 | ret = DMA_ACK; | ||
1482 | } | ||
1483 | spin_unlock_bh(&host->lock); | ||
1484 | |||
1485 | if (ret == DMA_ACK) | ||
1486 | dev_info(&host->pdev->dev, | ||
1487 | "Lost %s, falling back to PIO\n", | ||
1488 | chan->dev.bus_id); | ||
1489 | break; | ||
1490 | |||
1491 | default: | ||
1492 | break; | ||
1493 | } | ||
1494 | |||
1495 | |||
1496 | return ret; | ||
1497 | } | ||
1498 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
1499 | |||
1500 | static int __init atmci_init_slot(struct atmel_mci *host, | ||
1501 | struct mci_slot_pdata *slot_data, unsigned int id, | ||
1502 | u32 sdc_reg) | ||
1503 | { | ||
1504 | struct mmc_host *mmc; | ||
1505 | struct atmel_mci_slot *slot; | ||
1506 | |||
1507 | mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); | ||
1508 | if (!mmc) | ||
1509 | return -ENOMEM; | ||
1510 | |||
1511 | slot = mmc_priv(mmc); | ||
1512 | slot->mmc = mmc; | ||
1513 | slot->host = host; | ||
1514 | slot->detect_pin = slot_data->detect_pin; | ||
1515 | slot->wp_pin = slot_data->wp_pin; | ||
1516 | slot->sdc_reg = sdc_reg; | ||
1517 | |||
1518 | mmc->ops = &atmci_ops; | ||
1519 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); | ||
1520 | mmc->f_max = host->bus_hz / 2; | ||
1521 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1522 | if (slot_data->bus_width >= 4) | ||
1523 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
1524 | |||
1525 | mmc->max_hw_segs = 64; | ||
1526 | mmc->max_phys_segs = 64; | ||
1527 | mmc->max_req_size = 32768 * 512; | ||
1528 | mmc->max_blk_size = 32768; | ||
1529 | mmc->max_blk_count = 512; | ||
1530 | |||
1531 | /* Assume card is present initially */ | ||
1532 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); | ||
1533 | if (gpio_is_valid(slot->detect_pin)) { | ||
1534 | if (gpio_request(slot->detect_pin, "mmc_detect")) { | ||
1535 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); | ||
1536 | slot->detect_pin = -EBUSY; | ||
1537 | } else if (gpio_get_value(slot->detect_pin)) { | ||
1538 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); | ||
1539 | } | ||
1540 | } | ||
1541 | |||
1542 | if (!gpio_is_valid(slot->detect_pin)) | ||
1543 | mmc->caps |= MMC_CAP_NEEDS_POLL; | ||
1544 | |||
1545 | if (gpio_is_valid(slot->wp_pin)) { | ||
1546 | if (gpio_request(slot->wp_pin, "mmc_wp")) { | ||
1547 | dev_dbg(&mmc->class_dev, "no WP pin available\n"); | ||
1548 | slot->wp_pin = -EBUSY; | ||
1549 | } | ||
1550 | } | ||
1551 | |||
1552 | host->slot[id] = slot; | ||
1553 | mmc_add_host(mmc); | ||
1554 | |||
1555 | if (gpio_is_valid(slot->detect_pin)) { | ||
1556 | int ret; | ||
1557 | |||
1558 | setup_timer(&slot->detect_timer, atmci_detect_change, | ||
1559 | (unsigned long)slot); | ||
1560 | |||
1561 | ret = request_irq(gpio_to_irq(slot->detect_pin), | ||
1562 | atmci_detect_interrupt, | ||
1563 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
1564 | "mmc-detect", slot); | ||
1565 | if (ret) { | ||
1566 | dev_dbg(&mmc->class_dev, | ||
1567 | "could not request IRQ %d for detect pin\n", | ||
1568 | gpio_to_irq(slot->detect_pin)); | ||
1569 | gpio_free(slot->detect_pin); | ||
1570 | slot->detect_pin = -EBUSY; | ||
1571 | } | ||
1572 | } | ||
1573 | |||
1574 | atmci_init_debugfs(slot); | ||
1575 | |||
1576 | return 0; | ||
1577 | } | ||
1578 | |||
1579 | static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, | ||
1580 | unsigned int id) | ||
1581 | { | ||
1582 | /* Debugfs stuff is cleaned up by mmc core */ | ||
1583 | |||
1584 | set_bit(ATMCI_SHUTDOWN, &slot->flags); | ||
1585 | smp_wmb(); | ||
1586 | |||
1587 | mmc_remove_host(slot->mmc); | ||
1588 | |||
1589 | if (gpio_is_valid(slot->detect_pin)) { | ||
1590 | int pin = slot->detect_pin; | ||
1591 | |||
1592 | free_irq(gpio_to_irq(pin), slot); | ||
1593 | del_timer_sync(&slot->detect_timer); | ||
1594 | gpio_free(pin); | ||
1595 | } | ||
1596 | if (gpio_is_valid(slot->wp_pin)) | ||
1597 | gpio_free(slot->wp_pin); | ||
1598 | |||
1599 | slot->host->slot[id] = NULL; | ||
1600 | mmc_free_host(slot->mmc); | ||
1601 | } | ||
1602 | |||
987 | static int __init atmci_probe(struct platform_device *pdev) | 1603 | static int __init atmci_probe(struct platform_device *pdev) |
988 | { | 1604 | { |
989 | struct mci_platform_data *pdata; | 1605 | struct mci_platform_data *pdata; |
990 | struct atmel_mci *host; | 1606 | struct atmel_mci *host; |
991 | struct mmc_host *mmc; | 1607 | struct resource *regs; |
992 | struct resource *regs; | 1608 | unsigned int nr_slots; |
993 | int irq; | 1609 | int irq; |
994 | int ret; | 1610 | int ret; |
995 | 1611 | ||
996 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1612 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
997 | if (!regs) | 1613 | if (!regs) |
@@ -1003,15 +1619,13 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1003 | if (irq < 0) | 1619 | if (irq < 0) |
1004 | return irq; | 1620 | return irq; |
1005 | 1621 | ||
1006 | mmc = mmc_alloc_host(sizeof(struct atmel_mci), &pdev->dev); | 1622 | host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); |
1007 | if (!mmc) | 1623 | if (!host) |
1008 | return -ENOMEM; | 1624 | return -ENOMEM; |
1009 | 1625 | ||
1010 | host = mmc_priv(mmc); | ||
1011 | host->pdev = pdev; | 1626 | host->pdev = pdev; |
1012 | host->mmc = mmc; | 1627 | spin_lock_init(&host->lock); |
1013 | host->detect_pin = pdata->detect_pin; | 1628 | INIT_LIST_HEAD(&host->queue); |
1014 | host->wp_pin = pdata->wp_pin; | ||
1015 | 1629 | ||
1016 | host->mck = clk_get(&pdev->dev, "mci_clk"); | 1630 | host->mck = clk_get(&pdev->dev, "mci_clk"); |
1017 | if (IS_ERR(host->mck)) { | 1631 | if (IS_ERR(host->mck)) { |
@@ -1031,122 +1645,102 @@ static int __init atmci_probe(struct platform_device *pdev) | |||
1031 | 1645 | ||
1032 | host->mapbase = regs->start; | 1646 | host->mapbase = regs->start; |
1033 | 1647 | ||
1034 | mmc->ops = &atmci_ops; | 1648 | tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); |
1035 | mmc->f_min = (host->bus_hz + 511) / 512; | ||
1036 | mmc->f_max = host->bus_hz / 2; | ||
1037 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | ||
1038 | mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
1039 | |||
1040 | mmc->max_hw_segs = 64; | ||
1041 | mmc->max_phys_segs = 64; | ||
1042 | mmc->max_req_size = 32768 * 512; | ||
1043 | mmc->max_blk_size = 32768; | ||
1044 | mmc->max_blk_count = 512; | ||
1045 | |||
1046 | tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)mmc); | ||
1047 | 1649 | ||
1048 | ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, mmc); | 1650 | ret = request_irq(irq, atmci_interrupt, 0, pdev->dev.bus_id, host); |
1049 | if (ret) | 1651 | if (ret) |
1050 | goto err_request_irq; | 1652 | goto err_request_irq; |
1051 | 1653 | ||
1052 | /* Assume card is present if we don't have a detect pin */ | 1654 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1053 | host->present = 1; | 1655 | if (pdata->dma_slave) { |
1054 | if (gpio_is_valid(host->detect_pin)) { | 1656 | struct dma_slave *slave = pdata->dma_slave; |
1055 | if (gpio_request(host->detect_pin, "mmc_detect")) { | ||
1056 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); | ||
1057 | host->detect_pin = -1; | ||
1058 | } else { | ||
1059 | host->present = !gpio_get_value(host->detect_pin); | ||
1060 | } | ||
1061 | } | ||
1062 | 1657 | ||
1063 | if (!gpio_is_valid(host->detect_pin)) | 1658 | slave->tx_reg = regs->start + MCI_TDR; |
1064 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 1659 | slave->rx_reg = regs->start + MCI_RDR; |
1065 | 1660 | ||
1066 | if (gpio_is_valid(host->wp_pin)) { | 1661 | /* Try to grab a DMA channel */ |
1067 | if (gpio_request(host->wp_pin, "mmc_wp")) { | 1662 | host->dma.client.event_callback = atmci_dma_event; |
1068 | dev_dbg(&mmc->class_dev, "no WP pin available\n"); | 1663 | dma_cap_set(DMA_SLAVE, host->dma.client.cap_mask); |
1069 | host->wp_pin = -1; | 1664 | host->dma.client.slave = slave; |
1070 | } | 1665 | |
1666 | dma_async_client_register(&host->dma.client); | ||
1667 | dma_async_client_chan_request(&host->dma.client); | ||
1668 | } else { | ||
1669 | dev_notice(&pdev->dev, "DMA not available, using PIO\n"); | ||
1071 | } | 1670 | } |
1671 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | ||
1072 | 1672 | ||
1073 | platform_set_drvdata(pdev, host); | 1673 | platform_set_drvdata(pdev, host); |
1074 | 1674 | ||
1075 | mmc_add_host(mmc); | 1675 | /* We need at least one slot to succeed */ |
1076 | 1676 | nr_slots = 0; | |
1077 | if (gpio_is_valid(host->detect_pin)) { | 1677 | ret = -ENODEV; |
1078 | setup_timer(&host->detect_timer, atmci_detect_change, | 1678 | if (pdata->slot[0].bus_width) { |
1079 | (unsigned long)host); | 1679 | ret = atmci_init_slot(host, &pdata->slot[0], |
1080 | 1680 | MCI_SDCSEL_SLOT_A, 0); | |
1081 | ret = request_irq(gpio_to_irq(host->detect_pin), | 1681 | if (!ret) |
1082 | atmci_detect_interrupt, | 1682 | nr_slots++; |
1083 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 1683 | } |
1084 | "mmc-detect", mmc); | 1684 | if (pdata->slot[1].bus_width) { |
1085 | if (ret) { | 1685 | ret = atmci_init_slot(host, &pdata->slot[1], |
1086 | dev_dbg(&mmc->class_dev, | 1686 | MCI_SDCSEL_SLOT_B, 1); |
1087 | "could not request IRQ %d for detect pin\n", | 1687 | if (!ret) |
1088 | gpio_to_irq(host->detect_pin)); | 1688 | nr_slots++; |
1089 | gpio_free(host->detect_pin); | ||
1090 | host->detect_pin = -1; | ||
1091 | } | ||
1092 | } | 1689 | } |
1093 | 1690 | ||
1094 | dev_info(&mmc->class_dev, | 1691 | if (!nr_slots) |
1095 | "Atmel MCI controller at 0x%08lx irq %d\n", | 1692 | goto err_init_slot; |
1096 | host->mapbase, irq); | ||
1097 | 1693 | ||
1098 | atmci_init_debugfs(host); | 1694 | dev_info(&pdev->dev, |
1695 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", | ||
1696 | host->mapbase, irq, nr_slots); | ||
1099 | 1697 | ||
1100 | return 0; | 1698 | return 0; |
1101 | 1699 | ||
1700 | err_init_slot: | ||
1701 | #ifdef CONFIG_MMC_ATMELMCI_DMA | ||
1702 | if (pdata->dma_slave) | ||
1703 | dma_async_client_unregister(&host->dma.client); | ||
1704 | #endif | ||
1705 | free_irq(irq, host); | ||
1102 | err_request_irq: | 1706 | err_request_irq: |
1103 | iounmap(host->regs); | 1707 | iounmap(host->regs); |
1104 | err_ioremap: | 1708 | err_ioremap: |
1105 | clk_put(host->mck); | 1709 | clk_put(host->mck); |
1106 | err_clk_get: | 1710 | err_clk_get: |
1107 | mmc_free_host(mmc); | 1711 | kfree(host); |
1108 | return ret; | 1712 | return ret; |
1109 | } | 1713 | } |
1110 | 1714 | ||
1111 | static int __exit atmci_remove(struct platform_device *pdev) | 1715 | static int __exit atmci_remove(struct platform_device *pdev) |
1112 | { | 1716 | { |
1113 | struct atmel_mci *host = platform_get_drvdata(pdev); | 1717 | struct atmel_mci *host = platform_get_drvdata(pdev); |
1718 | unsigned int i; | ||
1114 | 1719 | ||
1115 | platform_set_drvdata(pdev, NULL); | 1720 | platform_set_drvdata(pdev, NULL); |
1116 | 1721 | ||
1117 | if (host) { | 1722 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
1118 | /* Debugfs stuff is cleaned up by mmc core */ | 1723 | if (host->slot[i]) |
1119 | 1724 | atmci_cleanup_slot(host->slot[i], i); | |
1120 | if (gpio_is_valid(host->detect_pin)) { | 1725 | } |
1121 | int pin = host->detect_pin; | ||
1122 | |||
1123 | /* Make sure the timer doesn't enable the interrupt */ | ||
1124 | host->detect_pin = -1; | ||
1125 | smp_wmb(); | ||
1126 | |||
1127 | free_irq(gpio_to_irq(pin), host->mmc); | ||
1128 | del_timer_sync(&host->detect_timer); | ||
1129 | gpio_free(pin); | ||
1130 | } | ||
1131 | |||
1132 | mmc_remove_host(host->mmc); | ||
1133 | 1726 | ||
1134 | clk_enable(host->mck); | 1727 | clk_enable(host->mck); |
1135 | mci_writel(host, IDR, ~0UL); | 1728 | mci_writel(host, IDR, ~0UL); |
1136 | mci_writel(host, CR, MCI_CR_MCIDIS); | 1729 | mci_writel(host, CR, MCI_CR_MCIDIS); |
1137 | mci_readl(host, SR); | 1730 | mci_readl(host, SR); |
1138 | clk_disable(host->mck); | 1731 | clk_disable(host->mck); |
1139 | 1732 | ||
1140 | if (gpio_is_valid(host->wp_pin)) | 1733 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1141 | gpio_free(host->wp_pin); | 1734 | if (host->dma.client.slave) |
1735 | dma_async_client_unregister(&host->dma.client); | ||
1736 | #endif | ||
1142 | 1737 | ||
1143 | free_irq(platform_get_irq(pdev, 0), host->mmc); | 1738 | free_irq(platform_get_irq(pdev, 0), host); |
1144 | iounmap(host->regs); | 1739 | iounmap(host->regs); |
1145 | 1740 | ||
1146 | clk_put(host->mck); | 1741 | clk_put(host->mck); |
1742 | kfree(host); | ||
1147 | 1743 | ||
1148 | mmc_free_host(host->mmc); | ||
1149 | } | ||
1150 | return 0; | 1744 | return 0; |
1151 | } | 1745 | } |
1152 | 1746 | ||
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index ac4e506b4f88..5ea6b60fa377 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h | |||
@@ -257,7 +257,6 @@ struct e1000_adapter { | |||
257 | struct net_device *netdev; | 257 | struct net_device *netdev; |
258 | struct pci_dev *pdev; | 258 | struct pci_dev *pdev; |
259 | struct net_device_stats net_stats; | 259 | struct net_device_stats net_stats; |
260 | spinlock_t stats_lock; /* prevent concurrent stats updates */ | ||
261 | 260 | ||
262 | /* structs defined in e1000_hw.h */ | 261 | /* structs defined in e1000_hw.h */ |
263 | struct e1000_hw hw; | 262 | struct e1000_hw hw; |
@@ -284,6 +283,8 @@ struct e1000_adapter { | |||
284 | unsigned long led_status; | 283 | unsigned long led_status; |
285 | 284 | ||
286 | unsigned int flags; | 285 | unsigned int flags; |
286 | struct work_struct downshift_task; | ||
287 | struct work_struct update_phy_task; | ||
287 | }; | 288 | }; |
288 | 289 | ||
289 | struct e1000_info { | 290 | struct e1000_info { |
@@ -305,6 +306,7 @@ struct e1000_info { | |||
305 | #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) | 306 | #define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) |
306 | #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) | 307 | #define FLAG_HAS_SWSM_ON_LOAD (1 << 6) |
307 | #define FLAG_HAS_JUMBO_FRAMES (1 << 7) | 308 | #define FLAG_HAS_JUMBO_FRAMES (1 << 7) |
309 | #define FLAG_READ_ONLY_NVM (1 << 8) | ||
308 | #define FLAG_IS_ICH (1 << 9) | 310 | #define FLAG_IS_ICH (1 << 9) |
309 | #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) | 311 | #define FLAG_HAS_SMART_POWER_DOWN (1 << 11) |
310 | #define FLAG_IS_QUAD_PORT_A (1 << 12) | 312 | #define FLAG_IS_QUAD_PORT_A (1 << 12) |
@@ -385,6 +387,7 @@ extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); | |||
385 | extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); | 387 | extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw); |
386 | extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); | 388 | extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); |
387 | 389 | ||
390 | extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); | ||
388 | extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, | 391 | extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, |
389 | bool state); | 392 | bool state); |
390 | extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); | 393 | extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); |
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index e21c9e0f3738..33a3ff17b5d0 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c | |||
@@ -432,6 +432,10 @@ static void e1000_get_regs(struct net_device *netdev, | |||
432 | regs_buff[11] = er32(TIDV); | 432 | regs_buff[11] = er32(TIDV); |
433 | 433 | ||
434 | regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ | 434 | regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ |
435 | |||
436 | /* ethtool doesn't use anything past this point, so all this | ||
437 | * code is likely legacy junk for apps that may or may not | ||
438 | * exist */ | ||
435 | if (hw->phy.type == e1000_phy_m88) { | 439 | if (hw->phy.type == e1000_phy_m88) { |
436 | e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); | 440 | e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); |
437 | regs_buff[13] = (u32)phy_data; /* cable length */ | 441 | regs_buff[13] = (u32)phy_data; /* cable length */ |
@@ -447,7 +451,7 @@ static void e1000_get_regs(struct net_device *netdev, | |||
447 | regs_buff[22] = adapter->phy_stats.receive_errors; | 451 | regs_buff[22] = adapter->phy_stats.receive_errors; |
448 | regs_buff[23] = regs_buff[13]; /* mdix mode */ | 452 | regs_buff[23] = regs_buff[13]; /* mdix mode */ |
449 | } | 453 | } |
450 | regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ | 454 | regs_buff[21] = 0; /* was idle_errors */ |
451 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); | 455 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_data); |
452 | regs_buff[24] = (u32)phy_data; /* phy local receiver status */ | 456 | regs_buff[24] = (u32)phy_data; /* phy local receiver status */ |
453 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ | 457 | regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ |
@@ -529,6 +533,9 @@ static int e1000_set_eeprom(struct net_device *netdev, | |||
529 | if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) | 533 | if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) |
530 | return -EFAULT; | 534 | return -EFAULT; |
531 | 535 | ||
536 | if (adapter->flags & FLAG_READ_ONLY_NVM) | ||
537 | return -EINVAL; | ||
538 | |||
532 | max_len = hw->nvm.word_size * 2; | 539 | max_len = hw->nvm.word_size * 2; |
533 | 540 | ||
534 | first_word = eeprom->offset >> 1; | 541 | first_word = eeprom->offset >> 1; |
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 9e38452a738c..bcd2bc477af2 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #define ICH_FLASH_HSFCTL 0x0006 | 58 | #define ICH_FLASH_HSFCTL 0x0006 |
59 | #define ICH_FLASH_FADDR 0x0008 | 59 | #define ICH_FLASH_FADDR 0x0008 |
60 | #define ICH_FLASH_FDATA0 0x0010 | 60 | #define ICH_FLASH_FDATA0 0x0010 |
61 | #define ICH_FLASH_PR0 0x0074 | ||
61 | 62 | ||
62 | #define ICH_FLASH_READ_COMMAND_TIMEOUT 500 | 63 | #define ICH_FLASH_READ_COMMAND_TIMEOUT 500 |
63 | #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 | 64 | #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500 |
@@ -150,6 +151,19 @@ union ich8_hws_flash_regacc { | |||
150 | u16 regval; | 151 | u16 regval; |
151 | }; | 152 | }; |
152 | 153 | ||
154 | /* ICH Flash Protected Region */ | ||
155 | union ich8_flash_protected_range { | ||
156 | struct ich8_pr { | ||
157 | u32 base:13; /* 0:12 Protected Range Base */ | ||
158 | u32 reserved1:2; /* 13:14 Reserved */ | ||
159 | u32 rpe:1; /* 15 Read Protection Enable */ | ||
160 | u32 limit:13; /* 16:28 Protected Range Limit */ | ||
161 | u32 reserved2:2; /* 29:30 Reserved */ | ||
162 | u32 wpe:1; /* 31 Write Protection Enable */ | ||
163 | } range; | ||
164 | u32 regval; | ||
165 | }; | ||
166 | |||
153 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); | 167 | static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw); |
154 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); | 168 | static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); |
155 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); | 169 | static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); |
@@ -366,6 +380,9 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) | |||
366 | return 0; | 380 | return 0; |
367 | } | 381 | } |
368 | 382 | ||
383 | static DEFINE_MUTEX(nvm_mutex); | ||
384 | static pid_t nvm_owner = -1; | ||
385 | |||
369 | /** | 386 | /** |
370 | * e1000_acquire_swflag_ich8lan - Acquire software control flag | 387 | * e1000_acquire_swflag_ich8lan - Acquire software control flag |
371 | * @hw: pointer to the HW structure | 388 | * @hw: pointer to the HW structure |
@@ -379,6 +396,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
379 | u32 extcnf_ctrl; | 396 | u32 extcnf_ctrl; |
380 | u32 timeout = PHY_CFG_TIMEOUT; | 397 | u32 timeout = PHY_CFG_TIMEOUT; |
381 | 398 | ||
399 | might_sleep(); | ||
400 | |||
401 | if (!mutex_trylock(&nvm_mutex)) { | ||
402 | WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", | ||
403 | nvm_owner); | ||
404 | mutex_lock(&nvm_mutex); | ||
405 | } | ||
406 | nvm_owner = current->pid; | ||
407 | |||
382 | while (timeout) { | 408 | while (timeout) { |
383 | extcnf_ctrl = er32(EXTCNF_CTRL); | 409 | extcnf_ctrl = er32(EXTCNF_CTRL); |
384 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; | 410 | extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; |
@@ -393,6 +419,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
393 | 419 | ||
394 | if (!timeout) { | 420 | if (!timeout) { |
395 | hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); | 421 | hw_dbg(hw, "FW or HW has locked the resource for too long.\n"); |
422 | nvm_owner = -1; | ||
423 | mutex_unlock(&nvm_mutex); | ||
396 | return -E1000_ERR_CONFIG; | 424 | return -E1000_ERR_CONFIG; |
397 | } | 425 | } |
398 | 426 | ||
@@ -414,6 +442,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) | |||
414 | extcnf_ctrl = er32(EXTCNF_CTRL); | 442 | extcnf_ctrl = er32(EXTCNF_CTRL); |
415 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; | 443 | extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; |
416 | ew32(EXTCNF_CTRL, extcnf_ctrl); | 444 | ew32(EXTCNF_CTRL, extcnf_ctrl); |
445 | |||
446 | nvm_owner = -1; | ||
447 | mutex_unlock(&nvm_mutex); | ||
417 | } | 448 | } |
418 | 449 | ||
419 | /** | 450 | /** |
@@ -1284,6 +1315,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1284 | * programming failed. | 1315 | * programming failed. |
1285 | */ | 1316 | */ |
1286 | if (ret_val) { | 1317 | if (ret_val) { |
1318 | /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ | ||
1287 | hw_dbg(hw, "Flash commit failed.\n"); | 1319 | hw_dbg(hw, "Flash commit failed.\n"); |
1288 | e1000_release_swflag_ich8lan(hw); | 1320 | e1000_release_swflag_ich8lan(hw); |
1289 | return ret_val; | 1321 | return ret_val; |
@@ -1374,6 +1406,49 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) | |||
1374 | } | 1406 | } |
1375 | 1407 | ||
1376 | /** | 1408 | /** |
1409 | * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only | ||
1410 | * @hw: pointer to the HW structure | ||
1411 | * | ||
1412 | * To prevent malicious write/erase of the NVM, set it to be read-only | ||
1413 | * so that the hardware ignores all write/erase cycles of the NVM via | ||
1414 | * the flash control registers. The shadow-ram copy of the NVM will | ||
1415 | * still be updated, however any updates to this copy will not stick | ||
1416 | * across driver reloads. | ||
1417 | **/ | ||
1418 | void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) | ||
1419 | { | ||
1420 | union ich8_flash_protected_range pr0; | ||
1421 | union ich8_hws_flash_status hsfsts; | ||
1422 | u32 gfpreg; | ||
1423 | s32 ret_val; | ||
1424 | |||
1425 | ret_val = e1000_acquire_swflag_ich8lan(hw); | ||
1426 | if (ret_val) | ||
1427 | return; | ||
1428 | |||
1429 | gfpreg = er32flash(ICH_FLASH_GFPREG); | ||
1430 | |||
1431 | /* Write-protect GbE Sector of NVM */ | ||
1432 | pr0.regval = er32flash(ICH_FLASH_PR0); | ||
1433 | pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; | ||
1434 | pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); | ||
1435 | pr0.range.wpe = true; | ||
1436 | ew32flash(ICH_FLASH_PR0, pr0.regval); | ||
1437 | |||
1438 | /* | ||
1439 | * Lock down a subset of GbE Flash Control Registers, e.g. | ||
1440 | * PR0 to prevent the write-protection from being lifted. | ||
1441 | * Once FLOCKDN is set, the registers protected by it cannot | ||
1442 | * be written until FLOCKDN is cleared by a hardware reset. | ||
1443 | */ | ||
1444 | hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); | ||
1445 | hsfsts.hsf_status.flockdn = true; | ||
1446 | ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); | ||
1447 | |||
1448 | e1000_release_swflag_ich8lan(hw); | ||
1449 | } | ||
1450 | |||
1451 | /** | ||
1377 | * e1000_write_flash_data_ich8lan - Writes bytes to the NVM | 1452 | * e1000_write_flash_data_ich8lan - Writes bytes to the NVM |
1378 | * @hw: pointer to the HW structure | 1453 | * @hw: pointer to the HW structure |
1379 | * @offset: The offset (in bytes) of the byte/word to read. | 1454 | * @offset: The offset (in bytes) of the byte/word to read. |
@@ -1720,6 +1795,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) | |||
1720 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); | 1795 | ew32(CTRL, (ctrl | E1000_CTRL_RST)); |
1721 | msleep(20); | 1796 | msleep(20); |
1722 | 1797 | ||
1798 | /* release the swflag because it is not reset by hardware reset */ | ||
1799 | e1000_release_swflag_ich8lan(hw); | ||
1800 | |||
1723 | ret_val = e1000e_get_auto_rd_done(hw); | 1801 | ret_val = e1000e_get_auto_rd_done(hw); |
1724 | if (ret_val) { | 1802 | if (ret_val) { |
1725 | /* | 1803 | /* |
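The ich8lan.c hunks above serialize all NVM/flash access behind a single nvm_mutex, record the owning pid, and use a trylock-then-warn step so any contention is reported with the pid of the current holder before falling back to a normal blocking lock; e1000e_write_protect_nvm_ich8lan() then write-protects the GbE sector by programming PR0 and setting FLOCKDN, which stays set until a hardware reset. Below is a minimal, driver-independent sketch of the contention-diagnosing lock pattern; the resource_* names are illustrative, not taken from e1000e.

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/sched.h>

    static DEFINE_MUTEX(resource_mutex);
    static pid_t resource_owner = -1;

    static void acquire_resource(void)
    {
            might_sleep();

            /* Report contention (with the holder's pid) once, then
             * fall back to an ordinary blocking acquisition. */
            if (!mutex_trylock(&resource_mutex)) {
                    WARN(1, "resource contention, owned by pid %d\n",
                         resource_owner);
                    mutex_lock(&resource_mutex);
            }
            resource_owner = current->pid;
    }

    static void release_resource(void)
    {
            resource_owner = -1;
            mutex_unlock(&resource_mutex);
    }

Note that the reset path in the hunk above also releases the software flag explicitly, since a hardware reset does not drop the mutex on its own.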
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d266510c8a94..b81c4237b5d3 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #include "e1000.h" | 48 | #include "e1000.h" |
49 | 49 | ||
50 | #define DRV_VERSION "0.3.3.3-k2" | 50 | #define DRV_VERSION "0.3.3.3-k6" |
51 | char e1000e_driver_name[] = "e1000e"; | 51 | char e1000e_driver_name[] = "e1000e"; |
52 | const char e1000e_driver_version[] = DRV_VERSION; | 52 | const char e1000e_driver_version[] = DRV_VERSION; |
53 | 53 | ||
@@ -1115,6 +1115,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter) | |||
1115 | writel(0, adapter->hw.hw_addr + rx_ring->tail); | 1115 | writel(0, adapter->hw.hw_addr + rx_ring->tail); |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | static void e1000e_downshift_workaround(struct work_struct *work) | ||
1119 | { | ||
1120 | struct e1000_adapter *adapter = container_of(work, | ||
1121 | struct e1000_adapter, downshift_task); | ||
1122 | |||
1123 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | ||
1124 | } | ||
1125 | |||
1118 | /** | 1126 | /** |
1119 | * e1000_intr_msi - Interrupt Handler | 1127 | * e1000_intr_msi - Interrupt Handler |
1120 | * @irq: interrupt number | 1128 | * @irq: interrupt number |
@@ -1139,7 +1147,7 @@ static irqreturn_t e1000_intr_msi(int irq, void *data) | |||
1139 | */ | 1147 | */ |
1140 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | 1148 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && |
1141 | (!(er32(STATUS) & E1000_STATUS_LU))) | 1149 | (!(er32(STATUS) & E1000_STATUS_LU))) |
1142 | e1000e_gig_downshift_workaround_ich8lan(hw); | 1150 | schedule_work(&adapter->downshift_task); |
1143 | 1151 | ||
1144 | /* | 1152 | /* |
1145 | * 80003ES2LAN workaround-- For packet buffer work-around on | 1153 | * 80003ES2LAN workaround-- For packet buffer work-around on |
@@ -1205,7 +1213,7 @@ static irqreturn_t e1000_intr(int irq, void *data) | |||
1205 | */ | 1213 | */ |
1206 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && | 1214 | if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && |
1207 | (!(er32(STATUS) & E1000_STATUS_LU))) | 1215 | (!(er32(STATUS) & E1000_STATUS_LU))) |
1208 | e1000e_gig_downshift_workaround_ich8lan(hw); | 1216 | schedule_work(&adapter->downshift_task); |
1209 | 1217 | ||
1210 | /* | 1218 | /* |
1211 | * 80003ES2LAN workaround-- | 1219 | * 80003ES2LAN workaround-- |
@@ -2592,8 +2600,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) | |||
2592 | /* Explicitly disable IRQ since the NIC can be in any state. */ | 2600 | /* Explicitly disable IRQ since the NIC can be in any state. */ |
2593 | e1000_irq_disable(adapter); | 2601 | e1000_irq_disable(adapter); |
2594 | 2602 | ||
2595 | spin_lock_init(&adapter->stats_lock); | ||
2596 | |||
2597 | set_bit(__E1000_DOWN, &adapter->state); | 2603 | set_bit(__E1000_DOWN, &adapter->state); |
2598 | return 0; | 2604 | return 0; |
2599 | 2605 | ||
@@ -2912,6 +2918,21 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2912 | return 0; | 2918 | return 0; |
2913 | } | 2919 | } |
2914 | 2920 | ||
2921 | /** | ||
2922 | * e1000e_update_phy_task - work thread to update phy | ||
2923 | * @work: pointer to our work struct | ||
2924 | * | ||
2925 | * this worker thread exists because we must acquire a | ||
2926 | * semaphore to read the phy, which we could msleep while | ||
2927 | * waiting for it, and we can't msleep in a timer. | ||
2928 | **/ | ||
2929 | static void e1000e_update_phy_task(struct work_struct *work) | ||
2930 | { | ||
2931 | struct e1000_adapter *adapter = container_of(work, | ||
2932 | struct e1000_adapter, update_phy_task); | ||
2933 | e1000_get_phy_info(&adapter->hw); | ||
2934 | } | ||
2935 | |||
2915 | /* | 2936 | /* |
2916 | * Need to wait a few seconds after link up to get diagnostic information from | 2937 | * Need to wait a few seconds after link up to get diagnostic information from |
2917 | * the phy | 2938 | * the phy |
@@ -2919,7 +2940,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p) | |||
2919 | static void e1000_update_phy_info(unsigned long data) | 2940 | static void e1000_update_phy_info(unsigned long data) |
2920 | { | 2941 | { |
2921 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 2942 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
2922 | e1000_get_phy_info(&adapter->hw); | 2943 | schedule_work(&adapter->update_phy_task); |
2923 | } | 2944 | } |
2924 | 2945 | ||
2925 | /** | 2946 | /** |
@@ -2930,10 +2951,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2930 | { | 2951 | { |
2931 | struct e1000_hw *hw = &adapter->hw; | 2952 | struct e1000_hw *hw = &adapter->hw; |
2932 | struct pci_dev *pdev = adapter->pdev; | 2953 | struct pci_dev *pdev = adapter->pdev; |
2933 | unsigned long irq_flags; | ||
2934 | u16 phy_tmp; | ||
2935 | |||
2936 | #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF | ||
2937 | 2954 | ||
2938 | /* | 2955 | /* |
2939 | * Prevent stats update while adapter is being reset, or if the pci | 2956 | * Prevent stats update while adapter is being reset, or if the pci |
@@ -2944,14 +2961,6 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
2944 | if (pci_channel_offline(pdev)) | 2961 | if (pci_channel_offline(pdev)) |
2945 | return; | 2962 | return; |
2946 | 2963 | ||
2947 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); | ||
2948 | |||
2949 | /* | ||
2950 | * these counters are modified from e1000_adjust_tbi_stats, | ||
2951 | * called from the interrupt context, so they must only | ||
2952 | * be written while holding adapter->stats_lock | ||
2953 | */ | ||
2954 | |||
2955 | adapter->stats.crcerrs += er32(CRCERRS); | 2964 | adapter->stats.crcerrs += er32(CRCERRS); |
2956 | adapter->stats.gprc += er32(GPRC); | 2965 | adapter->stats.gprc += er32(GPRC); |
2957 | adapter->stats.gorc += er32(GORCL); | 2966 | adapter->stats.gorc += er32(GORCL); |
@@ -3022,21 +3031,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter) | |||
3022 | 3031 | ||
3023 | /* Tx Dropped needs to be maintained elsewhere */ | 3032 | /* Tx Dropped needs to be maintained elsewhere */ |
3024 | 3033 | ||
3025 | /* Phy Stats */ | ||
3026 | if (hw->phy.media_type == e1000_media_type_copper) { | ||
3027 | if ((adapter->link_speed == SPEED_1000) && | ||
3028 | (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { | ||
3029 | phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; | ||
3030 | adapter->phy_stats.idle_errors += phy_tmp; | ||
3031 | } | ||
3032 | } | ||
3033 | |||
3034 | /* Management Stats */ | 3034 | /* Management Stats */ |
3035 | adapter->stats.mgptc += er32(MGTPTC); | 3035 | adapter->stats.mgptc += er32(MGTPTC); |
3036 | adapter->stats.mgprc += er32(MGTPRC); | 3036 | adapter->stats.mgprc += er32(MGTPRC); |
3037 | adapter->stats.mgpdc += er32(MGTPDC); | 3037 | adapter->stats.mgpdc += er32(MGTPDC); |
3038 | |||
3039 | spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); | ||
3040 | } | 3038 | } |
3041 | 3039 | ||
3042 | /** | 3040 | /** |
@@ -3048,10 +3046,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
3048 | struct e1000_hw *hw = &adapter->hw; | 3046 | struct e1000_hw *hw = &adapter->hw; |
3049 | struct e1000_phy_regs *phy = &adapter->phy_regs; | 3047 | struct e1000_phy_regs *phy = &adapter->phy_regs; |
3050 | int ret_val; | 3048 | int ret_val; |
3051 | unsigned long irq_flags; | ||
3052 | |||
3053 | |||
3054 | spin_lock_irqsave(&adapter->stats_lock, irq_flags); | ||
3055 | 3049 | ||
3056 | if ((er32(STATUS) & E1000_STATUS_LU) && | 3050 | if ((er32(STATUS) & E1000_STATUS_LU) && |
3057 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { | 3051 | (adapter->hw.phy.media_type == e1000_media_type_copper)) { |
@@ -3082,8 +3076,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) | |||
3082 | phy->stat1000 = 0; | 3076 | phy->stat1000 = 0; |
3083 | phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); | 3077 | phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); |
3084 | } | 3078 | } |
3085 | |||
3086 | spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); | ||
3087 | } | 3079 | } |
3088 | 3080 | ||
3089 | static void e1000_print_link_info(struct e1000_adapter *adapter) | 3081 | static void e1000_print_link_info(struct e1000_adapter *adapter) |
@@ -4467,6 +4459,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4467 | 4459 | ||
4468 | adapter->bd_number = cards_found++; | 4460 | adapter->bd_number = cards_found++; |
4469 | 4461 | ||
4462 | e1000e_check_options(adapter); | ||
4463 | |||
4470 | /* setup adapter struct */ | 4464 | /* setup adapter struct */ |
4471 | err = e1000_sw_init(adapter); | 4465 | err = e1000_sw_init(adapter); |
4472 | if (err) | 4466 | if (err) |
@@ -4482,6 +4476,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4482 | if (err) | 4476 | if (err) |
4483 | goto err_hw_init; | 4477 | goto err_hw_init; |
4484 | 4478 | ||
4479 | if ((adapter->flags & FLAG_IS_ICH) && | ||
4480 | (adapter->flags & FLAG_READ_ONLY_NVM)) | ||
4481 | e1000e_write_protect_nvm_ich8lan(&adapter->hw); | ||
4482 | |||
4485 | hw->mac.ops.get_bus_info(&adapter->hw); | 4483 | hw->mac.ops.get_bus_info(&adapter->hw); |
4486 | 4484 | ||
4487 | adapter->hw.phy.autoneg_wait_to_complete = 0; | 4485 | adapter->hw.phy.autoneg_wait_to_complete = 0; |
@@ -4572,8 +4570,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
4572 | 4570 | ||
4573 | INIT_WORK(&adapter->reset_task, e1000_reset_task); | 4571 | INIT_WORK(&adapter->reset_task, e1000_reset_task); |
4574 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); | 4572 | INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); |
4575 | 4573 | INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); | |
4576 | e1000e_check_options(adapter); | 4574 | INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); |
4577 | 4575 | ||
4578 | /* Initialize link parameters. User can change them with ethtool */ | 4576 | /* Initialize link parameters. User can change them with ethtool */ |
4579 | adapter->hw.mac.autoneg = 1; | 4577 | adapter->hw.mac.autoneg = 1; |
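The netdev.c changes replace direct calls to e1000e_gig_downshift_workaround_ich8lan() and e1000_get_phy_info() from interrupt and timer context with the downshift_task and update_phy_task work items, because those paths now need the NVM/PHY lock and may sleep, which a timer or hard-IRQ handler must not do. A hypothetical sketch of that defer-to-process-context pattern (the my_* names are not from the driver):

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_adapter {
            struct timer_list phy_timer;    /* set up with setup_timer() */
            struct work_struct phy_task;    /* set up with INIT_WORK() */
    };

    /* Process context: may sleep, take mutexes, touch the PHY. */
    static void my_phy_task(struct work_struct *work)
    {
            struct my_adapter *adapter =
                    container_of(work, struct my_adapter, phy_task);

            /* ... slow, possibly sleeping PHY access ... */
            (void)adapter;
    }

    /* Timer (atomic) context: must not sleep, so just defer. */
    static void my_phy_timer(unsigned long data)
    {
            struct my_adapter *adapter = (struct my_adapter *)data;

            schedule_work(&adapter->phy_task);
    }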
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index ed912e023a72..d91dbf7ba434 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c | |||
@@ -133,6 +133,15 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); | |||
133 | */ | 133 | */ |
134 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); | 134 | E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); |
135 | 135 | ||
136 | /* | ||
137 | * Write Protect NVM | ||
138 | * | ||
139 | * Valid Range: 0, 1 | ||
140 | * | ||
141 | * Default Value: 1 (enabled) | ||
142 | */ | ||
143 | E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); | ||
144 | |||
136 | struct e1000_option { | 145 | struct e1000_option { |
137 | enum { enable_option, range_option, list_option } type; | 146 | enum { enable_option, range_option, list_option } type; |
138 | const char *name; | 147 | const char *name; |
@@ -388,4 +397,25 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) | |||
388 | opt.def); | 397 | opt.def); |
389 | } | 398 | } |
390 | } | 399 | } |
400 | { /* Write-protect NVM */ | ||
401 | const struct e1000_option opt = { | ||
402 | .type = enable_option, | ||
403 | .name = "Write-protect NVM", | ||
404 | .err = "defaulting to Enabled", | ||
405 | .def = OPTION_ENABLED | ||
406 | }; | ||
407 | |||
408 | if (adapter->flags & FLAG_IS_ICH) { | ||
409 | if (num_WriteProtectNVM > bd) { | ||
410 | unsigned int write_protect_nvm = WriteProtectNVM[bd]; | ||
411 | e1000_validate_option(&write_protect_nvm, &opt, | ||
412 | adapter); | ||
413 | if (write_protect_nvm) | ||
414 | adapter->flags |= FLAG_READ_ONLY_NVM; | ||
415 | } else { | ||
416 | if (opt.def) | ||
417 | adapter->flags |= FLAG_READ_ONLY_NVM; | ||
418 | } | ||
419 | } | ||
420 | } | ||
391 | } | 421 | } |
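The param.c hunk adds a per-adapter WriteProtectNVM option, defaulting to enabled, which on ICH parts sets FLAG_READ_ONLY_NVM so the flash is locked down at probe time. Assuming the E1000_PARAM macro boils down to module_param_array() plus a count variable (an assumption; the macro body is not part of this diff), an equivalent standalone option would look roughly like this:

    #include <linux/module.h>

    #define MAX_BOARDS 32

    /* Hypothetical per-board option; -1 means "use the default". */
    static int WriteProtect[MAX_BOARDS] = { [0 ... MAX_BOARDS - 1] = -1 };
    static unsigned int num_WriteProtect;
    module_param_array(WriteProtect, int, &num_WriteProtect, 0);
    MODULE_PARM_DESC(WriteProtect, "Write-protect NVM (0=off, 1=on, default on)");

    static bool board_write_protected(int bd)
    {
            if (bd < num_WriteProtect && WriteProtect[bd] >= 0)
                    return WriteProtect[bd] != 0;
            return true;    /* default: enabled */
    }

A user would then override it per board at load time with something like "modprobe <module> WriteProtect=0,1", accepting the NVM-corruption risk spelled out in the parameter description above.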
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c index f6c45288d0e7..87e37bc39145 100644 --- a/drivers/net/wireless/ath9k/core.c +++ b/drivers/net/wireless/ath9k/core.c | |||
@@ -294,8 +294,6 @@ static int ath_stop(struct ath_softc *sc) | |||
294 | * hardware is gone (invalid). | 294 | * hardware is gone (invalid). |
295 | */ | 295 | */ |
296 | 296 | ||
297 | if (!sc->sc_invalid) | ||
298 | ath9k_hw_set_interrupts(ah, 0); | ||
299 | ath_draintxq(sc, false); | 297 | ath_draintxq(sc, false); |
300 | if (!sc->sc_invalid) { | 298 | if (!sc->sc_invalid) { |
301 | ath_stoprecv(sc); | 299 | ath_stoprecv(sc); |
@@ -797,6 +795,12 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan) | |||
797 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) | 795 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) |
798 | sc->sc_imask |= ATH9K_INT_CST; | 796 | sc->sc_imask |= ATH9K_INT_CST; |
799 | 797 | ||
798 | /* Note: We disable MIB interrupts for now as we don't yet | ||
799 | * handle processing ANI, otherwise you will get an interrupt | ||
800 | * storm after about 7 hours of usage making the system unusable | ||
801 | * with huge latency. Once we do have ANI processing included | ||
802 | * we can re-enable this interrupt. */ | ||
803 | #if 0 | ||
800 | /* | 804 | /* |
801 | * Enable MIB interrupts when there are hardware phy counters. | 805 | * Enable MIB interrupts when there are hardware phy counters. |
802 | * Note we only do this (at the moment) for station mode. | 806 | * Note we only do this (at the moment) for station mode. |
@@ -804,6 +808,7 @@ int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan) | |||
804 | if (ath9k_hw_phycounters(ah) && | 808 | if (ath9k_hw_phycounters(ah) && |
805 | ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) | 809 | ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) |
806 | sc->sc_imask |= ATH9K_INT_MIB; | 810 | sc->sc_imask |= ATH9K_INT_MIB; |
811 | #endif | ||
807 | /* | 812 | /* |
808 | * Some hardware processes the TIM IE and fires an | 813 | * Some hardware processes the TIM IE and fires an |
809 | * interrupt when the TIM bit is set. For hardware | 814 | * interrupt when the TIM bit is set. For hardware |
@@ -1336,6 +1341,8 @@ void ath_deinit(struct ath_softc *sc) | |||
1336 | 1341 | ||
1337 | DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); | 1342 | DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); |
1338 | 1343 | ||
1344 | tasklet_kill(&sc->intr_tq); | ||
1345 | tasklet_kill(&sc->bcon_tasklet); | ||
1339 | ath_stop(sc); | 1346 | ath_stop(sc); |
1340 | if (!sc->sc_invalid) | 1347 | if (!sc->sc_invalid) |
1341 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); | 1348 | ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); |
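ath_deinit() now kills the interrupt and beacon tasklets before the stop path runs, and the matching main.c hunk further down disables and acknowledges interrupts at the source and marks the hardware invalid before free_irq(). The underlying teardown rule is: silence the interrupt source, free the IRQ, then kill the tasklets or other deferred work the handler may have scheduled, and only afterwards release the data they use. A generic sketch of that ordering (names are illustrative):

    #include <linux/interrupt.h>

    struct my_dev {
            int irq;
            struct tasklet_struct rx_tasklet;
            /* ... */
    };

    static void my_dev_teardown(struct my_dev *dev)
    {
            /* 1. Stop the hardware from raising new interrupts
             *    (device-specific register write goes here). */

            /* 2. Free the IRQ; after this the handler cannot run
             *    and cannot schedule the tasklet again. */
            free_irq(dev->irq, dev);

            /* 3. Kill the tasklet and wait for a running instance
             *    to finish before freeing anything it touches. */
            tasklet_kill(&dev->rx_tasklet);
    }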
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h index 4ee695b76b88..2f84093331ee 100644 --- a/drivers/net/wireless/ath9k/core.h +++ b/drivers/net/wireless/ath9k/core.h | |||
@@ -974,7 +974,6 @@ struct ath_softc { | |||
974 | u32 sc_keymax; /* size of key cache */ | 974 | u32 sc_keymax; /* size of key cache */ |
975 | DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */ | 975 | DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */ |
976 | u8 sc_splitmic; /* split TKIP MIC keys */ | 976 | u8 sc_splitmic; /* split TKIP MIC keys */ |
977 | int sc_keytype; | ||
978 | 977 | ||
979 | /* RX */ | 978 | /* RX */ |
980 | struct list_head sc_rxbuf; | 979 | struct list_head sc_rxbuf; |
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c index 99badf1404c3..acebdf1d20a8 100644 --- a/drivers/net/wireless/ath9k/main.c +++ b/drivers/net/wireless/ath9k/main.c | |||
@@ -206,8 +206,6 @@ static int ath_key_config(struct ath_softc *sc, | |||
206 | if (!ret) | 206 | if (!ret) |
207 | return -EIO; | 207 | return -EIO; |
208 | 208 | ||
209 | if (mac) | ||
210 | sc->sc_keytype = hk.kv_type; | ||
211 | return 0; | 209 | return 0; |
212 | } | 210 | } |
213 | 211 | ||
@@ -778,7 +776,6 @@ static int ath9k_set_key(struct ieee80211_hw *hw, | |||
778 | case DISABLE_KEY: | 776 | case DISABLE_KEY: |
779 | ath_key_delete(sc, key); | 777 | ath_key_delete(sc, key); |
780 | clear_bit(key->keyidx, sc->sc_keymap); | 778 | clear_bit(key->keyidx, sc->sc_keymap); |
781 | sc->sc_keytype = ATH9K_CIPHER_CLR; | ||
782 | break; | 779 | break; |
783 | default: | 780 | default: |
784 | ret = -EINVAL; | 781 | ret = -EINVAL; |
@@ -1414,10 +1411,17 @@ static void ath_pci_remove(struct pci_dev *pdev) | |||
1414 | { | 1411 | { |
1415 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); | 1412 | struct ieee80211_hw *hw = pci_get_drvdata(pdev); |
1416 | struct ath_softc *sc = hw->priv; | 1413 | struct ath_softc *sc = hw->priv; |
1414 | enum ath9k_int status; | ||
1417 | 1415 | ||
1418 | if (pdev->irq) | 1416 | if (pdev->irq) { |
1417 | ath9k_hw_set_interrupts(sc->sc_ah, 0); | ||
1418 | /* clear the ISR */ | ||
1419 | ath9k_hw_getisr(sc->sc_ah, &status); | ||
1420 | sc->sc_invalid = 1; | ||
1419 | free_irq(pdev->irq, sc); | 1421 | free_irq(pdev->irq, sc); |
1422 | } | ||
1420 | ath_detach(sc); | 1423 | ath_detach(sc); |
1424 | |||
1421 | pci_iounmap(pdev, sc->mem); | 1425 | pci_iounmap(pdev, sc->mem); |
1422 | pci_release_region(pdev, 0); | 1426 | pci_release_region(pdev, 0); |
1423 | pci_disable_device(pdev); | 1427 | pci_disable_device(pdev); |
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c index 550129f717e2..8b332e11a656 100644 --- a/drivers/net/wireless/ath9k/xmit.c +++ b/drivers/net/wireless/ath9k/xmit.c | |||
@@ -315,11 +315,11 @@ static int ath_tx_prepare(struct ath_softc *sc, | |||
315 | txctl->keyix = tx_info->control.hw_key->hw_key_idx; | 315 | txctl->keyix = tx_info->control.hw_key->hw_key_idx; |
316 | txctl->frmlen += tx_info->control.icv_len; | 316 | txctl->frmlen += tx_info->control.icv_len; |
317 | 317 | ||
318 | if (sc->sc_keytype == ATH9K_CIPHER_WEP) | 318 | if (tx_info->control.hw_key->alg == ALG_WEP) |
319 | txctl->keytype = ATH9K_KEY_TYPE_WEP; | 319 | txctl->keytype = ATH9K_KEY_TYPE_WEP; |
320 | else if (sc->sc_keytype == ATH9K_CIPHER_TKIP) | 320 | else if (tx_info->control.hw_key->alg == ALG_TKIP) |
321 | txctl->keytype = ATH9K_KEY_TYPE_TKIP; | 321 | txctl->keytype = ATH9K_KEY_TYPE_TKIP; |
322 | else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM) | 322 | else if (tx_info->control.hw_key->alg == ALG_CCMP) |
323 | txctl->keytype = ATH9K_KEY_TYPE_AES; | 323 | txctl->keytype = ATH9K_KEY_TYPE_AES; |
324 | } | 324 | } |
325 | 325 | ||
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index da8b7433e3a6..a60ae86bd5c9 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -58,6 +58,7 @@ static struct usb_device_id usb_ids[] = { | |||
58 | { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, | 58 | { USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 }, |
59 | { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, | 59 | { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, |
60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, | 60 | { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, |
61 | { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, | ||
61 | /* ZD1211B */ | 62 | /* ZD1211B */ |
62 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, | 63 | { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, |
63 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, | 64 | { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9c718583a237..77baff022f71 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | 17 | ||
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/sched.h> | ||
19 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
20 | #include <linux/stat.h> | 21 | #include <linux/stat.h> |
21 | #include <linux/topology.h> | 22 | #include <linux/topology.h> |
@@ -484,6 +485,21 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, | |||
484 | #endif /* HAVE_PCI_LEGACY */ | 485 | #endif /* HAVE_PCI_LEGACY */ |
485 | 486 | ||
486 | #ifdef HAVE_PCI_MMAP | 487 | #ifdef HAVE_PCI_MMAP |
488 | |||
489 | static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) | ||
490 | { | ||
491 | unsigned long nr, start, size; | ||
492 | |||
493 | nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
494 | start = vma->vm_pgoff; | ||
495 | size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; | ||
496 | if (start < size && size - start >= nr) | ||
497 | return 1; | ||
498 | WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", | ||
499 | current->comm, start, start+nr, pci_name(pdev), resno, size); | ||
500 | return 0; | ||
501 | } | ||
502 | |||
487 | /** | 503 | /** |
488 | * pci_mmap_resource - map a PCI resource into user memory space | 504 | * pci_mmap_resource - map a PCI resource into user memory space |
489 | * @kobj: kobject for mapping | 505 | * @kobj: kobject for mapping |
@@ -510,6 +526,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, | |||
510 | if (i >= PCI_ROM_RESOURCE) | 526 | if (i >= PCI_ROM_RESOURCE) |
511 | return -ENODEV; | 527 | return -ENODEV; |
512 | 528 | ||
529 | if (!pci_mmap_fits(pdev, i, vma)) | ||
530 | return -EINVAL; | ||
531 | |||
513 | /* pci_mmap_page_range() expects the same kind of entry as coming | 532 | /* pci_mmap_page_range() expects the same kind of entry as coming |
514 | * from /proc/bus/pci/ which is a "user visible" value. If this is | 533 | * from /proc/bus/pci/ which is a "user visible" value. If this is |
515 | * different from the resource itself, arch will do necessary fixup. | 534 | * different from the resource itself, arch will do necessary fixup. |
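pci_mmap_fits() compares the requested page offset and length against the length of the BAR, so a sysfs mapping of resourceN that extends past the resource is now refused with -EINVAL (and a WARN naming the offending process) instead of handing out pages beyond the BAR. A hedged user-space illustration follows; the device address 0000:00:19.0 and BAR 0 are made-up examples, and the size of the resource0 file is taken to be the BAR length:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/bus/pci/devices/0000:00:19.0/resource0";
            struct stat st;
            void *bar;
            int fd = open(path, O_RDWR | O_SYNC);

            if (fd < 0 || fstat(fd, &st) < 0) {
                    perror(path);
                    return 1;
            }

            /* st.st_size is the BAR length; asking for more than that
             * now makes mmap() fail with EINVAL. */
            bar = mmap(NULL, st.st_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
            if (bar == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            munmap(bar, st.st_size);
            close(fd);
            return 0;
    }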
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 9a7c9e1408a4..851f5b83cdbc 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
@@ -527,7 +527,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) | |||
527 | */ | 527 | */ |
528 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, | 528 | pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, |
529 | ®32); | 529 | ®32); |
530 | if (!(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force)) { | 530 | if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { |
531 | printk("Pre-1.1 PCIe device detected, " | 531 | printk("Pre-1.1 PCIe device detected, " |
532 | "disable ASPM for %s. It can be enabled forcedly" | 532 | "disable ASPM for %s. It can be enabled forcedly" |
533 | " with 'pcie_aspm=force'\n", pci_name(pdev)); | 533 | " with 'pcie_aspm=force'\n", pci_name(pdev)); |
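The aspm.c change is a pure boolean fix: the old test !(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force) is true whenever the device lacks Role-Based Error Reporting or aspm_force is set, so booting with pcie_aspm=force would take the "disable ASPM" branch even for compliant devices, while the corrected !(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force only fires when the capability is missing and force was not requested. A tiny standalone check of the two expressions (the RBER bit is taken as bit 15 here):

    #include <assert.h>
    #include <stdio.h>

    #define PCI_EXP_DEVCAP_RBER 0x8000      /* Role-Based Error Reporting */

    int main(void)
    {
            unsigned int reg32 = PCI_EXP_DEVCAP_RBER;  /* device reports RBER */
            int aspm_force = 1;                        /* pcie_aspm=force */

            int old_cond = !(reg32 & PCI_EXP_DEVCAP_RBER && !aspm_force);
            int new_cond = !(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force;

            /* The old condition would (wrongly) disable ASPM here ... */
            assert(old_cond == 1);
            /* ... while the fixed condition leaves it enabled. */
            assert(new_cond == 0);

            printf("old=%d new=%d\n", old_cond, new_cond);
            return 0;
    }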
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 3b3b5f178797..4edfc4731bd4 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
@@ -162,7 +162,7 @@ EXPORT_SYMBOL(pci_find_slot); | |||
162 | * time. | 162 | * time. |
163 | */ | 163 | */ |
164 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, | 164 | struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, |
165 | const struct pci_dev *from) | 165 | struct pci_dev *from) |
166 | { | 166 | { |
167 | struct pci_dev *pdev; | 167 | struct pci_dev *pdev; |
168 | 168 | ||
@@ -263,7 +263,7 @@ static int match_pci_dev_by_id(struct device *dev, void *data) | |||
263 | * this file. | 263 | * this file. |
264 | */ | 264 | */ |
265 | static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, | 265 | static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, |
266 | const struct pci_dev *from) | 266 | struct pci_dev *from) |
267 | { | 267 | { |
268 | struct device *dev; | 268 | struct device *dev; |
269 | struct device *dev_start = NULL; | 269 | struct device *dev_start = NULL; |
@@ -303,7 +303,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, | |||
303 | */ | 303 | */ |
304 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, | 304 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, |
305 | unsigned int ss_vendor, unsigned int ss_device, | 305 | unsigned int ss_vendor, unsigned int ss_device, |
306 | const struct pci_dev *from) | 306 | struct pci_dev *from) |
307 | { | 307 | { |
308 | struct pci_dev *pdev; | 308 | struct pci_dev *pdev; |
309 | struct pci_device_id *id; | 309 | struct pci_device_id *id; |
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 4174d9656e35..34c83d3ca0fa 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c | |||
@@ -427,6 +427,18 @@ static int pcmcia_device_probe(struct device * dev) | |||
427 | p_drv = to_pcmcia_drv(dev->driver); | 427 | p_drv = to_pcmcia_drv(dev->driver); |
428 | s = p_dev->socket; | 428 | s = p_dev->socket; |
429 | 429 | ||
430 | /* The PCMCIA code passes the match data in via dev->driver_data | ||
431 | * which is an ugly hack. Once the driver probe is called it may | ||
432 | * and often will overwrite the match data so we must save it first | ||
433 | * | ||
434 | * handle pseudo multifunction devices: | ||
435 | * there are at most two pseudo multifunction devices. | ||
436 | * if we're matching against the first, schedule a | ||
437 | * call which will then check whether there are two | ||
438 | * pseudo devices, and if not, add the second one. | ||
439 | */ | ||
440 | did = p_dev->dev.driver_data; | ||
441 | |||
430 | ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id, | 442 | ds_dbg(1, "trying to bind %s to %s\n", p_dev->dev.bus_id, |
431 | p_drv->drv.name); | 443 | p_drv->drv.name); |
432 | 444 | ||
@@ -455,21 +467,14 @@ static int pcmcia_device_probe(struct device * dev) | |||
455 | goto put_module; | 467 | goto put_module; |
456 | } | 468 | } |
457 | 469 | ||
458 | /* handle pseudo multifunction devices: | ||
459 | * there are at most two pseudo multifunction devices. | ||
460 | * if we're matching against the first, schedule a | ||
461 | * call which will then check whether there are two | ||
462 | * pseudo devices, and if not, add the second one. | ||
463 | */ | ||
464 | did = p_dev->dev.driver_data; | ||
465 | if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && | 470 | if (did && (did->match_flags & PCMCIA_DEV_ID_MATCH_DEVICE_NO) && |
466 | (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) | 471 | (p_dev->socket->device_count == 1) && (p_dev->device_no == 0)) |
467 | pcmcia_add_device_later(p_dev->socket, 0); | 472 | pcmcia_add_device_later(p_dev->socket, 0); |
468 | 473 | ||
469 | put_module: | 474 | put_module: |
470 | if (ret) | 475 | if (ret) |
471 | module_put(p_drv->owner); | 476 | module_put(p_drv->owner); |
472 | put_dev: | 477 | put_dev: |
473 | if (ret) | 478 | if (ret) |
474 | put_device(dev); | 479 | put_device(dev); |
475 | return (ret); | 480 | return (ret); |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index f118252f3a9f..52e2743b04ec 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -422,6 +422,12 @@ done: | |||
422 | return err; | 422 | return err; |
423 | } | 423 | } |
424 | 424 | ||
425 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
426 | { | ||
427 | struct rtc_device *rtc = file->private_data; | ||
428 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
429 | } | ||
430 | |||
425 | static int rtc_dev_release(struct inode *inode, struct file *file) | 431 | static int rtc_dev_release(struct inode *inode, struct file *file) |
426 | { | 432 | { |
427 | struct rtc_device *rtc = file->private_data; | 433 | struct rtc_device *rtc = file->private_data; |
@@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file) | |||
434 | if (rtc->ops->release) | 440 | if (rtc->ops->release) |
435 | rtc->ops->release(rtc->dev.parent); | 441 | rtc->ops->release(rtc->dev.parent); |
436 | 442 | ||
443 | if (file->f_flags & FASYNC) | ||
444 | rtc_dev_fasync(-1, file, 0); | ||
445 | |||
437 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); | 446 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); |
438 | return 0; | 447 | return 0; |
439 | } | 448 | } |
440 | 449 | ||
441 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
442 | { | ||
443 | struct rtc_device *rtc = file->private_data; | ||
444 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
445 | } | ||
446 | |||
447 | static const struct file_operations rtc_dev_fops = { | 450 | static const struct file_operations rtc_dev_fops = { |
448 | .owner = THIS_MODULE, | 451 | .owner = THIS_MODULE, |
449 | .llseek = no_llseek, | 452 | .llseek = no_llseek, |
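The rtc-dev.c change moves rtc_dev_fasync() above rtc_dev_release() so the release path can call it with fd == -1 and on == 0, the conventional way of detaching a file from its fasync notification list when it is closed with FASYNC set; without this, a stale entry could be signalled after the file is gone. A minimal character-device sketch of the pattern (the my_* names are hypothetical):

    #include <linux/fs.h>

    struct my_device {
            struct fasync_struct *async_queue;
            /* ... */
    };

    static int my_fasync(int fd, struct file *file, int on)
    {
            struct my_device *dev = file->private_data;

            return fasync_helper(fd, file, on, &dev->async_queue);
    }

    static int my_release(struct inode *inode, struct file *file)
    {
            /* Remove this file from the async notification list;
             * fd == -1 with on == 0 means "detach". */
            if (file->f_flags & FASYNC)
                    my_fasync(-1, file, 0);

            return 0;
    }

    /* When the device has news: kill_fasync(&dev->async_queue, SIGIO, POLL_IN); */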
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 1679e2f91c94..a0b6b46e7466 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 447 | { |
448 | char s[80]; | 448 | char s[80]; |
449 | 449 | ||
450 | sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); | 450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); |
451 | |||
452 | switch (irq_ptr->qib.qfmt) { | 451 | switch (irq_ptr->qib.qfmt) { |
453 | case QDIO_QETH_QFMT: | 452 | case QDIO_QETH_QFMT: |
454 | sprintf(s + strlen(s), "OSADE "); | 453 | sprintf(s + strlen(s), "OSA "); |
455 | break; | 454 | break; |
456 | case QDIO_ZFCP_QFMT: | 455 | case QDIO_ZFCP_QFMT: |
457 | sprintf(s + strlen(s), "ZFCP "); | 456 | sprintf(s + strlen(s), "ZFCP "); |
458 | break; | 457 | break; |
459 | case QDIO_IQDIO_QFMT: | 458 | case QDIO_IQDIO_QFMT: |
460 | sprintf(s + strlen(s), "HiperSockets "); | 459 | sprintf(s + strlen(s), "HS "); |
461 | break; | 460 | break; |
462 | } | 461 | } |
463 | sprintf(s + strlen(s), "using: "); | 462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); |
464 | 463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | |
465 | if (!is_thinint_irq(irq_ptr)) | 464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); |
466 | sprintf(s + strlen(s), "no"); | 465 | sprintf(s + strlen(s), "PCI:%d ", |
467 | sprintf(s + strlen(s), "AdapterInterrupts "); | 466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); |
468 | if (!(irq_ptr->sch_token != 0)) | 467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); |
469 | sprintf(s + strlen(s), "no"); | 468 | sprintf(s + strlen(s), "SIGA:"); |
470 | sprintf(s + strlen(s), "QEBSM "); | 469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); |
471 | if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) | 470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); |
472 | sprintf(s + strlen(s), "no"); | 471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); |
473 | sprintf(s + strlen(s), "OutboundPCI "); | 472 | sprintf(s + strlen(s), "%s", |
474 | if (!css_general_characteristics.aif_tdd) | 473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); |
475 | sprintf(s + strlen(s), "no"); | 474 | sprintf(s + strlen(s), "%s", |
476 | sprintf(s + strlen(s), "TDD\n"); | 475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); |
477 | printk(KERN_INFO "qdio: %s", s); | 476 | sprintf(s + strlen(s), "%s", |
478 | 477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | |
479 | memset(s, 0, sizeof(s)); | ||
480 | sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); | ||
481 | if (irq_ptr->siga_flag.input) | ||
482 | sprintf(s + strlen(s), "Read "); | ||
483 | if (irq_ptr->siga_flag.output) | ||
484 | sprintf(s + strlen(s), "Write "); | ||
485 | if (irq_ptr->siga_flag.sync) | ||
486 | sprintf(s + strlen(s), "Sync "); | ||
487 | if (!irq_ptr->siga_flag.no_sync_ti) | ||
488 | sprintf(s + strlen(s), "SyncAI "); | ||
489 | if (!irq_ptr->siga_flag.no_sync_out_ti) | ||
490 | sprintf(s + strlen(s), "SyncOutAI "); | ||
491 | if (!irq_ptr->siga_flag.no_sync_out_pci) | ||
492 | sprintf(s + strlen(s), "SyncOutPCI"); | ||
493 | sprintf(s + strlen(s), "\n"); | 478 | sprintf(s + strlen(s), "\n"); |
494 | printk(KERN_INFO "qdio: %s", s); | 479 | printk(KERN_INFO "%s", s); |
495 | } | 480 | } |
496 | 481 | ||
497 | int __init qdio_setup_init(void) | 482 | int __init qdio_setup_init(void) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 45a3b93eed57..bf41887cdd65 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1834,7 +1834,6 @@ clear_risc_ints: | |||
1834 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); | 1834 | WRT_REG_WORD(®->isp.hccr, HCCR_CLR_HOST_INT); |
1835 | } | 1835 | } |
1836 | spin_unlock_irq(&ha->hardware_lock); | 1836 | spin_unlock_irq(&ha->hardware_lock); |
1837 | ha->isp_ops->enable_intrs(ha); | ||
1838 | 1837 | ||
1839 | fail: | 1838 | fail: |
1840 | return ret; | 1839 | return ret; |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 26afe44265c7..6d0f0e5f2827 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1740,6 +1740,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1740 | if (ret) | 1740 | if (ret) |
1741 | goto probe_failed; | 1741 | goto probe_failed; |
1742 | 1742 | ||
1743 | ha->isp_ops->enable_intrs(ha); | ||
1744 | |||
1743 | scsi_scan_host(host); | 1745 | scsi_scan_host(host); |
1744 | 1746 | ||
1745 | qla2x00_alloc_sysfs_attr(ha); | 1747 | qla2x00_alloc_sysfs_attr(ha); |
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 4a1cf6377f6c..905350896725 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -914,6 +914,7 @@ static inline int load_cmd(struct scsi_cmnd *Cmnd, struct Command_Entry *cmd, | |||
914 | ds[i].d_count = sg_dma_len(s); | 914 | ds[i].d_count = sg_dma_len(s); |
915 | } | 915 | } |
916 | sg_count -= n; | 916 | sg_count -= n; |
917 | sg = s; | ||
917 | } | 918 | } |
918 | } else { | 919 | } else { |
919 | cmd->dataseg[0].d_base = 0; | 920 | cmd->dataseg[0].d_base = 0; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ff5d56b3ee4d..62307bd794a9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -852,7 +852,7 @@ static void scsi_end_bidi_request(struct scsi_cmnd *cmd) | |||
852 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | 852 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) |
853 | { | 853 | { |
854 | int result = cmd->result; | 854 | int result = cmd->result; |
855 | int this_count = scsi_bufflen(cmd); | 855 | int this_count; |
856 | struct request_queue *q = cmd->device->request_queue; | 856 | struct request_queue *q = cmd->device->request_queue; |
857 | struct request *req = cmd->request; | 857 | struct request *req = cmd->request; |
858 | int error = 0; | 858 | int error = 0; |
@@ -908,6 +908,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
908 | */ | 908 | */ |
909 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) | 909 | if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) |
910 | return; | 910 | return; |
911 | this_count = blk_rq_bytes(req); | ||
911 | 912 | ||
912 | /* good_bytes = 0, or (inclusive) there were leftovers and | 913 | /* good_bytes = 0, or (inclusive) there were leftovers and |
913 | * result = 0, so scsi_end_request couldn't retry. | 914 | * result = 0, so scsi_end_request couldn't retry. |
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 3a6da80b081c..61fb8b6d19af 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c | |||
@@ -131,7 +131,8 @@ struct atmel_uart_char { | |||
131 | struct atmel_uart_port { | 131 | struct atmel_uart_port { |
132 | struct uart_port uart; /* uart */ | 132 | struct uart_port uart; /* uart */ |
133 | struct clk *clk; /* uart clock */ | 133 | struct clk *clk; /* uart clock */ |
134 | unsigned short suspended; /* is port suspended? */ | 134 | int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */ |
135 | u32 backup_imr; /* IMR saved during suspend */ | ||
135 | int break_active; /* break being received */ | 136 | int break_active; /* break being received */ |
136 | 137 | ||
137 | short use_dma_rx; /* enable PDC receiver */ | 138 | short use_dma_rx; /* enable PDC receiver */ |
@@ -984,8 +985,15 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state, | |||
984 | * This is called on uart_open() or a resume event. | 985 | * This is called on uart_open() or a resume event. |
985 | */ | 986 | */ |
986 | clk_enable(atmel_port->clk); | 987 | clk_enable(atmel_port->clk); |
988 | |||
989 | /* re-enable interrupts if we disabled some on suspend */ | ||
990 | UART_PUT_IER(port, atmel_port->backup_imr); | ||
987 | break; | 991 | break; |
988 | case 3: | 992 | case 3: |
993 | /* Back up the interrupt mask and disable all interrupts */ | ||
994 | atmel_port->backup_imr = UART_GET_IMR(port); | ||
995 | UART_PUT_IDR(port, -1); | ||
996 | |||
989 | /* | 997 | /* |
990 | * Disable the peripheral clock for this serial port. | 998 | * Disable the peripheral clock for this serial port. |
991 | * This is called on uart_close() or a suspend event. | 999 | * This is called on uart_close() or a suspend event. |
@@ -1475,13 +1483,12 @@ static int atmel_serial_suspend(struct platform_device *pdev, | |||
1475 | cpu_relax(); | 1483 | cpu_relax(); |
1476 | } | 1484 | } |
1477 | 1485 | ||
1478 | if (device_may_wakeup(&pdev->dev) | 1486 | /* we can not wake up if we're running on slow clock */ |
1479 | && !atmel_serial_clk_will_stop()) | 1487 | atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); |
1480 | enable_irq_wake(port->irq); | 1488 | if (atmel_serial_clk_will_stop()) |
1481 | else { | 1489 | device_set_wakeup_enable(&pdev->dev, 0); |
1482 | uart_suspend_port(&atmel_uart, port); | 1490 | |
1483 | atmel_port->suspended = 1; | 1491 | uart_suspend_port(&atmel_uart, port); |
1484 | } | ||
1485 | 1492 | ||
1486 | return 0; | 1493 | return 0; |
1487 | } | 1494 | } |
@@ -1491,11 +1498,8 @@ static int atmel_serial_resume(struct platform_device *pdev) | |||
1491 | struct uart_port *port = platform_get_drvdata(pdev); | 1498 | struct uart_port *port = platform_get_drvdata(pdev); |
1492 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | 1499 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
1493 | 1500 | ||
1494 | if (atmel_port->suspended) { | 1501 | uart_resume_port(&atmel_uart, port); |
1495 | uart_resume_port(&atmel_uart, port); | 1502 | device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); |
1496 | atmel_port->suspended = 0; | ||
1497 | } else | ||
1498 | disable_irq_wake(port->irq); | ||
1499 | 1503 | ||
1500 | return 0; | 1504 | return 0; |
1501 | } | 1505 | } |
@@ -1513,6 +1517,8 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) | |||
1513 | BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); | 1517 | BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); |
1514 | 1518 | ||
1515 | port = &atmel_ports[pdev->id]; | 1519 | port = &atmel_ports[pdev->id]; |
1520 | port->backup_imr = 0; | ||
1521 | |||
1516 | atmel_init_port(port, pdev); | 1522 | atmel_init_port(port, pdev); |
1517 | 1523 | ||
1518 | if (!atmel_use_dma_rx(&port->uart)) { | 1524 | if (!atmel_use_dma_rx(&port->uart)) { |
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index c4eaacd6e553..b872bfaf4bd2 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c | |||
@@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
427 | goto msg_rejected; | 427 | goto msg_rejected; |
428 | } | 428 | } |
429 | 429 | ||
430 | if (t->speed_hz < orion_spi->min_speed) { | 430 | if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { |
431 | dev_err(&spi->dev, | 431 | dev_err(&spi->dev, |
432 | "message rejected : " | 432 | "message rejected : " |
433 | "device min speed (%d Hz) exceeds " | 433 | "device min speed (%d Hz) exceeds " |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index 0e53354c1cfe..d47d3636227f 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -49,7 +49,7 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
49 | 49 | ||
50 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | 50 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) |
51 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | 51 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) |
52 | #define IS_DMA_ALIGNED(x) (((x) & 0x07) == 0) | 52 | #define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) |
53 | #define MAX_DMA_LEN 8191 | 53 | #define MAX_DMA_LEN 8191 |
54 | 54 | ||
55 | /* | 55 | /* |
@@ -896,7 +896,7 @@ static void pump_transfers(unsigned long data) | |||
896 | || transfer->rx_dma || transfer->tx_dma) { | 896 | || transfer->rx_dma || transfer->tx_dma) { |
897 | dev_err(&drv_data->pdev->dev, | 897 | dev_err(&drv_data->pdev->dev, |
898 | "pump_transfers: mapped transfer length " | 898 | "pump_transfers: mapped transfer length " |
899 | "of %lu is greater than %d\n", | 899 | "of %u is greater than %d\n", |
900 | transfer->len, MAX_DMA_LEN); | 900 | transfer->len, MAX_DMA_LEN); |
901 | message->status = -EINVAL; | 901 | message->status = -EINVAL; |
902 | giveback(drv_data); | 902 | giveback(drv_data); |
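The pxa2xx_spi hunk casts the IS_DMA_ALIGNED() argument to u32 before masking, presumably so the macro can be applied uniformly to pointers as well as integer DMA addresses (a bitwise AND is not defined on a pointer operand), and switches the length format to %u to match the unsigned int transfer length. A small user-space illustration of the alignment check; the extra uintptr_t cast is only there to keep the example warning-free on 64-bit hosts, whereas PXA itself is 32-bit:

    #include <stdint.h>
    #include <stdio.h>

    /* The old form (((x) & 0x07) == 0) does not compile when x is a pointer. */
    #define IS_DMA_ALIGNED(x)   ((((uint32_t)(uintptr_t)(x)) & 0x07) == 0)

    int main(void)
    {
            char buf[16];

            printf("%p -> %d\n", (void *)buf, IS_DMA_ALIGNED(buf));
            printf("%p -> %d\n", (void *)(buf + 3), IS_DMA_ALIGNED(buf + 3));
            return 0;
    }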
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 87ab2443e66d..0ffabf5c0b60 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c | |||
@@ -471,6 +471,7 @@ static int ssb_devices_register(struct ssb_bus *bus) | |||
471 | #endif | 471 | #endif |
472 | break; | 472 | break; |
473 | case SSB_BUSTYPE_SSB: | 473 | case SSB_BUSTYPE_SSB: |
474 | dev->dma_mask = &dev->coherent_dma_mask; | ||
474 | break; | 475 | break; |
475 | } | 476 | } |
476 | 477 | ||
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 8abd4e59bf4a..8ab389dca2b9 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1876,7 +1876,8 @@ int usb_add_hcd(struct usb_hcd *hcd, | |||
1876 | * with IRQF_SHARED. As usb_hcd_irq() will always disable | 1876 | * with IRQF_SHARED. As usb_hcd_irq() will always disable |
1877 | * interrupts we can remove it here. | 1877 | * interrupts we can remove it here. |
1878 | */ | 1878 | */ |
1879 | irqflags &= ~IRQF_DISABLED; | 1879 | if (irqflags & IRQF_SHARED) |
1880 | irqflags &= ~IRQF_DISABLED; | ||
1880 | 1881 | ||
1881 | snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", | 1882 | snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d", |
1882 | hcd->driver->description, hcd->self.busnum); | 1883 | hcd->driver->description, hcd->self.busnum); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 6a5cb018383d..d99963873e37 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2683,35 +2683,17 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2683 | USB_PORT_STAT_C_ENABLE); | 2683 | USB_PORT_STAT_C_ENABLE); |
2684 | #endif | 2684 | #endif |
2685 | 2685 | ||
2686 | /* Try to use the debounce delay for protection against | ||
2687 | * port-enable changes caused, for example, by EMI. | ||
2688 | */ | ||
2689 | if (portchange & (USB_PORT_STAT_C_CONNECTION | | ||
2690 | USB_PORT_STAT_C_ENABLE)) { | ||
2691 | status = hub_port_debounce(hub, port1); | ||
2692 | if (status < 0) { | ||
2693 | if (printk_ratelimit()) | ||
2694 | dev_err (hub_dev, "connect-debounce failed, " | ||
2695 | "port %d disabled\n", port1); | ||
2696 | portstatus &= ~USB_PORT_STAT_CONNECTION; | ||
2697 | } else { | ||
2698 | portstatus = status; | ||
2699 | } | ||
2700 | } | ||
2701 | |||
2702 | /* Try to resuscitate an existing device */ | 2686 | /* Try to resuscitate an existing device */ |
2703 | udev = hdev->children[port1-1]; | 2687 | udev = hdev->children[port1-1]; |
2704 | if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && | 2688 | if ((portstatus & USB_PORT_STAT_CONNECTION) && udev && |
2705 | udev->state != USB_STATE_NOTATTACHED) { | 2689 | udev->state != USB_STATE_NOTATTACHED) { |
2706 | |||
2707 | usb_lock_device(udev); | 2690 | usb_lock_device(udev); |
2708 | if (portstatus & USB_PORT_STAT_ENABLE) { | 2691 | if (portstatus & USB_PORT_STAT_ENABLE) { |
2709 | status = 0; /* Nothing to do */ | 2692 | status = 0; /* Nothing to do */ |
2710 | } else if (!udev->persist_enabled) { | ||
2711 | status = -ENODEV; /* Mustn't resuscitate */ | ||
2712 | 2693 | ||
2713 | #ifdef CONFIG_USB_SUSPEND | 2694 | #ifdef CONFIG_USB_SUSPEND |
2714 | } else if (udev->state == USB_STATE_SUSPENDED) { | 2695 | } else if (udev->state == USB_STATE_SUSPENDED && |
2696 | udev->persist_enabled) { | ||
2715 | /* For a suspended device, treat this as a | 2697 | /* For a suspended device, treat this as a |
2716 | * remote wakeup event. | 2698 | * remote wakeup event. |
2717 | */ | 2699 | */ |
@@ -2726,7 +2708,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2726 | #endif | 2708 | #endif |
2727 | 2709 | ||
2728 | } else { | 2710 | } else { |
2729 | status = usb_reset_device(udev); | 2711 | status = -ENODEV; /* Don't resuscitate */ |
2730 | } | 2712 | } |
2731 | usb_unlock_device(udev); | 2713 | usb_unlock_device(udev); |
2732 | 2714 | ||
@@ -2741,6 +2723,19 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2741 | usb_disconnect(&hdev->children[port1-1]); | 2723 | usb_disconnect(&hdev->children[port1-1]); |
2742 | clear_bit(port1, hub->change_bits); | 2724 | clear_bit(port1, hub->change_bits); |
2743 | 2725 | ||
2726 | if (portchange & (USB_PORT_STAT_C_CONNECTION | | ||
2727 | USB_PORT_STAT_C_ENABLE)) { | ||
2728 | status = hub_port_debounce(hub, port1); | ||
2729 | if (status < 0) { | ||
2730 | if (printk_ratelimit()) | ||
2731 | dev_err(hub_dev, "connect-debounce failed, " | ||
2732 | "port %d disabled\n", port1); | ||
2733 | portstatus &= ~USB_PORT_STAT_CONNECTION; | ||
2734 | } else { | ||
2735 | portstatus = status; | ||
2736 | } | ||
2737 | } | ||
2738 | |||
2744 | /* Return now if debouncing failed or nothing is connected */ | 2739 | /* Return now if debouncing failed or nothing is connected */ |
2745 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) { | 2740 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) { |
2746 | 2741 | ||
@@ -2748,7 +2743,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, | |||
2748 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 | 2743 | if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 |
2749 | && !(portstatus & (1 << USB_PORT_FEAT_POWER))) | 2744 | && !(portstatus & (1 << USB_PORT_FEAT_POWER))) |
2750 | set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); | 2745 | set_port_feature(hdev, port1, USB_PORT_FEAT_POWER); |
2751 | 2746 | ||
2752 | if (portstatus & USB_PORT_STAT_ENABLE) | 2747 | if (portstatus & USB_PORT_STAT_ENABLE) |
2753 | goto done; | 2748 | goto done; |
2754 | return; | 2749 | return; |
diff --git a/drivers/usb/gadget/fsl_usb2_udc.c b/drivers/usb/gadget/fsl_usb2_udc.c index 1cfccf102a2d..45ad556169f1 100644 --- a/drivers/usb/gadget/fsl_usb2_udc.c +++ b/drivers/usb/gadget/fsl_usb2_udc.c | |||
@@ -223,7 +223,7 @@ static int dr_controller_setup(struct fsl_udc *udc) | |||
223 | fsl_writel(tmp, &dr_regs->endpointlistaddr); | 223 | fsl_writel(tmp, &dr_regs->endpointlistaddr); |
224 | 224 | ||
225 | VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x", | 225 | VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x", |
226 | (int)udc->ep_qh, (int)tmp, | 226 | udc->ep_qh, (int)tmp, |
227 | fsl_readl(&dr_regs->endpointlistaddr)); | 227 | fsl_readl(&dr_regs->endpointlistaddr)); |
228 | 228 | ||
229 | /* Config PHY interface */ | 229 | /* Config PHY interface */ |
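The fsl_usb2_udc change drops the (int) cast on udc->ep_qh because that argument feeds the %p conversion in the VDBG format string: %p must be handed the pointer itself, and casting it to int both truncates the value on 64-bit builds and mismatches what the varargs machinery expects. In miniature:

    #include <stdio.h>

    int main(void)
    {
            int x = 42;
            int *p = &x;

            printf("%p\n", (void *)p);      /* correct: pass the pointer */
            /* printf("%p\n", (int)p); */   /* wrong: truncates and breaks varargs */
            return 0;
    }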
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 574c53831a05..bb54cca4c543 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -787,7 +787,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) | |||
787 | omap_set_dma_dest_params(ep->lch, | 787 | omap_set_dma_dest_params(ep->lch, |
788 | OMAP_DMA_PORT_TIPB, | 788 | OMAP_DMA_PORT_TIPB, |
789 | OMAP_DMA_AMODE_CONSTANT, | 789 | OMAP_DMA_AMODE_CONSTANT, |
790 | (unsigned long) io_v2p(UDC_DATA_DMA), | 790 | UDC_DATA_DMA, |
791 | 0, 0); | 791 | 0, 0); |
792 | } | 792 | } |
793 | } else { | 793 | } else { |
@@ -804,7 +804,7 @@ static void dma_channel_claim(struct omap_ep *ep, unsigned channel) | |||
804 | omap_set_dma_src_params(ep->lch, | 804 | omap_set_dma_src_params(ep->lch, |
805 | OMAP_DMA_PORT_TIPB, | 805 | OMAP_DMA_PORT_TIPB, |
806 | OMAP_DMA_AMODE_CONSTANT, | 806 | OMAP_DMA_AMODE_CONSTANT, |
807 | (unsigned long) io_v2p(UDC_DATA_DMA), | 807 | UDC_DATA_DMA, |
808 | 0, 0); | 808 | 0, 0); |
809 | /* EMIFF or SDRC */ | 809 | /* EMIFF or SDRC */ |
810 | omap_set_dma_dest_burst_mode(ep->lch, | 810 | omap_set_dma_dest_burst_mode(ep->lch, |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index d9d53f289caf..8409e0705d63 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -145,16 +145,6 @@ static int handshake (struct ehci_hcd *ehci, void __iomem *ptr, | |||
145 | return -ETIMEDOUT; | 145 | return -ETIMEDOUT; |
146 | } | 146 | } |
147 | 147 | ||
148 | static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, | ||
149 | u32 mask, u32 done, int usec) | ||
150 | { | ||
151 | int error = handshake(ehci, ptr, mask, done, usec); | ||
152 | if (error) | ||
153 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; | ||
154 | |||
155 | return error; | ||
156 | } | ||
157 | |||
158 | /* force HC to halt state from unknown (EHCI spec section 2.3) */ | 148 | /* force HC to halt state from unknown (EHCI spec section 2.3) */ |
159 | static int ehci_halt (struct ehci_hcd *ehci) | 149 | static int ehci_halt (struct ehci_hcd *ehci) |
160 | { | 150 | { |
@@ -173,6 +163,22 @@ static int ehci_halt (struct ehci_hcd *ehci) | |||
173 | STS_HALT, STS_HALT, 16 * 125); | 163 | STS_HALT, STS_HALT, 16 * 125); |
174 | } | 164 | } |
175 | 165 | ||
166 | static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr, | ||
167 | u32 mask, u32 done, int usec) | ||
168 | { | ||
169 | int error; | ||
170 | |||
171 | error = handshake(ehci, ptr, mask, done, usec); | ||
172 | if (error) { | ||
173 | ehci_halt(ehci); | ||
174 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; | ||
175 | ehci_err(ehci, "force halt; handshake %p %08x %08x -> %d\n", | ||
176 | ptr, mask, done, error); | ||
177 | } | ||
178 | |||
179 | return error; | ||
180 | } | ||
181 | |||
176 | /* put TDI/ARC silicon into EHCI mode */ | 182 | /* put TDI/ARC silicon into EHCI mode */ |
177 | static void tdi_reset (struct ehci_hcd *ehci) | 183 | static void tdi_reset (struct ehci_hcd *ehci) |
178 | { | 184 | { |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index b7853c8bac0f..4a0c5a78b2ed 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -437,6 +437,9 @@ static int enable_periodic (struct ehci_hcd *ehci) | |||
437 | u32 cmd; | 437 | u32 cmd; |
438 | int status; | 438 | int status; |
439 | 439 | ||
440 | if (ehci->periodic_sched++) | ||
441 | return 0; | ||
442 | |||
440 | /* did clearing PSE did take effect yet? | 443 | /* did clearing PSE did take effect yet? |
441 | * takes effect only at frame boundaries... | 444 | * takes effect only at frame boundaries... |
442 | */ | 445 | */ |
@@ -461,6 +464,9 @@ static int disable_periodic (struct ehci_hcd *ehci) | |||
461 | u32 cmd; | 464 | u32 cmd; |
462 | int status; | 465 | int status; |
463 | 466 | ||
467 | if (--ehci->periodic_sched) | ||
468 | return 0; | ||
469 | |||
464 | /* did setting PSE not take effect yet? | 470 | /* did setting PSE not take effect yet? |
465 | * takes effect only at frame boundaries... | 471 | * takes effect only at frame boundaries... |
466 | */ | 472 | */ |
@@ -544,13 +550,10 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
544 | : (qh->usecs * 8); | 550 | : (qh->usecs * 8); |
545 | 551 | ||
546 | /* maybe enable periodic schedule processing */ | 552 | /* maybe enable periodic schedule processing */ |
547 | if (!ehci->periodic_sched++) | 553 | return enable_periodic(ehci); |
548 | return enable_periodic (ehci); | ||
549 | |||
550 | return 0; | ||
551 | } | 554 | } |
552 | 555 | ||
553 | static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | 556 | static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) |
554 | { | 557 | { |
555 | unsigned i; | 558 | unsigned i; |
556 | unsigned period; | 559 | unsigned period; |
@@ -586,9 +589,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |||
586 | qh_put (qh); | 589 | qh_put (qh); |
587 | 590 | ||
588 | /* maybe turn off periodic schedule */ | 591 | /* maybe turn off periodic schedule */ |
589 | ehci->periodic_sched--; | 592 | return disable_periodic(ehci); |
590 | if (!ehci->periodic_sched) | ||
591 | (void) disable_periodic (ehci); | ||
592 | } | 593 | } |
593 | 594 | ||
594 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) | 595 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) |
@@ -1562,9 +1563,7 @@ itd_link_urb ( | |||
1562 | urb->hcpriv = NULL; | 1563 | urb->hcpriv = NULL; |
1563 | 1564 | ||
1564 | timer_action (ehci, TIMER_IO_WATCHDOG); | 1565 | timer_action (ehci, TIMER_IO_WATCHDOG); |
1565 | if (unlikely (!ehci->periodic_sched++)) | 1566 | return enable_periodic(ehci); |
1566 | return enable_periodic (ehci); | ||
1567 | return 0; | ||
1568 | } | 1567 | } |
1569 | 1568 | ||
1570 | #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) | 1569 | #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) |
@@ -1642,7 +1641,7 @@ itd_complete ( | |||
1642 | ehci_urb_done(ehci, urb, 0); | 1641 | ehci_urb_done(ehci, urb, 0); |
1643 | retval = true; | 1642 | retval = true; |
1644 | urb = NULL; | 1643 | urb = NULL; |
1645 | ehci->periodic_sched--; | 1644 | (void) disable_periodic(ehci); |
1646 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | 1645 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
1647 | 1646 | ||
1648 | if (unlikely (list_empty (&stream->td_list))) { | 1647 | if (unlikely (list_empty (&stream->td_list))) { |
@@ -1951,9 +1950,7 @@ sitd_link_urb ( | |||
1951 | urb->hcpriv = NULL; | 1950 | urb->hcpriv = NULL; |
1952 | 1951 | ||
1953 | timer_action (ehci, TIMER_IO_WATCHDOG); | 1952 | timer_action (ehci, TIMER_IO_WATCHDOG); |
1954 | if (!ehci->periodic_sched++) | 1953 | return enable_periodic(ehci); |
1955 | return enable_periodic (ehci); | ||
1956 | return 0; | ||
1957 | } | 1954 | } |
1958 | 1955 | ||
1959 | /*-------------------------------------------------------------------------*/ | 1956 | /*-------------------------------------------------------------------------*/ |
@@ -2019,7 +2016,7 @@ sitd_complete ( | |||
2019 | ehci_urb_done(ehci, urb, 0); | 2016 | ehci_urb_done(ehci, urb, 0); |
2020 | retval = true; | 2017 | retval = true; |
2021 | urb = NULL; | 2018 | urb = NULL; |
2022 | ehci->periodic_sched--; | 2019 | (void) disable_periodic(ehci); |
2023 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | 2020 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; |
2024 | 2021 | ||
2025 | if (list_empty (&stream->td_list)) { | 2022 | if (list_empty (&stream->td_list)) { |
@@ -2243,8 +2240,7 @@ restart: | |||
2243 | if (unlikely (modified)) { | 2240 | if (unlikely (modified)) { |
2244 | if (likely(ehci->periodic_sched > 0)) | 2241 | if (likely(ehci->periodic_sched > 0)) |
2245 | goto restart; | 2242 | goto restart; |
2246 | /* maybe we can short-circuit this scan! */ | 2243 | /* short-circuit this scan */ |
2247 | disable_periodic(ehci); | ||
2248 | now_uframe = clock; | 2244 | now_uframe = clock; |
2249 | break; | 2245 | break; |
2250 | } | 2246 | } |
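These scheduler changes fold the ehci->periodic_sched reference count into enable_periodic()/disable_periodic(), so qh_link_periodic(), the iTD/siTD link paths and the completion paths simply call the helpers instead of open-coding the count. A simplified sketch of the resulting idiom (the real functions also handshake on CMD_PSE/STS_PSS at frame boundaries):

    /* Only the first user starts the periodic schedule... */
    static int enable_periodic(struct ehci_hcd *ehci)
    {
            if (ehci->periodic_sched++)
                    return 0;               /* already enabled */
            /* ... set CMD_PSE and wait for the schedule to start ... */
            return 0;
    }

    /* ...and only the last user stops it. */
    static int disable_periodic(struct ehci_hcd *ehci)
    {
            if (--ehci->periodic_sched)
                    return 0;               /* still in use */
            /* ... clear CMD_PSE and wait for the schedule to stop ... */
            return 0;
    }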
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index a0017486ad4e..58b2b8fc9439 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig | |||
@@ -9,6 +9,7 @@ comment "Enable Host or Gadget support to see Inventra options" | |||
9 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller | 9 | # (M)HDRC = (Multipoint) Highspeed Dual-Role Controller |
10 | config USB_MUSB_HDRC | 10 | config USB_MUSB_HDRC |
11 | depends on (USB || USB_GADGET) && HAVE_CLK | 11 | depends on (USB || USB_GADGET) && HAVE_CLK |
12 | depends on !SUPERH | ||
12 | select TWL4030_USB if MACH_OMAP_3430SDP | 13 | select TWL4030_USB if MACH_OMAP_3430SDP |
13 | tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' | 14 | tristate 'Inventra Highspeed Dual Role Controller (TI, ...)' |
14 | help | 15 | help |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c5b8f0296fcf..128e949db47c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -100,8 +100,8 @@ | |||
100 | #include <linux/io.h> | 100 | #include <linux/io.h> |
101 | 101 | ||
102 | #ifdef CONFIG_ARM | 102 | #ifdef CONFIG_ARM |
103 | #include <asm/arch/hardware.h> | 103 | #include <mach/hardware.h> |
104 | #include <asm/arch/memory.h> | 104 | #include <mach/memory.h> |
105 | #include <asm/mach-types.h> | 105 | #include <asm/mach-types.h> |
106 | #endif | 106 | #endif |
107 | 107 | ||
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 298b22e6ad0d..9d2dcb121c5e 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
@@ -35,8 +35,8 @@ | |||
35 | #include <linux/io.h> | 35 | #include <linux/io.h> |
36 | 36 | ||
37 | #include <asm/mach-types.h> | 37 | #include <asm/mach-types.h> |
38 | #include <asm/arch/hardware.h> | 38 | #include <mach/hardware.h> |
39 | #include <asm/arch/mux.h> | 39 | #include <mach/mux.h> |
40 | 40 | ||
41 | #include "musb_core.h" | 41 | #include "musb_core.h" |
42 | #include "omap2430.h" | 42 | #include "omap2430.h" |
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h index 786a62071f72..dc7670718cd2 100644 --- a/drivers/usb/musb/omap2430.h +++ b/drivers/usb/musb/omap2430.h | |||
@@ -11,8 +11,8 @@ | |||
11 | #define __MUSB_OMAP243X_H__ | 11 | #define __MUSB_OMAP243X_H__ |
12 | 12 | ||
13 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) | 13 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) |
14 | #include <asm/arch/hardware.h> | 14 | #include <mach/hardware.h> |
15 | #include <asm/arch/usb.h> | 15 | #include <mach/usb.h> |
16 | 16 | ||
17 | /* | 17 | /* |
18 | * OMAP2430-specific definitions | 18 | * OMAP2430-specific definitions |
diff --git a/drivers/usb/serial/cp2101.c b/drivers/usb/serial/cp2101.c index 442cba69cce5..1279553381e3 100644 --- a/drivers/usb/serial/cp2101.c +++ b/drivers/usb/serial/cp2101.c | |||
@@ -72,6 +72,7 @@ static struct usb_device_id id_table [] = { | |||
72 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ | 72 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ |
73 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ | 73 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ |
74 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ | 74 | { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ |
75 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ | ||
75 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ | 76 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ |
76 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ | 77 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
77 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ | 78 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
@@ -83,6 +84,7 @@ static struct usb_device_id id_table [] = { | |||
83 | { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ | 84 | { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ |
84 | { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ | 85 | { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ |
85 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ | 86 | { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ |
87 | { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ | ||
86 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ | 88 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
87 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ | 89 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
88 | { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ | 90 | { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ |
@@ -93,6 +95,7 @@ static struct usb_device_id id_table [] = { | |||
93 | { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ | 95 | { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ |
94 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ | 96 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ |
95 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ | 97 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
98 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | ||
96 | { } /* Terminating Entry */ | 99 | { } /* Terminating Entry */ |
97 | }; | 100 | }; |
98 | 101 | ||
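The cp2101 additions are pure ID-table entries: once a VID/PID pair is listed, the existing driver binds the device with no other code changes. A minimal sketch of the pattern, using the Arygon entry added above as the example (table name is hypothetical):

    static struct usb_device_id example_ids[] = {
            { USB_DEVICE(0x10C4, 0x8115) },         /* Arygon NFC/Mifare Reader */
            { }                                     /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_ids);          /* enables automatic module loading */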
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 984f6eff4c47..3dc93b542b30 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -654,6 +654,9 @@ static struct usb_device_id id_table_combined [] = { | |||
654 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 654 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
655 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, | 655 | { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, |
656 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, | 656 | { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, |
657 | { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, | ||
658 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, | ||
659 | { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, | ||
657 | { }, /* Optional parameter entry */ | 660 | { }, /* Optional parameter entry */ |
658 | { } /* Terminating entry */ | 661 | { } /* Terminating entry */ |
659 | }; | 662 | }; |
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index 382265bba969..8a5b6df3a976 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h | |||
@@ -750,6 +750,7 @@ | |||
750 | 750 | ||
751 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ | 751 | #define PAPOUCH_VID 0x5050 /* Vendor ID */ |
752 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ | 752 | #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ |
753 | #define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ | ||
753 | 754 | ||
754 | /* | 755 | /* |
755 | * ACG Identification Technologies GmbH products (http://www.acg.de/). | 756 | * ACG Identification Technologies GmbH products (http://www.acg.de/). |
@@ -838,6 +839,10 @@ | |||
838 | /* Rig Expert Ukraine devices */ | 839 | /* Rig Expert Ukraine devices */ |
839 | #define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */ | 840 | #define FTDI_REU_TINY_PID 0xED22 /* RigExpert Tiny */ |
840 | 841 | ||
842 | /* Domintell products http://www.domintell.com */ | ||
843 | #define FTDI_DOMINTELL_DGQG_PID 0xEF50 /* Master */ | ||
844 | #define FTDI_DOMINTELL_DUSB_PID 0xEF51 /* DUSB01 module */ | ||
845 | |||
841 | /* Commands */ | 846 | /* Commands */ |
842 | #define FTDI_SIO_RESET 0 /* Reset the port */ | 847 | #define FTDI_SIO_RESET 0 /* Reset the port */ |
843 | #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ | 848 | #define FTDI_SIO_MODEM_CTRL 1 /* Set the modem control register */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 9f9cd36455f4..73f8277f88f2 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -218,6 +218,7 @@ static int option_send_setup(struct tty_struct *tty, struct usb_serial_port *po | |||
218 | /* ZTE PRODUCTS */ | 218 | /* ZTE PRODUCTS */ |
219 | #define ZTE_VENDOR_ID 0x19d2 | 219 | #define ZTE_VENDOR_ID 0x19d2 |
220 | #define ZTE_PRODUCT_MF628 0x0015 | 220 | #define ZTE_PRODUCT_MF628 0x0015 |
221 | #define ZTE_PRODUCT_CDMA_TECH 0xfffe | ||
221 | 222 | ||
222 | static struct usb_device_id option_ids[] = { | 223 | static struct usb_device_id option_ids[] = { |
223 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, | 224 | { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, |
@@ -347,6 +348,7 @@ static struct usb_device_id option_ids[] = { | |||
347 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ | 348 | { USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */ |
348 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, | 349 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
349 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, | 350 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628) }, |
351 | { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH) }, | ||
350 | { } /* Terminating entry */ | 352 | { } /* Terminating entry */ |
351 | }; | 353 | }; |
352 | MODULE_DEVICE_TABLE(usb, option_ids); | 354 | MODULE_DEVICE_TABLE(usb, option_ids); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 706033753adb..ea1a103c99be 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -14,7 +14,7 @@ | |||
14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> | 14 | Whom based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define DRIVER_VERSION "v.1.2.13a" | 17 | #define DRIVER_VERSION "v.1.3.2" |
18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" | 18 | #define DRIVER_AUTHOR "Kevin Lloyd <klloyd@sierrawireless.com>" |
19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" | 19 | #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" |
20 | 20 | ||
@@ -30,9 +30,6 @@ | |||
30 | 30 | ||
31 | #define SWIMS_USB_REQUEST_SetPower 0x00 | 31 | #define SWIMS_USB_REQUEST_SetPower 0x00 |
32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 | 32 | #define SWIMS_USB_REQUEST_SetNmea 0x07 |
33 | #define SWIMS_USB_REQUEST_SetMode 0x0B | ||
34 | #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A | ||
35 | #define SWIMS_SET_MODE_Modem 0x0001 | ||
36 | 33 | ||
37 | /* per port private data */ | 34 | /* per port private data */ |
38 | #define N_IN_URB 4 | 35 | #define N_IN_URB 4 |
@@ -163,7 +160,7 @@ static struct usb_device_id id_table [] = { | |||
163 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ | 160 | { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ |
164 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ | 161 | { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ |
165 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ | 162 | { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ |
166 | { USB_DEVICE(0x0f30, 0x1b1d) }, /* Sierra Wireless MC5720 */ | 163 | { USB_DEVICE(0x03f0, 0x1b1d) }, /* HP ev2200 a.k.a MC5720 */ |
167 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ | 164 | { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ |
168 | { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ | 165 | { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ |
169 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ | 166 | { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ |
@@ -175,6 +172,8 @@ static struct usb_device_id id_table [] = { | |||
175 | /* Sierra Wireless Device */ | 172 | /* Sierra Wireless Device */ |
176 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, | 173 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, |
177 | { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ | 174 | { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless Device */ |
175 | { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless Device */ | ||
176 | { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless Device */ | ||
178 | 177 | ||
179 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ | 178 | { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ |
180 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ | 179 | { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ |
@@ -187,6 +186,7 @@ static struct usb_device_id id_table [] = { | |||
187 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ | 186 | { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ |
188 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ | 187 | { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ |
189 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ | 188 | { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ |
189 | { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */ | ||
190 | { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ | 190 | { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ |
191 | { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ | 191 | { USB_DEVICE(0x1199, 0x683C) }, /* Sierra Wireless MC8790 */ |
192 | { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ | 192 | { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8790 */ |
@@ -204,6 +204,8 @@ static struct usb_device_id id_table [] = { | |||
204 | /* Sierra Wireless Device */ | 204 | /* Sierra Wireless Device */ |
205 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, | 205 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, |
206 | /* Sierra Wireless Device */ | 206 | /* Sierra Wireless Device */ |
207 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)}, | ||
208 | /* Sierra Wireless Device */ | ||
207 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, | 209 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, |
208 | 210 | ||
209 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ | 211 | { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e39c779e4160..9a3e495c769c 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -1744,7 +1744,7 @@ static int ti_download_firmware(struct ti_device *tdev, int type) | |||
1744 | if (buffer) { | 1744 | if (buffer) { |
1745 | memcpy(buffer, fw_p->data, fw_p->size); | 1745 | memcpy(buffer, fw_p->data, fw_p->size); |
1746 | memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); | 1746 | memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size); |
1747 | ti_do_download(dev, pipe, buffer, fw_p->size); | 1747 | status = ti_do_download(dev, pipe, buffer, fw_p->size); |
1748 | kfree(buffer); | 1748 | kfree(buffer); |
1749 | } | 1749 | } |
1750 | release_firmware(fw_p); | 1750 | release_firmware(fw_p); |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b157c48e8b78..4f7f9e3ae0a4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -733,7 +733,9 @@ int usb_serial_probe(struct usb_interface *interface, | |||
733 | ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) && | 733 | ((le16_to_cpu(dev->descriptor.idVendor) == ATEN_VENDOR_ID) && |
734 | (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) || | 734 | (le16_to_cpu(dev->descriptor.idProduct) == ATEN_PRODUCT_ID)) || |
735 | ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) && | 735 | ((le16_to_cpu(dev->descriptor.idVendor) == ALCOR_VENDOR_ID) && |
736 | (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID))) { | 736 | (le16_to_cpu(dev->descriptor.idProduct) == ALCOR_PRODUCT_ID)) || |
737 | ((le16_to_cpu(dev->descriptor.idVendor) == SIEMENS_VENDOR_ID) && | ||
738 | (le16_to_cpu(dev->descriptor.idProduct) == SIEMENS_PRODUCT_ID_EF81))) { | ||
737 | if (interface != dev->actconfig->interface[0]) { | 739 | if (interface != dev->actconfig->interface[0]) { |
738 | /* check out the endpoints of the other interface*/ | 740 | /* check out the endpoints of the other interface*/ |
739 | iface_desc = dev->actconfig->interface[0]->cur_altsetting; | 741 | iface_desc = dev->actconfig->interface[0]->cur_altsetting; |
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig index c76034672c18..3d9249632ae1 100644 --- a/drivers/usb/storage/Kconfig +++ b/drivers/usb/storage/Kconfig | |||
@@ -146,18 +146,6 @@ config USB_STORAGE_KARMA | |||
146 | on the resulting scsi device node returns the Karma to normal | 146 | on the resulting scsi device node returns the Karma to normal |
147 | operation. | 147 | operation. |
148 | 148 | ||
149 | config USB_STORAGE_SIERRA | ||
150 | bool "Sierra Wireless TRU-Install Feature Support" | ||
151 | depends on USB_STORAGE | ||
152 | help | ||
153 | Say Y here to include additional code to support Sierra Wireless | ||
154 | products with the TRU-Install feature (e.g., AC597E, AC881U). | ||
155 | |||
156 | This code switches the Sierra Wireless device from being in | ||
157 | Mass Storage mode to Modem mode. It also has the ability to | ||
158 | support host software upgrades should full Linux support be added | ||
159 | to TRU-Install. | ||
160 | |||
161 | config USB_STORAGE_CYPRESS_ATACB | 149 | config USB_STORAGE_CYPRESS_ATACB |
162 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" | 150 | bool "SAT emulation on Cypress USB/ATA Bridge with ATACB" |
163 | depends on USB_STORAGE | 151 | depends on USB_STORAGE |
diff --git a/drivers/usb/storage/Makefile b/drivers/usb/storage/Makefile index bc3415b475c9..7f8beb5366ae 100644 --- a/drivers/usb/storage/Makefile +++ b/drivers/usb/storage/Makefile | |||
@@ -21,11 +21,10 @@ usb-storage-obj-$(CONFIG_USB_STORAGE_JUMPSHOT) += jumpshot.o | |||
21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o | 21 | usb-storage-obj-$(CONFIG_USB_STORAGE_ALAUDA) += alauda.o |
22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o | 22 | usb-storage-obj-$(CONFIG_USB_STORAGE_ONETOUCH) += onetouch.o |
23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o | 23 | usb-storage-obj-$(CONFIG_USB_STORAGE_KARMA) += karma.o |
24 | usb-storage-obj-$(CONFIG_USB_STORAGE_SIERRA) += sierra_ms.o | ||
25 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o | 24 | usb-storage-obj-$(CONFIG_USB_STORAGE_CYPRESS_ATACB) += cypress_atacb.o |
26 | 25 | ||
27 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ | 26 | usb-storage-objs := scsiglue.o protocol.o transport.o usb.o \ |
28 | initializers.o $(usb-storage-obj-y) | 27 | initializers.o sierra_ms.o $(usb-storage-obj-y) |
29 | 28 | ||
30 | ifneq ($(CONFIG_USB_LIBUSUAL),) | 29 | ifneq ($(CONFIG_USB_LIBUSUAL),) |
31 | obj-$(CONFIG_USB) += libusual.o | 30 | obj-$(CONFIG_USB) += libusual.o |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index ba412e68d474..cd155475cb6e 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -160,6 +160,13 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592, | |||
160 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 160 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
161 | US_FL_MAX_SECTORS_64 ), | 161 | US_FL_MAX_SECTORS_64 ), |
162 | 162 | ||
163 | /* Reported by Filip Joelsson <filip@blueturtle.nu> */ | ||
164 | UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600, | ||
165 | "Nokia", | ||
166 | "Nokia 3110c", | ||
167 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
168 | US_FL_FIX_CAPACITY ), | ||
169 | |||
163 | /* Reported by Mario Rettig <mariorettig@web.de> */ | 170 | /* Reported by Mario Rettig <mariorettig@web.de> */ |
164 | UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, | 171 | UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, |
165 | "Nokia", | 172 | "Nokia", |
@@ -232,6 +239,20 @@ UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, | |||
232 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 239 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
233 | US_FL_FIX_CAPACITY ), | 240 | US_FL_FIX_CAPACITY ), |
234 | 241 | ||
242 | /* Reported by Richard Nauber <RichardNauber@web.de> */ | ||
243 | UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601, | ||
244 | "Nokia", | ||
245 | "6300", | ||
246 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
247 | US_FL_FIX_CAPACITY ), | ||
248 | |||
249 | /* Patch for Nokia 5310 capacity */ | ||
250 | UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591, | ||
251 | "Nokia", | ||
252 | "5310", | ||
253 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
254 | US_FL_FIX_CAPACITY ), | ||
255 | |||
235 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ | 256 | /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */ |
236 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, | 257 | UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210, |
237 | "SMSC", | 258 | "SMSC", |
@@ -987,6 +1008,13 @@ UNUSUAL_DEV( 0x069b, 0x3004, 0x0001, 0x0001, | |||
987 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1008 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
988 | US_FL_FIX_CAPACITY ), | 1009 | US_FL_FIX_CAPACITY ), |
989 | 1010 | ||
1011 | /* Reported by Adrian Pilchowiec <adi1981@epf.pl> */ | ||
1012 | UNUSUAL_DEV( 0x071b, 0x3203, 0x0000, 0x0000, | ||
1013 | "RockChip", | ||
1014 | "MP3", | ||
1015 | US_SC_DEVICE, US_PR_DEVICE, NULL, | ||
1016 | US_FL_NO_WP_DETECT | US_FL_MAX_SECTORS_64), | ||
1017 | |||
990 | /* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com> | 1018 | /* Reported by Massimiliano Ghilardi <massimiliano.ghilardi@gmail.com> |
991 | * This USB MP3/AVI player device fails and disconnects if more than 128 | 1019 | * This USB MP3/AVI player device fails and disconnects if more than 128 |
992 | * sectors (64kB) are read/written in a single command, and may be present | 1020 | * sectors (64kB) are read/written in a single command, and may be present |
@@ -1576,7 +1604,6 @@ UNUSUAL_DEV( 0x10d6, 0x2200, 0x0100, 0x0100, | |||
1576 | US_SC_DEVICE, US_PR_DEVICE, NULL, | 1604 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
1577 | 0), | 1605 | 0), |
1578 | 1606 | ||
1579 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
1580 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> | 1607 | /* Reported by Kevin Lloyd <linux@sierrawireless.com> |
1581 | * Entry is needed for the initializer function override, | 1608 | * Entry is needed for the initializer function override, |
1582 | * which instructs the device to load as a modem | 1609 | * which instructs the device to load as a modem |
@@ -1587,7 +1614,6 @@ UNUSUAL_DEV( 0x1199, 0x0fff, 0x0000, 0x9999, | |||
1587 | "USB MMC Storage", | 1614 | "USB MMC Storage", |
1588 | US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, | 1615 | US_SC_DEVICE, US_PR_DEVICE, sierra_ms_init, |
1589 | 0), | 1616 | 0), |
1590 | #endif | ||
1591 | 1617 | ||
1592 | /* Reported by Jaco Kroon <jaco@kroon.co.za> | 1618 | /* Reported by Jaco Kroon <jaco@kroon.co.za> |
1593 | * The usb-storage module found on the Digitech GNX4 (and supposedly other | 1619 | * The usb-storage module found on the Digitech GNX4 (and supposedly other |
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 73679aa506de..27016fd2cad1 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -102,9 +102,7 @@ | |||
102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB | 102 | #ifdef CONFIG_USB_STORAGE_CYPRESS_ATACB |
103 | #include "cypress_atacb.h" | 103 | #include "cypress_atacb.h" |
104 | #endif | 104 | #endif |
105 | #ifdef CONFIG_USB_STORAGE_SIERRA | ||
106 | #include "sierra_ms.h" | 105 | #include "sierra_ms.h" |
107 | #endif | ||
108 | 106 | ||
109 | /* Some informational data */ | 107 | /* Some informational data */ |
110 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); | 108 | MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index c6299e8a041d..9cbff84b787d 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) | |||
2400 | 2400 | ||
2401 | if (!fbcon_is_inactive(vc, info)) { | 2401 | if (!fbcon_is_inactive(vc, info)) { |
2402 | if (ops->blank_state != blank) { | 2402 | if (ops->blank_state != blank) { |
2403 | int ret = 1; | ||
2404 | |||
2403 | ops->blank_state = blank; | 2405 | ops->blank_state = blank; |
2404 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); | 2406 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); |
2405 | ops->cursor_flash = (!blank); | 2407 | ops->cursor_flash = (!blank); |
2406 | 2408 | ||
2407 | if (fb_blank(info, blank)) | 2409 | if (info->fbops->fb_blank) |
2410 | ret = info->fbops->fb_blank(blank, info); | ||
2411 | if (ret) | ||
2408 | fbcon_generic_blank(vc, info, blank); | 2412 | fbcon_generic_blank(vc, info, blank); |
2409 | } | 2413 | } |
2410 | 2414 | ||
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h index a6e38e9ea73f..89a346880ec0 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/console/fbcon.h | |||
@@ -110,7 +110,7 @@ static inline int mono_col(const struct fb_info *info) | |||
110 | __u32 max_len; | 110 | __u32 max_len; |
111 | max_len = max(info->var.green.length, info->var.red.length); | 111 | max_len = max(info->var.green.length, info->var.red.length); |
112 | max_len = max(info->var.blue.length, max_len); | 112 | max_len = max(info->var.blue.length, max_len); |
113 | return ~(0xfff << (max_len & 0xff)); | 113 | return (~(0xfff << max_len)) & 0xff; |
114 | } | 114 | } |
115 | 115 | ||
116 | static inline int attr_col_ec(int shift, struct vc_data *vc, | 116 | static inline int attr_col_ec(int shift, struct vc_data *vc, |
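The mono_col() fix moves the 0xff cap outside the shift so the helper returns a clean per-channel mask instead of a value with high bits set. A worked example, assuming a 5:6:5 truecolor mode where the widest channel is 6 bits:

    max_len = 6;
    old: ~(0xfff << (6 & 0xff))   = ~0x3ffc0        -> 0xfffc003f (high bits leak through)
    new: (~(0xfff << 6)) & 0xff   = 0x3ffc003f & 0xff -> 0x3f, the expected 6-bit mask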
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c index 614a5c7017b6..6799a6de66fe 100644 --- a/drivers/watchdog/geodewdt.c +++ b/drivers/watchdog/geodewdt.c | |||
@@ -130,8 +130,8 @@ static ssize_t geodewdt_write(struct file *file, const char __user *data, | |||
130 | return len; | 130 | return len; |
131 | } | 131 | } |
132 | 132 | ||
133 | static int geodewdt_ioctl(struct inode *inode, struct file *file, | 133 | static long geodewdt_ioctl(struct file *file, unsigned int cmd, |
134 | unsigned int cmd, unsigned long arg) | 134 | unsigned long arg) |
135 | { | 135 | { |
136 | void __user *argp = (void __user *)arg; | 136 | void __user *argp = (void __user *)arg; |
137 | int __user *p = argp; | 137 | int __user *p = argp; |
@@ -198,7 +198,7 @@ static const struct file_operations geodewdt_fops = { | |||
198 | .owner = THIS_MODULE, | 198 | .owner = THIS_MODULE, |
199 | .llseek = no_llseek, | 199 | .llseek = no_llseek, |
200 | .write = geodewdt_write, | 200 | .write = geodewdt_write, |
201 | .ioctl = geodewdt_ioctl, | 201 | .unlocked_ioctl = geodewdt_ioctl, |
202 | .open = geodewdt_open, | 202 | .open = geodewdt_open, |
203 | .release = geodewdt_release, | 203 | .release = geodewdt_release, |
204 | }; | 204 | }; |
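The geodewdt change (repeated below for several other watchdog drivers) is the standard conversion from the old .ioctl file operation to .unlocked_ioctl, which drops the inode argument and returns long. A hedged sketch of the shape such a conversion takes; the example_wdt_* names are placeholders:

    static long example_wdt_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
    {
            void __user *argp = (void __user *)arg;
            /* ... handle WDIOC_* commands, taking any driver lock explicitly,
             *     since the big kernel lock is no longer held for us ... */
            return -ENOTTY;
    }

    static const struct file_operations example_wdt_fops = {
            .owner          = THIS_MODULE,
            .unlocked_ioctl = example_wdt_ioctl,    /* was: .ioctl = ... */
    };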
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c index b82405cfb4cd..89fcefcc8510 100644 --- a/drivers/watchdog/ibmasr.c +++ b/drivers/watchdog/ibmasr.c | |||
@@ -85,7 +85,6 @@ static void __asr_toggle(void) | |||
85 | 85 | ||
86 | outb(reg & ~asr_toggle_mask, asr_write_addr); | 86 | outb(reg & ~asr_toggle_mask, asr_write_addr); |
87 | reg = inb(asr_read_addr); | 87 | reg = inb(asr_read_addr); |
88 | spin_unlock(&asr_lock); | ||
89 | } | 88 | } |
90 | 89 | ||
91 | static void asr_toggle(void) | 90 | static void asr_toggle(void) |
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index 0ed84162437b..6d9f3d4a9987 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c | |||
@@ -173,8 +173,8 @@ static const struct watchdog_info ident = { | |||
173 | .identity = "PNX4008 Watchdog", | 173 | .identity = "PNX4008 Watchdog", |
174 | }; | 174 | }; |
175 | 175 | ||
176 | static long pnx4008_wdt_ioctl(struct inode *inode, struct file *file, | 176 | static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd, |
177 | unsigned int cmd, unsigned long arg) | 177 | unsigned long arg) |
178 | { | 178 | { |
179 | int ret = -ENOTTY; | 179 | int ret = -ENOTTY; |
180 | int time; | 180 | int time; |
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index 6756bcb009ed..c9c73b69c5e5 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c | |||
@@ -182,8 +182,8 @@ static ssize_t rc32434_wdt_write(struct file *file, const char *data, | |||
182 | return 0; | 182 | return 0; |
183 | } | 183 | } |
184 | 184 | ||
185 | static int rc32434_wdt_ioctl(struct inode *inode, struct file *file, | 185 | static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd, |
186 | unsigned int cmd, unsigned long arg) | 186 | unsigned long arg) |
187 | { | 187 | { |
188 | void __user *argp = (void __user *)arg; | 188 | void __user *argp = (void __user *)arg; |
189 | int new_timeout; | 189 | int new_timeout; |
@@ -242,7 +242,7 @@ static struct file_operations rc32434_wdt_fops = { | |||
242 | .owner = THIS_MODULE, | 242 | .owner = THIS_MODULE, |
243 | .llseek = no_llseek, | 243 | .llseek = no_llseek, |
244 | .write = rc32434_wdt_write, | 244 | .write = rc32434_wdt_write, |
245 | .ioctl = rc32434_wdt_ioctl, | 245 | .unlocked_ioctl = rc32434_wdt_ioctl, |
246 | .open = rc32434_wdt_open, | 246 | .open = rc32434_wdt_open, |
247 | .release = rc32434_wdt_release, | 247 | .release = rc32434_wdt_release, |
248 | }; | 248 | }; |
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 9108efa73e7d..bf92802f2bbe 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c | |||
@@ -144,8 +144,8 @@ static int rdc321x_wdt_release(struct inode *inode, struct file *file) | |||
144 | return 0; | 144 | return 0; |
145 | } | 145 | } |
146 | 146 | ||
147 | static int rdc321x_wdt_ioctl(struct inode *inode, struct file *file, | 147 | static long rdc321x_wdt_ioctl(struct file *file, unsigned int cmd, |
148 | unsigned int cmd, unsigned long arg) | 148 | unsigned long arg) |
149 | { | 149 | { |
150 | void __user *argp = (void __user *)arg; | 150 | void __user *argp = (void __user *)arg; |
151 | unsigned int value; | 151 | unsigned int value; |
@@ -204,7 +204,7 @@ static ssize_t rdc321x_wdt_write(struct file *file, const char __user *buf, | |||
204 | static const struct file_operations rdc321x_wdt_fops = { | 204 | static const struct file_operations rdc321x_wdt_fops = { |
205 | .owner = THIS_MODULE, | 205 | .owner = THIS_MODULE, |
206 | .llseek = no_llseek, | 206 | .llseek = no_llseek, |
207 | .ioctl = rdc321x_wdt_ioctl, | 207 | .unlocked_ioctl = rdc321x_wdt_ioctl, |
208 | .open = rdc321x_wdt_open, | 208 | .open = rdc321x_wdt_open, |
209 | .write = rdc321x_wdt_write, | 209 | .write = rdc321x_wdt_write, |
210 | .release = rdc321x_wdt_release, | 210 | .release = rdc321x_wdt_release, |
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index db362c34958b..191ea6302107 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c | |||
@@ -115,8 +115,8 @@ static int watchdog_release(struct inode *inode, struct file *file) | |||
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
117 | 117 | ||
118 | static ssize_t watchdog_write(struct file *file, const char *data, | 118 | static ssize_t watchdog_write(struct file *file, const char __user *data, |
119 | size_t len, loff_t *ppos) | 119 | size_t len, loff_t *ppos) |
120 | { | 120 | { |
121 | /* | 121 | /* |
122 | * Refresh the timer. | 122 | * Refresh the timer. |
@@ -133,21 +133,22 @@ static const struct watchdog_info ident = { | |||
133 | }; | 133 | }; |
134 | 134 | ||
135 | static long watchdog_ioctl(struct file *file, unsigned int cmd, | 135 | static long watchdog_ioctl(struct file *file, unsigned int cmd, |
136 | unsigned long arg) | 136 | unsigned long arg) |
137 | { | 137 | { |
138 | unsigned int new_margin; | 138 | unsigned int new_margin; |
139 | int __user *int_arg = (int __user *)arg; | ||
139 | int ret = -ENOTTY; | 140 | int ret = -ENOTTY; |
140 | 141 | ||
141 | switch (cmd) { | 142 | switch (cmd) { |
142 | case WDIOC_GETSUPPORT: | 143 | case WDIOC_GETSUPPORT: |
143 | ret = 0; | 144 | ret = 0; |
144 | if (copy_to_user((void *)arg, &ident, sizeof(ident))) | 145 | if (copy_to_user((void __user *)arg, &ident, sizeof(ident))) |
145 | ret = -EFAULT; | 146 | ret = -EFAULT; |
146 | break; | 147 | break; |
147 | 148 | ||
148 | case WDIOC_GETSTATUS: | 149 | case WDIOC_GETSTATUS: |
149 | case WDIOC_GETBOOTSTATUS: | 150 | case WDIOC_GETBOOTSTATUS: |
150 | ret = put_user(0, (int *)arg); | 151 | ret = put_user(0, int_arg); |
151 | break; | 152 | break; |
152 | 153 | ||
153 | case WDIOC_KEEPALIVE: | 154 | case WDIOC_KEEPALIVE: |
@@ -156,7 +157,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, | |||
156 | break; | 157 | break; |
157 | 158 | ||
158 | case WDIOC_SETTIMEOUT: | 159 | case WDIOC_SETTIMEOUT: |
159 | ret = get_user(new_margin, (int *)arg); | 160 | ret = get_user(new_margin, int_arg); |
160 | if (ret) | 161 | if (ret) |
161 | break; | 162 | break; |
162 | 163 | ||
@@ -171,7 +172,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, | |||
171 | watchdog_ping(); | 172 | watchdog_ping(); |
172 | /* Fall */ | 173 | /* Fall */ |
173 | case WDIOC_GETTIMEOUT: | 174 | case WDIOC_GETTIMEOUT: |
174 | ret = put_user(soft_margin, (int *)arg); | 175 | ret = put_user(soft_margin, int_arg); |
175 | break; | 176 | break; |
176 | } | 177 | } |
177 | return ret; | 178 | return ret; |
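The wdt285 hunks also tighten user-pointer handling: addresses coming from userspace are annotated __user and accessed only through get_user()/put_user()/copy_to_user(), never dereferenced directly. A small sketch of the idiom, with the WDIOC command dispatch abbreviated:

    int __user *int_arg = (int __user *)arg;    /* arg is a userspace address */
    int new_margin;

    if (get_user(new_margin, int_arg))          /* read an int from userspace */
            return -EFAULT;
    if (put_user(soft_margin, int_arg))         /* write the result back */
            return -EFAULT;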
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index c95295c65045..e83aa5ebe861 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
@@ -626,8 +626,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
626 | return NULL; | 626 | return NULL; |
627 | 627 | ||
628 | error: | 628 | error: |
629 | if (fid) | 629 | p9_client_clunk(fid); |
630 | p9_client_clunk(fid); | ||
631 | 630 | ||
632 | return ERR_PTR(result); | 631 | return ERR_PTR(result); |
633 | } | 632 | } |
diff --git a/fs/dcache.c b/fs/dcache.c index 80e93956aced..e7a1a99b7464 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -1395,6 +1395,10 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | |||
1395 | if (dentry->d_parent != parent) | 1395 | if (dentry->d_parent != parent) |
1396 | goto next; | 1396 | goto next; |
1397 | 1397 | ||
1398 | /* non-existing due to RCU? */ | ||
1399 | if (d_unhashed(dentry)) | ||
1400 | goto next; | ||
1401 | |||
1398 | /* | 1402 | /* |
1399 | * It is safe to compare names since d_move() cannot | 1403 | * It is safe to compare names since d_move() cannot |
1400 | * change the qstr (protected by d_lock). | 1404 | * change the qstr (protected by d_lock). |
@@ -1410,10 +1414,8 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) | |||
1410 | goto next; | 1414 | goto next; |
1411 | } | 1415 | } |
1412 | 1416 | ||
1413 | if (!d_unhashed(dentry)) { | 1417 | atomic_inc(&dentry->d_count); |
1414 | atomic_inc(&dentry->d_count); | 1418 | found = dentry; |
1415 | found = dentry; | ||
1416 | } | ||
1417 | spin_unlock(&dentry->d_lock); | 1419 | spin_unlock(&dentry->d_lock); |
1418 | break; | 1420 | break; |
1419 | next: | 1421 | next: |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -752,11 +752,11 @@ static int exec_mmap(struct mm_struct *mm) | |||
752 | tsk->active_mm = mm; | 752 | tsk->active_mm = mm; |
753 | activate_mm(active_mm, mm); | 753 | activate_mm(active_mm, mm); |
754 | task_unlock(tsk); | 754 | task_unlock(tsk); |
755 | mm_update_next_owner(old_mm); | ||
756 | arch_pick_mmap_layout(mm); | 755 | arch_pick_mmap_layout(mm); |
757 | if (old_mm) { | 756 | if (old_mm) { |
758 | up_read(&old_mm->mmap_sem); | 757 | up_read(&old_mm->mmap_sem); |
759 | BUG_ON(active_mm != old_mm); | 758 | BUG_ON(active_mm != old_mm); |
759 | mm_update_next_owner(old_mm); | ||
760 | mmput(old_mm); | 760 | mmput(old_mm); |
761 | return 0; | 761 | return 0; |
762 | } | 762 | } |
diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 60249429a253..d85c7d931cdf 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c | |||
@@ -323,7 +323,7 @@ out: | |||
323 | } | 323 | } |
324 | 324 | ||
325 | /* | 325 | /* |
326 | * remove_kevent - cleans up and ultimately frees the given kevent | 326 | * remove_kevent - cleans up the given kevent |
327 | * | 327 | * |
328 | * Caller must hold dev->ev_mutex. | 328 | * Caller must hold dev->ev_mutex. |
329 | */ | 329 | */ |
@@ -334,7 +334,13 @@ static void remove_kevent(struct inotify_device *dev, | |||
334 | 334 | ||
335 | dev->event_count--; | 335 | dev->event_count--; |
336 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; | 336 | dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len; |
337 | } | ||
337 | 338 | ||
339 | /* | ||
340 | * free_kevent - frees the given kevent. | ||
341 | */ | ||
342 | static void free_kevent(struct inotify_kernel_event *kevent) | ||
343 | { | ||
338 | kfree(kevent->name); | 344 | kfree(kevent->name); |
339 | kmem_cache_free(event_cachep, kevent); | 345 | kmem_cache_free(event_cachep, kevent); |
340 | } | 346 | } |
@@ -350,6 +356,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev) | |||
350 | struct inotify_kernel_event *kevent; | 356 | struct inotify_kernel_event *kevent; |
351 | kevent = inotify_dev_get_event(dev); | 357 | kevent = inotify_dev_get_event(dev); |
352 | remove_kevent(dev, kevent); | 358 | remove_kevent(dev, kevent); |
359 | free_kevent(kevent); | ||
353 | } | 360 | } |
354 | } | 361 | } |
355 | 362 | ||
@@ -433,17 +440,15 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
433 | dev = file->private_data; | 440 | dev = file->private_data; |
434 | 441 | ||
435 | while (1) { | 442 | while (1) { |
436 | int events; | ||
437 | 443 | ||
438 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); | 444 | prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE); |
439 | 445 | ||
440 | mutex_lock(&dev->ev_mutex); | 446 | mutex_lock(&dev->ev_mutex); |
441 | events = !list_empty(&dev->events); | 447 | if (!list_empty(&dev->events)) { |
442 | mutex_unlock(&dev->ev_mutex); | ||
443 | if (events) { | ||
444 | ret = 0; | 448 | ret = 0; |
445 | break; | 449 | break; |
446 | } | 450 | } |
451 | mutex_unlock(&dev->ev_mutex); | ||
447 | 452 | ||
448 | if (file->f_flags & O_NONBLOCK) { | 453 | if (file->f_flags & O_NONBLOCK) { |
449 | ret = -EAGAIN; | 454 | ret = -EAGAIN; |
@@ -462,7 +467,6 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
462 | if (ret) | 467 | if (ret) |
463 | return ret; | 468 | return ret; |
464 | 469 | ||
465 | mutex_lock(&dev->ev_mutex); | ||
466 | while (1) { | 470 | while (1) { |
467 | struct inotify_kernel_event *kevent; | 471 | struct inotify_kernel_event *kevent; |
468 | 472 | ||
@@ -481,6 +485,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
481 | } | 485 | } |
482 | break; | 486 | break; |
483 | } | 487 | } |
488 | remove_kevent(dev, kevent); | ||
489 | |||
490 | /* | ||
491 | * Must perform the copy_to_user outside the mutex in order | ||
492 | * to avoid a lock order reversal with mmap_sem. | ||
493 | */ | ||
494 | mutex_unlock(&dev->ev_mutex); | ||
484 | 495 | ||
485 | if (copy_to_user(buf, &kevent->event, event_size)) { | 496 | if (copy_to_user(buf, &kevent->event, event_size)) { |
486 | ret = -EFAULT; | 497 | ret = -EFAULT; |
@@ -498,7 +509,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf, | |||
498 | count -= kevent->event.len; | 509 | count -= kevent->event.len; |
499 | } | 510 | } |
500 | 511 | ||
501 | remove_kevent(dev, kevent); | 512 | free_kevent(kevent); |
513 | |||
514 | mutex_lock(&dev->ev_mutex); | ||
502 | } | 515 | } |
503 | mutex_unlock(&dev->ev_mutex); | 516 | mutex_unlock(&dev->ev_mutex); |
504 | 517 | ||
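The inotify_read() rework splits remove_kevent() (dequeue and accounting) from free_kevent() (memory release) so an event can be detached under ev_mutex but copied to userspace with the mutex dropped, avoiding a lock-order inversion with mmap_sem, which copy_to_user() may take when it faults. A sketch of the resulting pattern, with error handling omitted:

    mutex_lock(&dev->ev_mutex);
    kevent = inotify_dev_get_event(dev);
    remove_kevent(dev, kevent);                 /* off the queue, still allocated */
    mutex_unlock(&dev->ev_mutex);               /* drop the lock before faulting */

    if (copy_to_user(buf, &kevent->event, event_size))
            ret = -EFAULT;                      /* may take mmap_sem internally */

    free_kevent(kevent);                        /* now safe to release */
    mutex_lock(&dev->ev_mutex);                 /* reacquire for the next event */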
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index 52312ec93ff4..5145cb9125af 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c | |||
@@ -58,7 +58,7 @@ const struct inode_operations ramfs_file_inode_operations = { | |||
58 | * size 0 on the assumption that it's going to be used for an mmap of shared | 58 | * size 0 on the assumption that it's going to be used for an mmap of shared |
59 | * memory | 59 | * memory |
60 | */ | 60 | */ |
61 | static int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) | 61 | int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) |
62 | { | 62 | { |
63 | struct pagevec lru_pvec; | 63 | struct pagevec lru_pvec; |
64 | unsigned long npages, xpages, loop, limit; | 64 | unsigned long npages, xpages, loop, limit; |
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index b9cb77473758..d7f7645779f2 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -538,7 +538,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) | |||
538 | printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n); | 538 | printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n); |
539 | for (i = 0; i < n; i++) | 539 | for (i = 0; i < n; i++) |
540 | printk(KERN_DEBUG "\t ino %llu\n", | 540 | printk(KERN_DEBUG "\t ino %llu\n", |
541 | le64_to_cpu(orph->inos[i])); | 541 | (unsigned long long)le64_to_cpu(orph->inos[i])); |
542 | break; | 542 | break; |
543 | } | 543 | } |
544 | default: | 544 | default: |
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 2b267c9a1806..526c01ec8003 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
@@ -426,7 +426,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) | |||
426 | 426 | ||
427 | while (1) { | 427 | while (1) { |
428 | dbg_gen("feed '%s', ino %llu, new f_pos %#x", | 428 | dbg_gen("feed '%s', ino %llu, new f_pos %#x", |
429 | dent->name, le64_to_cpu(dent->inum), | 429 | dent->name, (unsigned long long)le64_to_cpu(dent->inum), |
430 | key_hash_flash(c, &dent->key)); | 430 | key_hash_flash(c, &dent->key)); |
431 | ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); | 431 | ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); |
432 | 432 | ||
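Both UBIFS printk fixes cast the result of le64_to_cpu() to unsigned long long before handing it to a %llu format: depending on the architecture, u64 may be typedef'd to unsigned long, so the explicit cast is what keeps the format string portable and warning-free.

    /* Portable on 32- and 64-bit targets alike: */
    printk(KERN_DEBUG "\t ino %llu\n",
           (unsigned long long)le64_to_cpu(orph->inos[i]));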
diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c index e045c8b55423..47814cde2407 100644 --- a/fs/ubifs/find.c +++ b/fs/ubifs/find.c | |||
@@ -507,7 +507,6 @@ int ubifs_find_free_space(struct ubifs_info *c, int min_space, int *free, | |||
507 | rsvd_idx_lebs = 0; | 507 | rsvd_idx_lebs = 0; |
508 | lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - | 508 | lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt - |
509 | c->lst.taken_empty_lebs; | 509 | c->lst.taken_empty_lebs; |
510 | ubifs_assert(lebs + c->lst.idx_lebs >= c->min_idx_lebs); | ||
511 | if (rsvd_idx_lebs < lebs) | 510 | if (rsvd_idx_lebs < lebs) |
512 | /* | 511 | /* |
513 | * OK to allocate an empty LEB, but we still don't want to go | 512 | * OK to allocate an empty LEB, but we still don't want to go |
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c index 13f1019c859f..02aba36fe3d4 100644 --- a/fs/ubifs/gc.c +++ b/fs/ubifs/gc.c | |||
@@ -334,15 +334,15 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) | |||
334 | 334 | ||
335 | err = move_nodes(c, sleb); | 335 | err = move_nodes(c, sleb); |
336 | if (err) | 336 | if (err) |
337 | goto out; | 337 | goto out_inc_seq; |
338 | 338 | ||
339 | err = gc_sync_wbufs(c); | 339 | err = gc_sync_wbufs(c); |
340 | if (err) | 340 | if (err) |
341 | goto out; | 341 | goto out_inc_seq; |
342 | 342 | ||
343 | err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0); | 343 | err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0); |
344 | if (err) | 344 | if (err) |
345 | goto out; | 345 | goto out_inc_seq; |
346 | 346 | ||
347 | /* Allow for races with TNC */ | 347 | /* Allow for races with TNC */ |
348 | c->gced_lnum = lnum; | 348 | c->gced_lnum = lnum; |
@@ -369,6 +369,14 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp) | |||
369 | out: | 369 | out: |
370 | ubifs_scan_destroy(sleb); | 370 | ubifs_scan_destroy(sleb); |
371 | return err; | 371 | return err; |
372 | |||
373 | out_inc_seq: | ||
374 | /* We may have moved at least some nodes so allow for races with TNC */ | ||
375 | c->gced_lnum = lnum; | ||
376 | smp_wmb(); | ||
377 | c->gc_seq += 1; | ||
378 | smp_wmb(); | ||
379 | goto out; | ||
372 | } | 380 | } |
373 | 381 | ||
374 | /** | 382 | /** |
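The new out_inc_seq error path in ubifs_garbage_collect_leb() ensures that even a failed GC pass, which may already have moved some nodes, bumps c->gc_seq so that TNC readers retry rather than trust a stale node location. The write barriers order the LEB number against the sequence bump; roughly:

    /* Sketch of the publish order relied on by the TNC race check: */
    c->gced_lnum = lnum;        /* which LEB was touched ... */
    smp_wmb();
    c->gc_seq += 1;             /* ... becomes visible no later than the new seq */
    smp_wmb();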
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 7562464ac83f..3f4902060c7a 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1024,14 +1024,13 @@ static int mount_ubifs(struct ubifs_info *c) | |||
1024 | goto out_dereg; | 1024 | goto out_dereg; |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id); | ||
1027 | if (!mounted_read_only) { | 1028 | if (!mounted_read_only) { |
1028 | err = alloc_wbufs(c); | 1029 | err = alloc_wbufs(c); |
1029 | if (err) | 1030 | if (err) |
1030 | goto out_cbuf; | 1031 | goto out_cbuf; |
1031 | 1032 | ||
1032 | /* Create background thread */ | 1033 | /* Create background thread */ |
1033 | sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, | ||
1034 | c->vi.vol_id); | ||
1035 | c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name); | 1034 | c->bgt = kthread_create(ubifs_bg_thread, c, c->bgt_name); |
1036 | if (!c->bgt) | 1035 | if (!c->bgt) |
1037 | c->bgt = ERR_PTR(-EINVAL); | 1036 | c->bgt = ERR_PTR(-EINVAL); |
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 7da209ab9378..7634c5970887 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c | |||
@@ -1476,7 +1476,7 @@ again: | |||
1476 | } | 1476 | } |
1477 | 1477 | ||
1478 | err = fallible_read_node(c, key, &zbr, node); | 1478 | err = fallible_read_node(c, key, &zbr, node); |
1479 | if (maybe_leb_gced(c, zbr.lnum, gc_seq1)) { | 1479 | if (err <= 0 || maybe_leb_gced(c, zbr.lnum, gc_seq1)) { |
1480 | /* | 1480 | /* |
1481 | * The node may have been GC'ed out from under us so try again | 1481 | * The node may have been GC'ed out from under us so try again |
1482 | * while keeping the TNC mutex locked. | 1482 | * while keeping the TNC mutex locked. |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 00e80df9dd9d..dbd9cef852ec 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -4118,7 +4118,7 @@ xfs_iext_indirect_to_direct( | |||
4118 | ASSERT(nextents <= XFS_LINEAR_EXTS); | 4118 | ASSERT(nextents <= XFS_LINEAR_EXTS); |
4119 | size = nextents * sizeof(xfs_bmbt_rec_t); | 4119 | size = nextents * sizeof(xfs_bmbt_rec_t); |
4120 | 4120 | ||
4121 | xfs_iext_irec_compact_full(ifp); | 4121 | xfs_iext_irec_compact_pages(ifp); |
4122 | ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); | 4122 | ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ); |
4123 | 4123 | ||
4124 | ep = ifp->if_u1.if_ext_irec->er_extbuf; | 4124 | ep = ifp->if_u1.if_ext_irec->er_extbuf; |
@@ -4449,8 +4449,7 @@ xfs_iext_irec_remove( | |||
4449 | * compaction policy is as follows: | 4449 | * compaction policy is as follows: |
4450 | * | 4450 | * |
4451 | * Full Compaction: Extents fit into a single page (or inline buffer) | 4451 | * Full Compaction: Extents fit into a single page (or inline buffer) |
4452 | * Full Compaction: Extents occupy less than 10% of allocated space | 4452 | * Partial Compaction: Extents occupy less than 50% of allocated space |
4453 | * Partial Compaction: Extents occupy > 10% and < 50% of allocated space | ||
4454 | * No Compaction: Extents occupy at least 50% of allocated space | 4453 | * No Compaction: Extents occupy at least 50% of allocated space |
4455 | */ | 4454 | */ |
4456 | void | 4455 | void |
@@ -4471,8 +4470,6 @@ xfs_iext_irec_compact( | |||
4471 | xfs_iext_direct_to_inline(ifp, nextents); | 4470 | xfs_iext_direct_to_inline(ifp, nextents); |
4472 | } else if (nextents <= XFS_LINEAR_EXTS) { | 4471 | } else if (nextents <= XFS_LINEAR_EXTS) { |
4473 | xfs_iext_indirect_to_direct(ifp); | 4472 | xfs_iext_indirect_to_direct(ifp); |
4474 | } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) { | ||
4475 | xfs_iext_irec_compact_full(ifp); | ||
4476 | } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { | 4473 | } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) { |
4477 | xfs_iext_irec_compact_pages(ifp); | 4474 | xfs_iext_irec_compact_pages(ifp); |
4478 | } | 4475 | } |
@@ -4496,7 +4493,7 @@ xfs_iext_irec_compact_pages( | |||
4496 | erp_next = erp + 1; | 4493 | erp_next = erp + 1; |
4497 | if (erp_next->er_extcount <= | 4494 | if (erp_next->er_extcount <= |
4498 | (XFS_LINEAR_EXTS - erp->er_extcount)) { | 4495 | (XFS_LINEAR_EXTS - erp->er_extcount)) { |
4499 | memmove(&erp->er_extbuf[erp->er_extcount], | 4496 | memcpy(&erp->er_extbuf[erp->er_extcount], |
4500 | erp_next->er_extbuf, erp_next->er_extcount * | 4497 | erp_next->er_extbuf, erp_next->er_extcount * |
4501 | sizeof(xfs_bmbt_rec_t)); | 4498 | sizeof(xfs_bmbt_rec_t)); |
4502 | erp->er_extcount += erp_next->er_extcount; | 4499 | erp->er_extcount += erp_next->er_extcount; |
@@ -4516,91 +4513,6 @@ xfs_iext_irec_compact_pages( | |||
4516 | } | 4513 | } |
4517 | 4514 | ||
4518 | /* | 4515 | /* |
4519 | * Fully compact the extent records managed by the indirection array. | ||
4520 | */ | ||
4521 | void | ||
4522 | xfs_iext_irec_compact_full( | ||
4523 | xfs_ifork_t *ifp) /* inode fork pointer */ | ||
4524 | { | ||
4525 | xfs_bmbt_rec_host_t *ep, *ep_next; /* extent record pointers */ | ||
4526 | xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */ | ||
4527 | int erp_idx = 0; /* extent irec index */ | ||
4528 | int ext_avail; /* empty entries in ex list */ | ||
4529 | int ext_diff; /* number of exts to add */ | ||
4530 | int nlists; /* number of irec's (ex lists) */ | ||
4531 | |||
4532 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); | ||
4533 | |||
4534 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; | ||
4535 | erp = ifp->if_u1.if_ext_irec; | ||
4536 | ep = &erp->er_extbuf[erp->er_extcount]; | ||
4537 | erp_next = erp + 1; | ||
4538 | ep_next = erp_next->er_extbuf; | ||
4539 | |||
4540 | while (erp_idx < nlists - 1) { | ||
4541 | /* | ||
4542 | * Check how many extent records are available in this irec. | ||
4543 | * If there is none skip the whole exercise. | ||
4544 | */ | ||
4545 | ext_avail = XFS_LINEAR_EXTS - erp->er_extcount; | ||
4546 | if (ext_avail) { | ||
4547 | |||
4548 | /* | ||
4549 | * Copy over as many as possible extent records into | ||
4550 | * the previous page. | ||
4551 | */ | ||
4552 | ext_diff = MIN(ext_avail, erp_next->er_extcount); | ||
4553 | memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t)); | ||
4554 | erp->er_extcount += ext_diff; | ||
4555 | erp_next->er_extcount -= ext_diff; | ||
4556 | |||
4557 | /* | ||
4558 | * If the next irec is empty now we can simply | ||
4559 | * remove it. | ||
4560 | */ | ||
4561 | if (erp_next->er_extcount == 0) { | ||
4562 | /* | ||
4563 | * Free page before removing extent record | ||
4564 | * so er_extoffs don't get modified in | ||
4565 | * xfs_iext_irec_remove. | ||
4566 | */ | ||
4567 | kmem_free(erp_next->er_extbuf); | ||
4568 | erp_next->er_extbuf = NULL; | ||
4569 | xfs_iext_irec_remove(ifp, erp_idx + 1); | ||
4570 | erp = &ifp->if_u1.if_ext_irec[erp_idx]; | ||
4571 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; | ||
4572 | |||
4573 | /* | ||
4574 | * If the next irec is not empty move up the content | ||
4575 | * that has not been copied to the previous page to | ||
4576 | * the beggining of this one. | ||
4577 | */ | ||
4578 | } else { | ||
4579 | memmove(erp_next->er_extbuf, &ep_next[ext_diff], | ||
4580 | erp_next->er_extcount * | ||
4581 | sizeof(xfs_bmbt_rec_t)); | ||
4582 | ep_next = erp_next->er_extbuf; | ||
4583 | memset(&ep_next[erp_next->er_extcount], 0, | ||
4584 | (XFS_LINEAR_EXTS - | ||
4585 | erp_next->er_extcount) * | ||
4586 | sizeof(xfs_bmbt_rec_t)); | ||
4587 | } | ||
4588 | } | ||
4589 | |||
4590 | if (erp->er_extcount == XFS_LINEAR_EXTS) { | ||
4591 | erp_idx++; | ||
4592 | if (erp_idx < nlists) | ||
4593 | erp = &ifp->if_u1.if_ext_irec[erp_idx]; | ||
4594 | else | ||
4595 | break; | ||
4596 | } | ||
4597 | ep = &erp->er_extbuf[erp->er_extcount]; | ||
4598 | erp_next = erp + 1; | ||
4599 | ep_next = erp_next->er_extbuf; | ||
4600 | } | ||
4601 | } | ||
4602 | |||
4603 | /* | ||
4604 | * This is called to update the er_extoff field in the indirection | 4516 | * This is called to update the er_extoff field in the indirection |
4605 | * array when extents have been added or removed from one of the | 4517 | * array when extents have been added or removed from one of the |
4606 | * extent lists. erp_idx contains the irec index to begin updating | 4518 | * extent lists. erp_idx contains the irec index to begin updating |
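With xfs_iext_irec_compact_full() gone, page merging in xfs_iext_irec_compact_pages() is the only multi-page compaction strategy, and its memmove() becomes a memcpy(): the destination (&erp->er_extbuf[erp->er_extcount]) and the source (erp_next->er_extbuf) live in separately allocated extent pages, so the regions cannot overlap and the cheaper copy is safe. Roughly:

    /* Two distinct er_extbuf allocations -> no overlap -> memcpy is sufficient. */
    memcpy(&erp->er_extbuf[erp->er_extcount], erp_next->er_extbuf,
           erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));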
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h new file mode 100644 index 000000000000..fa4328f9124f --- /dev/null +++ b/include/asm-mips/cevt-r4k.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2008 Kevin D. Kissell | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * Definitions used for common event timer implementation | ||
11 | * for MIPS 4K-type processors and their MIPS MT variants. | ||
12 | * Avoids unsightly extern declarations in C files. | ||
13 | */ | ||
14 | #ifndef __ASM_CEVT_R4K_H | ||
15 | #define __ASM_CEVT_R4K_H | ||
16 | |||
17 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||
18 | |||
19 | void mips_event_handler(struct clock_event_device *dev); | ||
20 | int c0_compare_int_usable(void); | ||
21 | void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *); | ||
22 | irqreturn_t c0_compare_interrupt(int, void *); | ||
23 | |||
24 | extern struct irqaction c0_compare_irqaction; | ||
25 | extern int cp0_timer_irq_installed; | ||
26 | |||
27 | /* | ||
28 | * Possibly handle a performance counter interrupt. | ||
29 | * Return true if the timer interrupt should not be checked. | ||
30 | */ | ||
31 | |||
32 | static inline int handle_perf_irq(int r2) | ||
33 | { | ||
34 | /* | ||
35 | * The performance counter overflow interrupt may be shared with the | ||
36 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
37 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
38 | * and we can't reliably determine if a counter interrupt has also | ||
39 | * happened (!r2) then don't check for a timer interrupt. | ||
40 | */ | ||
41 | return (cp0_perfcount_irq < 0) && | ||
42 | perf_irq() == IRQ_HANDLED && | ||
43 | !r2; | ||
44 | } | ||
45 | |||
46 | #endif /* __ASM_CEVT_R4K_H */ | ||
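handle_perf_irq() is intended to be consulted by the CP0 compare interrupt path before the timer is serviced: when the perf counter shares the timer IRQ line, the perf handler consumed the event, and the core cannot reliably report whether the timer also fired, the caller should stop there. The sketch below shows only that shape; read_cause_ti() and service_clockevent() are hypothetical placeholders, not declarations from this header.

#include <linux/interrupt.h>
#include <asm/cevt-r4k.h>

extern int read_cause_ti(void);                /* hypothetical: did Cause.TI fire? */
extern void service_clockevent(void *dev_id);  /* hypothetical: run the clockevent handler */

/* Sketch of a caller, under the assumptions stated above. */
static irqreturn_t timer_interrupt_sketch(int irq, void *dev_id)
{
	const int r2 = 1;                      /* assume an R2 core that can report Cause.TI */

	if (handle_perf_irq(r2))
		return IRQ_HANDLED;            /* perf consumed the shared interrupt */

	if (r2 && !read_cause_ti())
		return IRQ_HANDLED;            /* the timer did not actually fire */

	service_clockevent(dev_id);
	return IRQ_HANDLED;
}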
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h index 881e8866501d..701ec0ba8fa9 100644 --- a/include/asm-mips/irqflags.h +++ b/include/asm-mips/irqflags.h | |||
@@ -38,8 +38,17 @@ __asm__( | |||
38 | " .set pop \n" | 38 | " .set pop \n" |
39 | " .endm"); | 39 | " .endm"); |
40 | 40 | ||
41 | extern void smtc_ipi_replay(void); | ||
42 | |||
41 | static inline void raw_local_irq_enable(void) | 43 | static inline void raw_local_irq_enable(void) |
42 | { | 44 | { |
45 | #ifdef CONFIG_MIPS_MT_SMTC | ||
46 | /* | ||
47 | * SMTC kernel needs to do a software replay of queued | ||
48 | * IPIs, at the cost of call overhead on each local_irq_enable() | ||
49 | */ | ||
50 | smtc_ipi_replay(); | ||
51 | #endif | ||
43 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
44 | "raw_local_irq_enable" | 53 | "raw_local_irq_enable" |
45 | : /* no outputs */ | 54 | : /* no outputs */ |
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void) | |||
47 | : "memory"); | 56 | : "memory"); |
48 | } | 57 | } |
49 | 58 | ||
59 | |||
50 | /* | 60 | /* |
51 | * For cli() we have to insert nops to make sure that the new value | 61 | * For cli() we have to insert nops to make sure that the new value |
52 | * has actually arrived in the status register before the end of this | 62 | * has actually arrived in the status register before the end of this |
@@ -185,15 +195,14 @@ __asm__( | |||
185 | " .set pop \n" | 195 | " .set pop \n" |
186 | " .endm \n"); | 196 | " .endm \n"); |
187 | 197 | ||
188 | extern void smtc_ipi_replay(void); | ||
189 | 198 | ||
190 | static inline void raw_local_irq_restore(unsigned long flags) | 199 | static inline void raw_local_irq_restore(unsigned long flags) |
191 | { | 200 | { |
192 | unsigned long __tmp1; | 201 | unsigned long __tmp1; |
193 | 202 | ||
194 | #ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | 203 | #ifdef CONFIG_MIPS_MT_SMTC |
195 | /* | 204 | /* |
196 | * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred | 205 | * SMTC kernel needs to do a software replay of queued |
197 | * IPIs, at the cost of branch and call overhead on each | 206 | * IPIs, at the cost of branch and call overhead on each |
198 | * local_irq_restore() | 207 | * local_irq_restore() |
199 | */ | 208 | */ |
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags) | |||
208 | : "memory"); | 217 | : "memory"); |
209 | } | 218 | } |
210 | 219 | ||
220 | static inline void __raw_local_irq_restore(unsigned long flags) | ||
221 | { | ||
222 | unsigned long __tmp1; | ||
223 | |||
224 | __asm__ __volatile__( | ||
225 | "raw_local_irq_restore\t%0" | ||
226 | : "=r" (__tmp1) | ||
227 | : "0" (flags) | ||
228 | : "memory"); | ||
229 | } | ||
230 | |||
211 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 231 | static inline int raw_irqs_disabled_flags(unsigned long flags) |
212 | { | 232 | { |
213 | #ifdef CONFIG_MIPS_MT_SMTC | 233 | #ifdef CONFIG_MIPS_MT_SMTC |
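The second hunk also adds __raw_local_irq_restore(), the same Status write without the SMTC replay hook. The most likely reason (an assumption, since the callers are not part of this diff) is that the replay path itself needs to restore flags without re-entering the replay logic, roughly:

/* Sketch (assumption): why a non-replaying restore is needed at all.
 * If this used raw_local_irq_restore(), which under CONFIG_MIPS_MT_SMTC
 * replays queued IPIs itself, the replay could recurse into the replay.
 */
static void smtc_ipi_replay_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... dequeue and dispatch IPIs queued for this TC ... */
	__raw_local_irq_restore(flags);        /* plain restore: no replay hook */
}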
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h index a46f8e258e6b..979866000da4 100644 --- a/include/asm-mips/mipsregs.h +++ b/include/asm-mips/mipsregs.h | |||
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \ | |||
1462 | { \ | 1462 | { \ |
1463 | unsigned int res; \ | 1463 | unsigned int res; \ |
1464 | unsigned int omt; \ | 1464 | unsigned int omt; \ |
1465 | unsigned int flags; \ | 1465 | unsigned long flags; \ |
1466 | \ | 1466 | \ |
1467 | local_irq_save(flags); \ | 1467 | local_irq_save(flags); \ |
1468 | omt = __dmt(); \ | 1468 | omt = __dmt(); \ |
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \ | |||
1480 | { \ | 1480 | { \ |
1481 | unsigned int res; \ | 1481 | unsigned int res; \ |
1482 | unsigned int omt; \ | 1482 | unsigned int omt; \ |
1483 | unsigned int flags; \ | 1483 | unsigned long flags; \ |
1484 | \ | 1484 | \ |
1485 | local_irq_save(flags); \ | 1485 | local_irq_save(flags); \ |
1486 | omt = __dmt(); \ | 1486 | omt = __dmt(); \ |
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \ | |||
1498 | { \ | 1498 | { \ |
1499 | unsigned int res; \ | 1499 | unsigned int res; \ |
1500 | unsigned int omt; \ | 1500 | unsigned int omt; \ |
1501 | unsigned int flags; \ | 1501 | unsigned long flags; \ |
1502 | \ | 1502 | \ |
1503 | local_irq_save(flags); \ | 1503 | local_irq_save(flags); \ |
1504 | \ | 1504 | \ |
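All three hunks change the saved-flags variable from unsigned int to unsigned long. The generic local_irq_save() wrapper type-checks its argument as unsigned long, so the unsigned int form at best trips that check and at worst truncates the saved state on 64-bit builds. The canonical pattern, for reference:

	unsigned long flags;                   /* must be unsigned long, never unsigned int */

	local_irq_save(flags);                 /* disable interrupts, remember prior state */
	/* ... short critical section ... */
	local_irq_restore(flags);              /* restore exactly what was saved */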
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h index 4396e9ffd418..55813d6150c7 100644 --- a/include/asm-mips/pgtable-32.h +++ b/include/asm-mips/pgtable-32.h | |||
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
57 | #define PMD_ORDER 1 | 57 | #define PMD_ORDER 1 |
58 | #define PTE_ORDER 0 | 58 | #define PTE_ORDER 0 |
59 | 59 | ||
60 | #define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t)) | 60 | #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) |
61 | #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) | 61 | #define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t)) |
62 | 62 | ||
63 | #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) | 63 | #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) |
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index 3639b28f80db..ea60bf08dcb0 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <asm/mips_mt.h> | 8 | #include <asm/mips_mt.h> |
9 | #include <asm/smtc_ipi.h> | ||
9 | 10 | ||
10 | /* | 11 | /* |
11 | * System-wide SMTC status information | 12 | * System-wide SMTC status information |
@@ -38,14 +39,15 @@ struct mm_struct; | |||
38 | struct task_struct; | 39 | struct task_struct; |
39 | 40 | ||
40 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | 41 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); |
41 | 42 | void self_ipi(struct smtc_ipi *); | |
42 | void smtc_flush_tlb_asid(unsigned long asid); | 43 | void smtc_flush_tlb_asid(unsigned long asid); |
43 | extern int mipsmt_build_cpu_map(int startslot); | 44 | extern int smtc_build_cpu_map(int startslot); |
44 | extern void mipsmt_prepare_cpus(void); | 45 | extern void smtc_prepare_cpus(int cpus); |
45 | extern void smtc_smp_finish(void); | 46 | extern void smtc_smp_finish(void); |
46 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | 47 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); |
47 | extern void smtc_cpus_done(void); | 48 | extern void smtc_cpus_done(void); |
48 | 49 | ||
50 | |||
49 | /* | 51 | /* |
50 | * Sharing the TLB between multiple VPEs means that the | 52 | * Sharing the TLB between multiple VPEs means that the |
51 | * "random" index selection function is not allowed to | 53 | * "random" index selection function is not allowed to |
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 051e1af0bb95..4c37c4e5f72e 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -297,14 +297,31 @@ | |||
297 | #ifdef CONFIG_MIPS_MT_SMTC | 297 | #ifdef CONFIG_MIPS_MT_SMTC |
298 | .set mips32r2 | 298 | .set mips32r2 |
299 | /* | 299 | /* |
300 | * This may not really be necessary if ints are already | 300 | * We need to make sure the read-modify-write |
301 | * inhibited here. | 301 | * of Status below isn't perturbed by an interrupt |
302 | * or cross-TC access, so we need to do at least a DMT, | ||
303 | * protected by an interrupt-inhibit. But setting IXMT | ||
304 | * also creates a few-cycle window where an IPI could | ||
305 | * be queued and not be detected before potentially | ||
306 | * returning to a WAIT or user-mode loop. It must be | ||
307 | * replayed. | ||
308 | * | ||
309 | * We're in the middle of a context switch, and | ||
310 | * we can't dispatch it directly without trashing | ||
311 | * some registers, so we'll try to detect this unlikely | ||
312 | * case and program a software interrupt in the VPE, | ||
313 | * as would be done for a cross-VPE IPI. To accommodate | ||
314 | * the handling of that case, we're doing a DVPE instead | ||
315 | * of just a DMT here to protect against other threads. | ||
316 | * This is a lot of cruft to cover a tiny window. | ||
317 | * If you can find a better design, implement it! | ||
318 | * | ||
302 | */ | 319 | */ |
303 | mfc0 v0, CP0_TCSTATUS | 320 | mfc0 v0, CP0_TCSTATUS |
304 | ori v0, TCSTATUS_IXMT | 321 | ori v0, TCSTATUS_IXMT |
305 | mtc0 v0, CP0_TCSTATUS | 322 | mtc0 v0, CP0_TCSTATUS |
306 | _ehb | 323 | _ehb |
307 | DMT 5 # dmt a1 | 324 | DVPE 5 # dvpe a1 |
308 | jal mips_ihb | 325 | jal mips_ihb |
309 | #endif /* CONFIG_MIPS_MT_SMTC */ | 326 | #endif /* CONFIG_MIPS_MT_SMTC */ |
310 | mfc0 a0, CP0_STATUS | 327 | mfc0 a0, CP0_STATUS |
@@ -325,17 +342,50 @@ | |||
325 | */ | 342 | */ |
326 | LONG_L v1, PT_TCSTATUS(sp) | 343 | LONG_L v1, PT_TCSTATUS(sp) |
327 | _ehb | 344 | _ehb |
328 | mfc0 v0, CP0_TCSTATUS | 345 | mfc0 a0, CP0_TCSTATUS |
329 | andi v1, TCSTATUS_IXMT | 346 | andi v1, TCSTATUS_IXMT |
330 | /* We know that TCStatua.IXMT should be set from above */ | 347 | bnez v1, 0f |
331 | xori v0, v0, TCSTATUS_IXMT | 348 | |
332 | or v0, v0, v1 | 349 | /* |
333 | mtc0 v0, CP0_TCSTATUS | 350 | * We'd like to detect any IPIs queued in the tiny window |
334 | _ehb | 351 | * above and request a software interrupt to service them
335 | andi a1, a1, VPECONTROL_TE | 352 | * when we ERET. |
353 | * | ||
354 | * Computing the offset into the IPIQ array of the executing | ||
355 | * TC's IPI queue in-line would be tedious. We use part of | ||
356 | * the TCContext register to hold 16 bits of offset that we | ||
357 | * can add in-line to find the queue head. | ||
358 | */ | ||
359 | mfc0 v0, CP0_TCCONTEXT | ||
360 | la a2, IPIQ | ||
361 | srl v0, v0, 16 | ||
362 | addu a2, a2, v0 | ||
363 | LONG_L v0, 0(a2) | ||
364 | beqz v0, 0f | ||
365 | /* | ||
366 | * If we have a queue, provoke dispatch within the VPE by setting C_SW1 | ||
367 | */ | ||
368 | mfc0 v0, CP0_CAUSE | ||
369 | ori v0, v0, C_SW1 | ||
370 | mtc0 v0, CP0_CAUSE | ||
371 | 0: | ||
372 | /* | ||
373 | * This test should really never branch but | ||
374 | * let's be prudent here. Having atomized | ||
375 | * the shared register modifications, we can | ||
376 | * now EVPE, and must do so before interrupts | ||
377 | * are potentially re-enabled. | ||
378 | */ | ||
379 | andi a1, a1, MVPCONTROL_EVP | ||
336 | beqz a1, 1f | 380 | beqz a1, 1f |
337 | emt | 381 | evpe |
338 | 1: | 382 | 1: |
383 | /* We know that TCStatus.IXMT should be set from above */ ||
384 | xori a0, a0, TCSTATUS_IXMT | ||
385 | or a0, a0, v1 | ||
386 | mtc0 a0, CP0_TCSTATUS | ||
387 | _ehb | ||
388 | |||
339 | .set mips0 | 389 | .set mips0 |
340 | #endif /* CONFIG_MIPS_MT_SMTC */ | 390 | #endif /* CONFIG_MIPS_MT_SMTC */ |
341 | LONG_L v1, PT_EPC(sp) | 391 | LONG_L v1, PT_EPC(sp) |
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h index 635d764dc13e..35d1743b57ac 100644 --- a/include/asm-x86/acpi.h +++ b/include/asm-x86/acpi.h | |||
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) | |||
140 | boot_cpu_data.x86_model <= 0x05 && | 140 | boot_cpu_data.x86_model <= 0x05 && |
141 | boot_cpu_data.x86_mask < 0x0A) | 141 | boot_cpu_data.x86_mask < 0x0A) |
142 | return 1; | 142 | return 1; |
143 | else if (boot_cpu_has(X86_FEATURE_AMDC1E)) | ||
144 | return 1; | ||
143 | else | 145 | else |
144 | return max_cstate; | 146 | return max_cstate; |
145 | } | 147 | } |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h index 9489283a4bcf..cfcfb0a806ba 100644 --- a/include/asm-x86/cpufeature.h +++ b/include/asm-x86/cpufeature.h | |||
@@ -81,6 +81,7 @@ | |||
81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ |
82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | 82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ |
83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | 83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
84 | #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ | ||
84 | 85 | ||
85 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 86 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
86 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 87 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h index d240e5b30a45..cbb649123612 100644 --- a/include/asm-x86/idle.h +++ b/include/asm-x86/idle.h | |||
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n); | |||
10 | void enter_idle(void); | 10 | void enter_idle(void); |
11 | void exit_idle(void); | 11 | void exit_idle(void); |
12 | 12 | ||
13 | void c1e_remove_cpu(int cpu); | ||
14 | |||
13 | #endif | 15 | #endif |
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h index 484c47554f3b..94d63db10365 100644 --- a/include/asm-x86/kgdb.h +++ b/include/asm-x86/kgdb.h | |||
@@ -39,12 +39,13 @@ enum regnames { | |||
39 | GDB_FS, /* 14 */ | 39 | GDB_FS, /* 14 */ |
40 | GDB_GS, /* 15 */ | 40 | GDB_GS, /* 15 */ |
41 | }; | 41 | }; |
42 | #define NUMREGBYTES ((GDB_GS+1)*4) | ||
42 | #else /* ! CONFIG_X86_32 */ | 43 | #else /* ! CONFIG_X86_32 */ |
43 | enum regnames { | 44 | enum regnames64 { |
44 | GDB_AX, /* 0 */ | 45 | GDB_AX, /* 0 */ |
45 | GDB_DX, /* 1 */ | 46 | GDB_BX, /* 1 */ |
46 | GDB_CX, /* 2 */ | 47 | GDB_CX, /* 2 */ |
47 | GDB_BX, /* 3 */ | 48 | GDB_DX, /* 3 */ |
48 | GDB_SI, /* 4 */ | 49 | GDB_SI, /* 4 */ |
49 | GDB_DI, /* 5 */ | 50 | GDB_DI, /* 5 */ |
50 | GDB_BP, /* 6 */ | 51 | GDB_BP, /* 6 */ |
@@ -58,18 +59,15 @@ enum regnames { | |||
58 | GDB_R14, /* 14 */ | 59 | GDB_R14, /* 14 */ |
59 | GDB_R15, /* 15 */ | 60 | GDB_R15, /* 15 */ |
60 | GDB_PC, /* 16 */ | 61 | GDB_PC, /* 16 */ |
61 | GDB_PS, /* 17 */ | ||
62 | }; | 62 | }; |
63 | #endif /* CONFIG_X86_32 */ | ||
64 | 63 | ||
65 | /* | 64 | enum regnames32 { |
66 | * Number of bytes of registers: | 65 | GDB_PS = 34, |
67 | */ | 66 | GDB_CS, |
68 | #ifdef CONFIG_X86_32 | 67 | GDB_SS, |
69 | # define NUMREGBYTES 64 | 68 | }; |
70 | #else | 69 | #define NUMREGBYTES ((GDB_SS+1)*4) |
71 | # define NUMREGBYTES ((GDB_PS+1)*8) | 70 | #endif /* CONFIG_X86_32 */ |
72 | #endif | ||
73 | 71 | ||
74 | static inline void arch_kgdb_breakpoint(void) | 72 | static inline void arch_kgdb_breakpoint(void) |
75 | { | 73 | { |
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h index 515d4dce96b5..45806d60bcbe 100644 --- a/include/asm-x86/uaccess_64.h +++ b/include/asm-x86/uaccess_64.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/compiler.h> | 7 | #include <linux/compiler.h> |
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/prefetch.h> | 9 | #include <linux/prefetch.h> |
10 | #include <linux/lockdep.h> | ||
10 | #include <asm/page.h> | 11 | #include <asm/page.h> |
11 | 12 | ||
12 | /* | 13 | /* |
diff --git a/arch/arm/include/asm/cnt32_to_63.h b/include/linux/cnt32_to_63.h index 480c873fa746..8c0f9505b48c 100644 --- a/arch/arm/include/asm/cnt32_to_63.h +++ b/include/linux/cnt32_to_63.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm/cnt32_to_63.h -- extend a 32-bit counter to 63 bits | 2 | * Extend a 32-bit counter to 63 bits |
3 | * | 3 | * |
4 | * Author: Nicolas Pitre | 4 | * Author: Nicolas Pitre |
5 | * Created: December 3, 2006 | 5 | * Created: December 3, 2006 |
@@ -10,15 +10,30 @@ | |||
10 | * as published by the Free Software Foundation. | 10 | * as published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef __INCLUDE_CNT32_TO_63_H__ | 13 | #ifndef __LINUX_CNT32_TO_63_H__ |
14 | #define __INCLUDE_CNT32_TO_63_H__ | 14 | #define __LINUX_CNT32_TO_63_H__ |
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <asm/types.h> | 17 | #include <linux/types.h> |
18 | #include <asm/byteorder.h> | 18 | #include <asm/byteorder.h> |
19 | 19 | ||
20 | /* | 20 | /* this is used only to give gcc a clue about good code generation */ |
21 | * Prototype: u64 cnt32_to_63(u32 cnt) | 21 | union cnt32_to_63 { |
22 | struct { | ||
23 | #if defined(__LITTLE_ENDIAN) | ||
24 | u32 lo, hi; | ||
25 | #elif defined(__BIG_ENDIAN) | ||
26 | u32 hi, lo; | ||
27 | #endif | ||
28 | }; | ||
29 | u64 val; | ||
30 | }; | ||
31 | |||
32 | |||
33 | /** | ||
34 | * cnt32_to_63 - Expand a 32-bit counter to a 63-bit counter | ||
35 | * @cnt_lo: The low part of the counter | ||
36 | * | ||
22 | * Many hardware clock counters are only 32 bits wide and therefore have | 37 | * Many hardware clock counters are only 32 bits wide and therefore have |
23 | * a relatively short period making wrap-arounds rather frequent. This | 38 | * a relatively short period making wrap-arounds rather frequent. This |
24 | * is a problem when implementing sched_clock() for example, where a 64-bit | 39 | * is a problem when implementing sched_clock() for example, where a 64-bit |
@@ -51,26 +66,13 @@ | |||
51 | * clear-bit instruction. Otherwise caller must remember to clear the top | 66 | * clear-bit instruction. Otherwise caller must remember to clear the top |
52 | * bit explicitly. | 67 | * bit explicitly. |
53 | */ | 68 | */ |
54 | |||
55 | /* this is used only to give gcc a clue about good code generation */ | ||
56 | typedef union { | ||
57 | struct { | ||
58 | #if defined(__LITTLE_ENDIAN) | ||
59 | u32 lo, hi; | ||
60 | #elif defined(__BIG_ENDIAN) | ||
61 | u32 hi, lo; | ||
62 | #endif | ||
63 | }; | ||
64 | u64 val; | ||
65 | } cnt32_to_63_t; | ||
66 | |||
67 | #define cnt32_to_63(cnt_lo) \ | 69 | #define cnt32_to_63(cnt_lo) \ |
68 | ({ \ | 70 | ({ \ |
69 | static volatile u32 __m_cnt_hi = 0; \ | 71 | static volatile u32 __m_cnt_hi; \ |
70 | cnt32_to_63_t __x; \ | 72 | union cnt32_to_63 __x; \ |
71 | __x.hi = __m_cnt_hi; \ | 73 | __x.hi = __m_cnt_hi; \ |
72 | __x.lo = (cnt_lo); \ | 74 | __x.lo = (cnt_lo); \ |
73 | if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ | 75 | if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ |
74 | __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ | 76 | __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ |
75 | __x.val; \ | 77 | __x.val; \ |
76 | }) | 78 | }) |
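A typical consumer is a sched_clock() built on a fast-wrapping 32-bit hardware counter, which is why the header moves from ARM-only to include/linux. The sketch below assumes a hypothetical read_hw_counter() and a fixed nanoseconds-per-tick factor; note the explicit clearing of bit 63, which the comment above leaves to the caller whenever the scaling does not do it for free.

#include <linux/cnt32_to_63.h>

#define NSEC_PER_TICK	10ULL                  /* assumed counter rate: one tick per 10 ns */

extern u32 read_hw_counter(void);              /* hypothetical 32-bit counter read */

unsigned long long sched_clock(void)
{
	u64 cycles = cnt32_to_63(read_hw_counter());

	cycles &= ~(1ULL << 63);               /* caller clears the top bit, as noted above */
	return cycles * NSEC_PER_TICK;
}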
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6d93dce61cbb..2f245fe63bda 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -47,14 +47,22 @@ enum hrtimer_restart { | |||
47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context | 47 | * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context |
48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and | 48 | * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and |
49 | * does not restart the timer | 49 | * does not restart the timer |
50 | * HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: Callback must run in hardirq context | 50 | * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context |
51 | * Special mode for tick emultation | 51 | * Special mode for tick emulation and |
52 | * scheduler timer. Such timers are per | ||
53 | * cpu and not allowed to be migrated on | ||
54 | * cpu unplug. | ||
55 | * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context | ||
56 | * with timer->base lock unlocked; | ||
57 | * used for timers which call wakeup to | ||
58 | * avoid lock-order problems with rq->lock | ||
52 | */ | 59 | */ |
53 | enum hrtimer_cb_mode { | 60 | enum hrtimer_cb_mode { |
54 | HRTIMER_CB_SOFTIRQ, | 61 | HRTIMER_CB_SOFTIRQ, |
55 | HRTIMER_CB_IRQSAFE, | 62 | HRTIMER_CB_IRQSAFE, |
56 | HRTIMER_CB_IRQSAFE_NO_RESTART, | 63 | HRTIMER_CB_IRQSAFE_NO_RESTART, |
57 | HRTIMER_CB_IRQSAFE_NO_SOFTIRQ, | 64 | HRTIMER_CB_IRQSAFE_PERCPU, |
65 | HRTIMER_CB_IRQSAFE_UNLOCKED, | ||
58 | }; | 66 | }; |
59 | 67 | ||
60 | /* | 68 | /* |
@@ -67,9 +75,10 @@ enum hrtimer_cb_mode { | |||
67 | * 0x02 callback function running | 75 | * 0x02 callback function running |
68 | * 0x04 callback pending (high resolution mode) | 76 | * 0x04 callback pending (high resolution mode) |
69 | * | 77 | * |
70 | * Special case: | 78 | * Special cases: |
71 | * 0x03 callback function running and enqueued | 79 | * 0x03 callback function running and enqueued |
72 | * (was requeued on another CPU) | 80 | * (was requeued on another CPU) |
81 | * 0x09 timer was migrated on CPU hotunplug | ||
73 | * The "callback function running and enqueued" status is only possible on | 82 | * The "callback function running and enqueued" status is only possible on |
74 | * SMP. It happens for example when a posix timer expired and the callback | 83 | * SMP. It happens for example when a posix timer expired and the callback |
75 | * queued a signal. Between dropping the lock which protects the posix timer | 84 | * queued a signal. Between dropping the lock which protects the posix timer |
@@ -87,6 +96,7 @@ enum hrtimer_cb_mode { | |||
87 | #define HRTIMER_STATE_ENQUEUED 0x01 | 96 | #define HRTIMER_STATE_ENQUEUED 0x01 |
88 | #define HRTIMER_STATE_CALLBACK 0x02 | 97 | #define HRTIMER_STATE_CALLBACK 0x02 |
89 | #define HRTIMER_STATE_PENDING 0x04 | 98 | #define HRTIMER_STATE_PENDING 0x04 |
99 | #define HRTIMER_STATE_MIGRATE 0x08 | ||
90 | 100 | ||
91 | /** | 101 | /** |
92 | * struct hrtimer - the basic hrtimer structure | 102 | * struct hrtimer - the basic hrtimer structure |
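HRTIMER_CB_IRQSAFE_NO_SOFTIRQ is split into two modes, and later hunks in this same series pick one or the other: the scheduler tick and rq hrtick timers become HRTIMER_CB_IRQSAFE_PERCPU, while the hrtimer sleeper and rt-bandwidth timer become HRTIMER_CB_IRQSAFE_UNLOCKED. In outline (the hrtimer_init() call is illustrative; the field assignments mirror those hunks):

	/* Per-CPU timer: must be cancelled before its CPU is unplugged;
	 * the migration code further below WARNs if it ever meets one. */
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
	rq->hrtick_timer.cb_mode  = HRTIMER_CB_IRQSAFE_PERCPU;

	/* Wakeup-style timer: runs with timer->base lock dropped so the
	 * callback can take rq->lock without inverting lock order; it may
	 * be migrated on CPU unplug. */
	sl->timer.function = hrtimer_wakeup;
	sl->timer.cb_mode  = HRTIMER_CB_IRQSAFE_UNLOCKED;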
diff --git a/include/linux/pci.h b/include/linux/pci.h index c0e14008a3c2..98dc6243a706 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -534,7 +534,7 @@ extern void pci_sort_breadthfirst(void); | |||
534 | #ifdef CONFIG_PCI_LEGACY | 534 | #ifdef CONFIG_PCI_LEGACY |
535 | struct pci_dev __deprecated *pci_find_device(unsigned int vendor, | 535 | struct pci_dev __deprecated *pci_find_device(unsigned int vendor, |
536 | unsigned int device, | 536 | unsigned int device, |
537 | const struct pci_dev *from); | 537 | struct pci_dev *from); |
538 | struct pci_dev __deprecated *pci_find_slot(unsigned int bus, | 538 | struct pci_dev __deprecated *pci_find_slot(unsigned int bus, |
539 | unsigned int devfn); | 539 | unsigned int devfn); |
540 | #endif /* CONFIG_PCI_LEGACY */ | 540 | #endif /* CONFIG_PCI_LEGACY */ |
@@ -550,7 +550,7 @@ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, | |||
550 | struct pci_dev *from); | 550 | struct pci_dev *from); |
551 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, | 551 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, |
552 | unsigned int ss_vendor, unsigned int ss_device, | 552 | unsigned int ss_vendor, unsigned int ss_device, |
553 | const struct pci_dev *from); | 553 | struct pci_dev *from); |
554 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); | 554 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); |
555 | struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); | 555 | struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); |
556 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); | 556 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); |
@@ -816,7 +816,7 @@ _PCI_NOP_ALL(write,) | |||
816 | 816 | ||
817 | static inline struct pci_dev *pci_find_device(unsigned int vendor, | 817 | static inline struct pci_dev *pci_find_device(unsigned int vendor, |
818 | unsigned int device, | 818 | unsigned int device, |
819 | const struct pci_dev *from) | 819 | struct pci_dev *from) |
820 | { | 820 | { |
821 | return NULL; | 821 | return NULL; |
822 | } | 822 | } |
@@ -838,7 +838,7 @@ static inline struct pci_dev *pci_get_subsys(unsigned int vendor, | |||
838 | unsigned int device, | 838 | unsigned int device, |
839 | unsigned int ss_vendor, | 839 | unsigned int ss_vendor, |
840 | unsigned int ss_device, | 840 | unsigned int ss_device, |
841 | const struct pci_dev *from) | 841 | struct pci_dev *from) |
842 | { | 842 | { |
843 | return NULL; | 843 | return NULL; |
844 | } | 844 | } |
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index b160fb18e8d6..37aaf2b39863 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h | |||
@@ -6,6 +6,7 @@ extern int ramfs_get_sb(struct file_system_type *fs_type, | |||
6 | int flags, const char *dev_name, void *data, struct vfsmount *mnt); | 6 | int flags, const char *dev_name, void *data, struct vfsmount *mnt); |
7 | 7 | ||
8 | #ifndef CONFIG_MMU | 8 | #ifndef CONFIG_MMU |
9 | extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize); | ||
9 | extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, | 10 | extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file, |
10 | unsigned long addr, | 11 | unsigned long addr, |
11 | unsigned long len, | 12 | unsigned long len, |
diff --git a/include/linux/smb.h b/include/linux/smb.h index caa43b2370cb..82fefddc5987 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h | |||
@@ -11,7 +11,9 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/magic.h> | 13 | #include <linux/magic.h> |
14 | #ifdef __KERNEL__ | ||
14 | #include <linux/time.h> | 15 | #include <linux/time.h> |
16 | #endif | ||
15 | 17 | ||
16 | enum smb_protocol { | 18 | enum smb_protocol { |
17 | SMB_PROTOCOL_NONE, | 19 | SMB_PROTOCOL_NONE, |
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d78..b106fd8e0d5c 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_STACKTRACE_H | 1 | #ifndef __LINUX_STACKTRACE_H |
2 | #define __LINUX_STACKTRACE_H | 2 | #define __LINUX_STACKTRACE_H |
3 | 3 | ||
4 | struct task_struct; | ||
5 | |||
4 | #ifdef CONFIG_STACKTRACE | 6 | #ifdef CONFIG_STACKTRACE |
5 | struct stack_trace { | 7 | struct stack_trace { |
6 | unsigned int nr_entries, max_entries; | 8 | unsigned int nr_entries, max_entries; |
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index b3d3e27c6299..c3626c0ba9d3 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h | |||
@@ -596,4 +596,5 @@ int p9_idpool_check(int id, struct p9_idpool *p); | |||
596 | int p9_error_init(void); | 596 | int p9_error_init(void); |
597 | int p9_errstr2errno(char *, int); | 597 | int p9_errstr2errno(char *, int); |
598 | int p9_trans_fd_init(void); | 598 | int p9_trans_fd_init(void); |
599 | void p9_trans_fd_exit(void); | ||
599 | #endif /* NET_9P_H */ | 600 | #endif /* NET_9P_H */ |
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index 0db3a4038dc0..3ca737120a90 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #ifndef NET_9P_TRANSPORT_H | 26 | #ifndef NET_9P_TRANSPORT_H |
27 | #define NET_9P_TRANSPORT_H | 27 | #define NET_9P_TRANSPORT_H |
28 | 28 | ||
29 | #include <linux/module.h> | ||
30 | |||
29 | /** | 31 | /** |
30 | * enum p9_trans_status - different states of underlying transports | 32 | * enum p9_trans_status - different states of underlying transports |
31 | * @Connected: transport is connected and healthy | 33 | * @Connected: transport is connected and healthy |
@@ -91,9 +93,12 @@ struct p9_trans_module { | |||
91 | int maxsize; /* max message size of transport */ | 93 | int maxsize; /* max message size of transport */ |
92 | int def; /* this transport should be default */ | 94 | int def; /* this transport should be default */ |
93 | struct p9_trans * (*create)(const char *, char *, int, unsigned char); | 95 | struct p9_trans * (*create)(const char *, char *, int, unsigned char); |
96 | struct module *owner; | ||
94 | }; | 97 | }; |
95 | 98 | ||
96 | void v9fs_register_trans(struct p9_trans_module *m); | 99 | void v9fs_register_trans(struct p9_trans_module *m); |
97 | struct p9_trans_module *v9fs_match_trans(const substring_t *name); | 100 | void v9fs_unregister_trans(struct p9_trans_module *m); |
98 | struct p9_trans_module *v9fs_default_trans(void); | 101 | struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name); |
102 | struct p9_trans_module *v9fs_get_default_trans(void); | ||
103 | void v9fs_put_trans(struct p9_trans_module *m); | ||
99 | #endif /* NET_9P_TRANSPORT_H */ | 104 | #endif /* NET_9P_TRANSPORT_H */ |
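The transport API gains module reference counting: a transport module now fills in .owner, and users pair v9fs_get_trans_by_name()/v9fs_get_default_trans() with v9fs_put_trans() instead of the old v9fs_match_trans()/v9fs_default_trans(). A hedged sketch of both sides, with a made-up transport (the .name member comes from the full struct, not this hunk):

/* Provider side (sketch): publish a transport that pins its module. */
static struct p9_trans *p9_fake_create(const char *devname, char *args,
				       int msize, unsigned char extended);	/* hypothetical */

static struct p9_trans_module p9_fake_trans = {
	.name	 = "fake",                     /* hypothetical transport name */
	.maxsize = 8192,
	.def	 = 0,
	.create	 = p9_fake_create,
	.owner	 = THIS_MODULE,
};

/* Consumer side (sketch): hold the module reference only while using it. */
static void p9_client_sketch(void)
{
	struct p9_trans_module *t = v9fs_get_default_trans();

	if (!t)
		return;
	/* ... t->create(...) and speak 9P over the transport ... */
	v9fs_put_trans(t);                     /* drop the reference the get took */
}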
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 24811732bdb2..029a54a02396 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -227,6 +227,9 @@ struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, | |||
227 | const struct sctp_chunk *, | 227 | const struct sctp_chunk *, |
228 | const __u8 *, | 228 | const __u8 *, |
229 | const size_t ); | 229 | const size_t ); |
230 | struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *, | ||
231 | const struct sctp_chunk *, | ||
232 | struct sctp_paramhdr *); | ||
230 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, | 233 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, |
231 | const struct sctp_transport *, | 234 | const struct sctp_transport *, |
232 | const void *payload, | 235 | const void *payload, |
diff --git a/init/main.c b/init/main.c index f6f7042331dc..3820323c4c84 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -708,7 +708,7 @@ int do_one_initcall(initcall_t fn) | |||
708 | int result; | 708 | int result; |
709 | 709 | ||
710 | if (initcall_debug) { | 710 | if (initcall_debug) { |
711 | print_fn_descriptor_symbol("calling %s\n", fn); | 711 | printk("calling %pF\n", fn); |
712 | t0 = ktime_get(); | 712 | t0 = ktime_get(); |
713 | } | 713 | } |
714 | 714 | ||
@@ -718,8 +718,8 @@ int do_one_initcall(initcall_t fn) | |||
718 | t1 = ktime_get(); | 718 | t1 = ktime_get(); |
719 | delta = ktime_sub(t1, t0); | 719 | delta = ktime_sub(t1, t0); |
720 | 720 | ||
721 | print_fn_descriptor_symbol("initcall %s", fn); | 721 | printk("initcall %pF returned %d after %Ld msecs\n", |
722 | printk(" returned %d after %Ld msecs\n", result, | 722 | fn, result, |
723 | (unsigned long long) delta.tv64 >> 20); | 723 | (unsigned long long) delta.tv64 >> 20); |
724 | } | 724 | } |
725 | 725 | ||
@@ -737,8 +737,7 @@ int do_one_initcall(initcall_t fn) | |||
737 | local_irq_enable(); | 737 | local_irq_enable(); |
738 | } | 738 | } |
739 | if (msgbuf[0]) { | 739 | if (msgbuf[0]) { |
740 | print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn); | 740 | printk("initcall %pF returned with %s\n", fn, msgbuf); |
741 | printk(" returned with %s\n", msgbuf); | ||
742 | } | 741 | } |
743 | 742 | ||
744 | return result; | 743 | return result; |
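The initcall debug messages switch from print_fn_descriptor_symbol() to the %pF printk extension, which resolves a function pointer to its symbol name (dereferencing the function descriptor on architectures that use one). Usage is simply:

	/* Sketch: %pF renders a function pointer as a symbol name in the log. */
	printk(KERN_DEBUG "calling %pF\n", fn);	/* fn is any function pointer */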
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 13932abde159..a0123d75ec9a 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child) | |||
2738 | */ | 2738 | */ |
2739 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | 2739 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) |
2740 | { | 2740 | { |
2741 | struct cgroup *oldcgrp, *newcgrp; | 2741 | struct cgroup *oldcgrp, *newcgrp = NULL; |
2742 | 2742 | ||
2743 | if (need_mm_owner_callback) { | 2743 | if (need_mm_owner_callback) { |
2744 | int i; | 2744 | int i; |
2745 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 2745 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
2746 | struct cgroup_subsys *ss = subsys[i]; | 2746 | struct cgroup_subsys *ss = subsys[i]; |
2747 | oldcgrp = task_cgroup(old, ss->subsys_id); | 2747 | oldcgrp = task_cgroup(old, ss->subsys_id); |
2748 | newcgrp = task_cgroup(new, ss->subsys_id); | 2748 | if (new) |
2749 | newcgrp = task_cgroup(new, ss->subsys_id); | ||
2749 | if (oldcgrp == newcgrp) | 2750 | if (oldcgrp == newcgrp) |
2750 | continue; | 2751 | continue; |
2751 | if (ss->mm_owner_changed) | 2752 | if (ss->mm_owner_changed) |
diff --git a/kernel/exit.c b/kernel/exit.c index 16395644a98f..85a83c831856 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) | |||
583 | * If there are other users of the mm and the owner (us) is exiting | 583 | * If there are other users of the mm and the owner (us) is exiting |
584 | * we need to find a new owner to take on the responsibility. | 584 | * we need to find a new owner to take on the responsibility. |
585 | */ | 585 | */ |
586 | if (!mm) | ||
587 | return 0; | ||
588 | if (atomic_read(&mm->mm_users) <= 1) | 586 | if (atomic_read(&mm->mm_users) <= 1) |
589 | return 0; | 587 | return 0; |
590 | if (mm->owner != p) | 588 | if (mm->owner != p) |
@@ -627,6 +625,16 @@ retry: | |||
627 | } while_each_thread(g, c); | 625 | } while_each_thread(g, c); |
628 | 626 | ||
629 | read_unlock(&tasklist_lock); | 627 | read_unlock(&tasklist_lock); |
628 | /* | ||
629 | * We found no owner, yet mm_users > 1: this implies that we are | ||
630 | * most likely racing with swapoff (try_to_unuse()) or /proc or | ||
631 | * ptrace or page migration (get_task_mm()). Mark owner as NULL, | ||
632 | * so that subsystems can understand the callback and take action. | ||
633 | */ | ||
634 | down_write(&mm->mmap_sem); | ||
635 | cgroup_mm_owner_callbacks(mm->owner, NULL); | ||
636 | mm->owner = NULL; | ||
637 | up_write(&mm->mmap_sem); | ||
630 | return; | 638 | return; |
631 | 639 | ||
632 | assign_new_owner: | 640 | assign_new_owner: |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index b8e4dce80a74..cdec83e722fa 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
672 | */ | 672 | */ |
673 | BUG_ON(timer->function(timer) != HRTIMER_NORESTART); | 673 | BUG_ON(timer->function(timer) != HRTIMER_NORESTART); |
674 | return 1; | 674 | return 1; |
675 | case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: | 675 | case HRTIMER_CB_IRQSAFE_PERCPU: |
676 | case HRTIMER_CB_IRQSAFE_UNLOCKED: | ||
676 | /* | 677 | /* |
677 | * This is solely for the sched tick emulation with | 678 | * This is solely for the sched tick emulation with |
678 | * dynamic tick support to ensure that we do not | 679 | * dynamic tick support to ensure that we do not |
679 | * restart the tick right on the edge and end up with | 680 | * restart the tick right on the edge and end up with |
680 | * the tick timer in the softirq ! The calling site | 681 | * the tick timer in the softirq ! The calling site |
681 | * takes care of this. | 682 | * takes care of this. Also used for hrtimer sleeper ! |
682 | */ | 683 | */ |
683 | debug_hrtimer_deactivate(timer); | 684 | debug_hrtimer_deactivate(timer); |
684 | return 1; | 685 | return 1; |
@@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1245 | timer_stats_account_hrtimer(timer); | 1246 | timer_stats_account_hrtimer(timer); |
1246 | 1247 | ||
1247 | fn = timer->function; | 1248 | fn = timer->function; |
1248 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { | 1249 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU || |
1250 | timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) { | ||
1249 | /* | 1251 | /* |
1250 | * Used for scheduler timers, avoid lock inversion with | 1252 | * Used for scheduler timers, avoid lock inversion with |
1251 | * rq->lock and tasklist_lock. | 1253 | * rq->lock and tasklist_lock. |
@@ -1452,7 +1454,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | |||
1452 | sl->timer.function = hrtimer_wakeup; | 1454 | sl->timer.function = hrtimer_wakeup; |
1453 | sl->task = task; | 1455 | sl->task = task; |
1454 | #ifdef CONFIG_HIGH_RES_TIMERS | 1456 | #ifdef CONFIG_HIGH_RES_TIMERS |
1455 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 1457 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; |
1456 | #endif | 1458 | #endif |
1457 | } | 1459 | } |
1458 | 1460 | ||
@@ -1591,29 +1593,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
1591 | 1593 | ||
1592 | #ifdef CONFIG_HOTPLUG_CPU | 1594 | #ifdef CONFIG_HOTPLUG_CPU |
1593 | 1595 | ||
1594 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | 1596 | static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
1595 | struct hrtimer_clock_base *new_base) | 1597 | struct hrtimer_clock_base *new_base, int dcpu) |
1596 | { | 1598 | { |
1597 | struct hrtimer *timer; | 1599 | struct hrtimer *timer; |
1598 | struct rb_node *node; | 1600 | struct rb_node *node; |
1601 | int raise = 0; | ||
1599 | 1602 | ||
1600 | while ((node = rb_first(&old_base->active))) { | 1603 | while ((node = rb_first(&old_base->active))) { |
1601 | timer = rb_entry(node, struct hrtimer, node); | 1604 | timer = rb_entry(node, struct hrtimer, node); |
1602 | BUG_ON(hrtimer_callback_running(timer)); | 1605 | BUG_ON(hrtimer_callback_running(timer)); |
1603 | debug_hrtimer_deactivate(timer); | 1606 | debug_hrtimer_deactivate(timer); |
1604 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); | 1607 | |
1608 | /* | ||
1609 | * Should not happen. Per CPU timers should be | ||
1610 | * canceled _before_ the migration code is called | ||
1611 | */ | ||
1612 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) { | ||
1613 | __remove_hrtimer(timer, old_base, | ||
1614 | HRTIMER_STATE_INACTIVE, 0); | ||
1615 | WARN(1, "hrtimer (%p %p)active but cpu %d dead\n", | ||
1616 | timer, timer->function, dcpu); | ||
1617 | continue; | ||
1618 | } | ||
1619 | |||
1620 | /* | ||
1621 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the | ||
1622 | * timer could be seen as !active and just vanish away | ||
1623 | * under us on another CPU | ||
1624 | */ | ||
1625 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); | ||
1605 | timer->base = new_base; | 1626 | timer->base = new_base; |
1606 | /* | 1627 | /* |
1607 | * Enqueue the timer. Allow reprogramming of the event device | 1628 | * Enqueue the timer. Allow reprogramming of the event device |
1608 | */ | 1629 | */ |
1609 | enqueue_hrtimer(timer, new_base, 1); | 1630 | enqueue_hrtimer(timer, new_base, 1); |
1631 | |||
1632 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1633 | /* | ||
1634 | * Happens with high res enabled when the timer was | ||
1635 | * already expired and the callback mode is | ||
1636 | * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The | ||
1637 | * enqueue code does not move them to the soft irq | ||
1638 | * pending list for performance/latency reasons, but | ||
1639 | * in the migration state, we need to do that; | ||
1640 | * otherwise we end up with a stale timer. | ||
1641 | */ | ||
1642 | if (timer->state == HRTIMER_STATE_MIGRATE) { | ||
1643 | timer->state = HRTIMER_STATE_PENDING; | ||
1644 | list_add_tail(&timer->cb_entry, | ||
1645 | &new_base->cpu_base->cb_pending); | ||
1646 | raise = 1; | ||
1647 | } | ||
1648 | #endif | ||
1649 | /* Clear the migration state bit */ | ||
1650 | timer->state &= ~HRTIMER_STATE_MIGRATE; | ||
1651 | } | ||
1652 | return raise; | ||
1653 | } | ||
1654 | |||
1655 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1656 | static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, | ||
1657 | struct hrtimer_cpu_base *new_base) | ||
1658 | { | ||
1659 | struct hrtimer *timer; | ||
1660 | int raise = 0; | ||
1661 | |||
1662 | while (!list_empty(&old_base->cb_pending)) { | ||
1663 | timer = list_entry(old_base->cb_pending.next, | ||
1664 | struct hrtimer, cb_entry); | ||
1665 | |||
1666 | __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0); | ||
1667 | timer->base = &new_base->clock_base[timer->base->index]; | ||
1668 | list_add_tail(&timer->cb_entry, &new_base->cb_pending); | ||
1669 | raise = 1; | ||
1610 | } | 1670 | } |
1671 | return raise; | ||
1672 | } | ||
1673 | #else | ||
1674 | static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, | ||
1675 | struct hrtimer_cpu_base *new_base) | ||
1676 | { | ||
1677 | return 0; | ||
1611 | } | 1678 | } |
1679 | #endif | ||
1612 | 1680 | ||
1613 | static void migrate_hrtimers(int cpu) | 1681 | static void migrate_hrtimers(int cpu) |
1614 | { | 1682 | { |
1615 | struct hrtimer_cpu_base *old_base, *new_base; | 1683 | struct hrtimer_cpu_base *old_base, *new_base; |
1616 | int i; | 1684 | int i, raise = 0; |
1617 | 1685 | ||
1618 | BUG_ON(cpu_online(cpu)); | 1686 | BUG_ON(cpu_online(cpu)); |
1619 | old_base = &per_cpu(hrtimer_bases, cpu); | 1687 | old_base = &per_cpu(hrtimer_bases, cpu); |
@@ -1626,14 +1694,21 @@ static void migrate_hrtimers(int cpu) | |||
1626 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 1694 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
1627 | 1695 | ||
1628 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1696 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1629 | migrate_hrtimer_list(&old_base->clock_base[i], | 1697 | if (migrate_hrtimer_list(&old_base->clock_base[i], |
1630 | &new_base->clock_base[i]); | 1698 | &new_base->clock_base[i], cpu)) |
1699 | raise = 1; | ||
1631 | } | 1700 | } |
1632 | 1701 | ||
1702 | if (migrate_hrtimer_pending(old_base, new_base)) | ||
1703 | raise = 1; | ||
1704 | |||
1633 | spin_unlock(&old_base->lock); | 1705 | spin_unlock(&old_base->lock); |
1634 | spin_unlock(&new_base->lock); | 1706 | spin_unlock(&new_base->lock); |
1635 | local_irq_enable(); | 1707 | local_irq_enable(); |
1636 | put_cpu_var(hrtimer_bases); | 1708 | put_cpu_var(hrtimer_bases); |
1709 | |||
1710 | if (raise) | ||
1711 | hrtimer_raise_softirq(); | ||
1637 | } | 1712 | } |
1638 | #endif /* CONFIG_HOTPLUG_CPU */ | 1713 | #endif /* CONFIG_HOTPLUG_CPU */ |
1639 | 1714 | ||
diff --git a/kernel/kexec.c b/kernel/kexec.c index 59f3f0df35d4..aef265325cd3 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
753 | *old = addr | (*old & ~PAGE_MASK); | 753 | *old = addr | (*old & ~PAGE_MASK); |
754 | 754 | ||
755 | /* The old page I have found cannot be a | 755 | /* The old page I have found cannot be a |
756 | * destination page, so return it. | 756 | * destination page, so return it if its
757 | * gfp_flags honor the ones passed in. | ||
757 | */ | 758 | */ |
759 | if (!(gfp_mask & __GFP_HIGHMEM) && | ||
760 | PageHighMem(old_page)) { | ||
761 | kimage_free_pages(old_page); | ||
762 | continue; | ||
763 | } | ||
758 | addr = old_addr; | 764 | addr = old_addr; |
759 | page = old_page; | 765 | page = old_page; |
760 | break; | 766 | break; |
diff --git a/kernel/kgdb.c b/kernel/kgdb.c index eaa21fc9ad1d..25d955dbb989 100644 --- a/kernel/kgdb.c +++ b/kernel/kgdb.c | |||
@@ -488,7 +488,7 @@ static int write_mem_msg(int binary) | |||
488 | if (err) | 488 | if (err) |
489 | return err; | 489 | return err; |
490 | if (CACHE_FLUSH_IS_SAFE) | 490 | if (CACHE_FLUSH_IS_SAFE) |
491 | flush_icache_range(addr, addr + length + 1); | 491 | flush_icache_range(addr, addr + length); |
492 | return 0; | 492 | return 0; |
493 | } | 493 | } |
494 | 494 | ||
@@ -1462,7 +1462,7 @@ acquirelock: | |||
1462 | * Get the passive CPU lock which will hold all the non-primary | 1462 | * Get the passive CPU lock which will hold all the non-primary |
1463 | * CPU in a spin state while the debugger is active | 1463 | * CPU in a spin state while the debugger is active |
1464 | */ | 1464 | */ |
1465 | if (!kgdb_single_step || !kgdb_contthread) { | 1465 | if (!kgdb_single_step) { |
1466 | for (i = 0; i < NR_CPUS; i++) | 1466 | for (i = 0; i < NR_CPUS; i++) |
1467 | atomic_set(&passive_cpu_wait[i], 1); | 1467 | atomic_set(&passive_cpu_wait[i], 1); |
1468 | } | 1468 | } |
@@ -1475,7 +1475,7 @@ acquirelock: | |||
1475 | 1475 | ||
1476 | #ifdef CONFIG_SMP | 1476 | #ifdef CONFIG_SMP |
1477 | /* Signal the other CPUs to enter kgdb_wait() */ | 1477 | /* Signal the other CPUs to enter kgdb_wait() */ |
1478 | if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) | 1478 | if ((!kgdb_single_step) && kgdb_do_roundup) |
1479 | kgdb_roundup_cpus(flags); | 1479 | kgdb_roundup_cpus(flags); |
1480 | #endif | 1480 | #endif |
1481 | 1481 | ||
@@ -1494,7 +1494,7 @@ acquirelock: | |||
1494 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); | 1494 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); |
1495 | kgdb_deactivate_sw_breakpoints(); | 1495 | kgdb_deactivate_sw_breakpoints(); |
1496 | kgdb_single_step = 0; | 1496 | kgdb_single_step = 0; |
1497 | kgdb_contthread = NULL; | 1497 | kgdb_contthread = current; |
1498 | exception_level = 0; | 1498 | exception_level = 0; |
1499 | 1499 | ||
1500 | /* Talk to debugger with gdbserial protocol */ | 1500 | /* Talk to debugger with gdbserial protocol */ |
@@ -1508,7 +1508,7 @@ acquirelock: | |||
1508 | kgdb_info[ks->cpu].task = NULL; | 1508 | kgdb_info[ks->cpu].task = NULL; |
1509 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); | 1509 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); |
1510 | 1510 | ||
1511 | if (!kgdb_single_step || !kgdb_contthread) { | 1511 | if (!kgdb_single_step) { |
1512 | for (i = NR_CPUS-1; i >= 0; i--) | 1512 | for (i = NR_CPUS-1; i >= 0; i--) |
1513 | atomic_set(&passive_cpu_wait[i], 0); | 1513 | atomic_set(&passive_cpu_wait[i], 0); |
1514 | /* | 1514 | /* |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index e36d5798cbff..5131e5471169 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -441,7 +441,7 @@ static struct k_itimer * alloc_posix_timer(void) | |||
441 | return tmr; | 441 | return tmr; |
442 | if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { | 442 | if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { |
443 | kmem_cache_free(posix_timers_cache, tmr); | 443 | kmem_cache_free(posix_timers_cache, tmr); |
444 | tmr = NULL; | 444 | return NULL; |
445 | } | 445 | } |
446 | memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); | 446 | memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); |
447 | return tmr; | 447 | return tmr; |
diff --git a/kernel/sched.c b/kernel/sched.c index 98890807375b..ad1962dc0aa2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -201,7 +201,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
201 | hrtimer_init(&rt_b->rt_period_timer, | 201 | hrtimer_init(&rt_b->rt_period_timer, |
202 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 202 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
203 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 203 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; |
205 | } | 205 | } |
206 | 206 | ||
207 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | 207 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) |
@@ -1087,7 +1087,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
1087 | return NOTIFY_DONE; | 1087 | return NOTIFY_DONE; |
1088 | } | 1088 | } |
1089 | 1089 | ||
1090 | static void init_hrtick(void) | 1090 | static __init void init_hrtick(void) |
1091 | { | 1091 | { |
1092 | hotcpu_notifier(hotplug_hrtick, 0); | 1092 | hotcpu_notifier(hotplug_hrtick, 0); |
1093 | } | 1093 | } |
@@ -1119,7 +1119,7 @@ static void init_rq_hrtick(struct rq *rq) | |||
1119 | 1119 | ||
1120 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1120 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1121 | rq->hrtick_timer.function = hrtick; | 1121 | rq->hrtick_timer.function = hrtick; |
1122 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 1122 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
1123 | } | 1123 | } |
1124 | #else | 1124 | #else |
1125 | static inline void hrtick_clear(struct rq *rq) | 1125 | static inline void hrtick_clear(struct rq *rq) |
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index f1f3eee28113..cb01cd8f919b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -235,7 +235,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { |
237 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpu_set(cpu, tick_broadcast_mask); |
238 | if (td->mode == TICKDEV_MODE_PERIODIC) | 238 | if (tick_broadcast_device.mode == |
239 | TICKDEV_MODE_PERIODIC) | ||
239 | clockevents_shutdown(dev); | 240 | clockevents_shutdown(dev); |
240 | } | 241 | } |
241 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) | 242 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) |
@@ -245,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
245 | if (!tick_broadcast_force && | 246 | if (!tick_broadcast_force && |
246 | cpu_isset(cpu, tick_broadcast_mask)) { | 247 | cpu_isset(cpu, tick_broadcast_mask)) { |
247 | cpu_clear(cpu, tick_broadcast_mask); | 248 | cpu_clear(cpu, tick_broadcast_mask); |
248 | if (td->mode == TICKDEV_MODE_PERIODIC) | 249 | if (tick_broadcast_device.mode == |
250 | TICKDEV_MODE_PERIODIC) | ||
249 | tick_setup_periodic(dev, 0); | 251 | tick_setup_periodic(dev, 0); |
250 | } | 252 | } |
251 | break; | 253 | break; |
@@ -575,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
575 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 577 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
576 | } | 578 | } |
577 | 579 | ||
580 | /* | ||
581 | * Check, whether the broadcast device is in one shot mode | ||
582 | */ | ||
583 | int tick_broadcast_oneshot_active(void) | ||
584 | { | ||
585 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | ||
586 | } | ||
587 | |||
578 | #endif | 588 | #endif |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 019315ebf9de..df12434b43ca 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
33 | */ | 33 | */ |
34 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
35 | ktime_t tick_period; | 35 | ktime_t tick_period; |
36 | int tick_do_timer_cpu __read_mostly = -1; | 36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
37 | DEFINE_SPINLOCK(tick_device_lock); | 37 | DEFINE_SPINLOCK(tick_device_lock); |
38 | 38 | ||
39 | /* | 39 | /* |
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
109 | if (!tick_device_is_functional(dev)) | 109 | if (!tick_device_is_functional(dev)) |
110 | return; | 110 | return; |
111 | 111 | ||
112 | if (dev->features & CLOCK_EVT_FEAT_PERIODIC) { | 112 | if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && |
113 | !tick_broadcast_oneshot_active()) { | ||
113 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); | 114 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); |
114 | } else { | 115 | } else { |
115 | unsigned long seq; | 116 | unsigned long seq; |
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td, | |||
148 | * If no cpu took the do_timer update, assign it to | 149 | * If no cpu took the do_timer update, assign it to |
149 | * this cpu: | 150 | * this cpu: |
150 | */ | 151 | */ |
151 | if (tick_do_timer_cpu == -1) { | 152 | if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { |
152 | tick_do_timer_cpu = cpu; | 153 | tick_do_timer_cpu = cpu; |
153 | tick_next_period = ktime_get(); | 154 | tick_next_period = ktime_get(); |
154 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); | 155 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); |
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup) | |||
300 | if (*cpup == tick_do_timer_cpu) { | 301 | if (*cpup == tick_do_timer_cpu) { |
301 | int cpu = first_cpu(cpu_online_map); | 302 | int cpu = first_cpu(cpu_online_map); |
302 | 303 | ||
303 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1; | 304 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : |
305 | TICK_DO_TIMER_NONE; | ||
304 | } | 306 | } |
305 | spin_unlock_irqrestore(&tick_device_lock, flags); | 307 | spin_unlock_irqrestore(&tick_device_lock, flags); |
306 | } | 308 | } |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 6e9db9734aa6..469248782c23 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -1,6 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * tick internal variable and functions used by low/high res code | 2 | * tick internal variable and functions used by low/high res code |
3 | */ | 3 | */ |
4 | |||
5 | #define TICK_DO_TIMER_NONE -1 | ||
6 | #define TICK_DO_TIMER_BOOT -2 | ||
7 | |||
4 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); | 8 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); |
5 | extern spinlock_t tick_device_lock; | 9 | extern spinlock_t tick_device_lock; |
6 | extern ktime_t tick_next_period; | 10 | extern ktime_t tick_next_period; |
@@ -31,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason); | |||
31 | extern void tick_broadcast_switch_to_oneshot(void); | 35 | extern void tick_broadcast_switch_to_oneshot(void); |
32 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 36 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
33 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 37 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
38 | extern int tick_broadcast_oneshot_active(void); | ||
34 | # else /* BROADCAST */ | 39 | # else /* BROADCAST */ |
35 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 40 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
36 | { | 41 | { |
@@ -39,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
39 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 44 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } |
40 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 45 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
41 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 46 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
47 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
42 | # endif /* !BROADCAST */ | 48 | # endif /* !BROADCAST */ |
43 | 49 | ||
44 | #else /* !ONESHOT */ | 50 | #else /* !ONESHOT */ |
@@ -68,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
68 | { | 74 | { |
69 | return 0; | 75 | return 0; |
70 | } | 76 | } |
77 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
71 | #endif /* !TICK_ONESHOT */ | 78 | #endif /* !TICK_ONESHOT */ |
72 | 79 | ||
73 | /* | 80 | /* |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a87b0468568b..cb02324bdb88 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now) | |||
75 | incr * ticks); | 75 | incr * ticks); |
76 | } | 76 | } |
77 | do_timer(++ticks); | 77 | do_timer(++ticks); |
78 | |||
79 | /* Keep the tick_next_period variable up to date */ | ||
80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | ||
78 | } | 81 | } |
79 | write_sequnlock(&xtime_lock); | 82 | write_sequnlock(&xtime_lock); |
80 | } | 83 | } |
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
221 | */ | 224 | */ |
222 | if (unlikely(!cpu_online(cpu))) { | 225 | if (unlikely(!cpu_online(cpu))) { |
223 | if (cpu == tick_do_timer_cpu) | 226 | if (cpu == tick_do_timer_cpu) |
224 | tick_do_timer_cpu = -1; | 227 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
225 | } | 228 | } |
226 | 229 | ||
227 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 230 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
303 | * invoked. | 306 | * invoked. |
304 | */ | 307 | */ |
305 | if (cpu == tick_do_timer_cpu) | 308 | if (cpu == tick_do_timer_cpu) |
306 | tick_do_timer_cpu = -1; | 309 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
307 | 310 | ||
308 | ts->idle_sleeps++; | 311 | ts->idle_sleeps++; |
309 | 312 | ||
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) | |||
468 | * this duty, then the jiffies update is still serialized by | 471 | * this duty, then the jiffies update is still serialized by |
469 | * xtime_lock. | 472 | * xtime_lock. |
470 | */ | 473 | */ |
471 | if (unlikely(tick_do_timer_cpu == -1)) | 474 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
472 | tick_do_timer_cpu = cpu; | 475 | tick_do_timer_cpu = cpu; |
473 | 476 | ||
474 | /* Check, if the jiffies need an update */ | 477 | /* Check, if the jiffies need an update */ |
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
570 | * this duty, then the jiffies update is still serialized by | 573 | * this duty, then the jiffies update is still serialized by |
571 | * xtime_lock. | 574 | * xtime_lock. |
572 | */ | 575 | */ |
573 | if (unlikely(tick_do_timer_cpu == -1)) | 576 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
574 | tick_do_timer_cpu = cpu; | 577 | tick_do_timer_cpu = cpu; |
575 | #endif | 578 | #endif |
576 | 579 | ||
@@ -622,7 +625,7 @@ void tick_setup_sched_timer(void) | |||
622 | */ | 625 | */ |
623 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 626 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
624 | ts->sched_timer.function = tick_sched_timer; | 627 | ts->sched_timer.function = tick_sched_timer; |
625 | ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 628 | ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
626 | 629 | ||
627 | /* Get the next period (per cpu) */ | 630 | /* Get the next period (per cpu) */ |
628 | ts->sched_timer.expires = tick_init_jiffy_update(); | 631 | ts->sched_timer.expires = tick_init_jiffy_update(); |
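Besides switching to TICK_DO_TIMER_NONE and the HRTIMER_CB_IRQSAFE_PERCPU callback mode, the tick-sched.c hunks keep tick_next_period in sync whenever the idle path catches jiffies up, so a CPU that later inherits the do_timer duty starts from the correct period boundary. A rough user-space sketch of that catch-up arithmetic, with ktime_t replaced by plain nanosecond integers and all locking omitted.

        #include <stdio.h>
        #include <stdint.h>

        #define NSEC_PER_SEC 1000000000LL
        #define HZ 250

        /* Simplified model of tick_do_update_jiffies64(): account the whole
         * periods slept through, then recompute the next period boundary. */
        int main(void)
        {
                int64_t tick_period = NSEC_PER_SEC / HZ;
                int64_t last_jiffies_update = 0;
                int64_t now = 3 * tick_period + tick_period / 2; /* woke mid-period */
                uint64_t jiffies = 1000;

                int64_t delta = now - last_jiffies_update;
                if (delta >= tick_period) {
                        int64_t ticks = delta / tick_period;    /* whole periods slept */
                        last_jiffies_update += ticks * tick_period;
                        jiffies += ticks;
                }
                /* The hunk above adds exactly this bookkeeping step. */
                int64_t tick_next_period = last_jiffies_update + tick_period;

                printf("jiffies=%llu, next period at %lld ns\n",
                       (unsigned long long)jiffies, (long long)tick_next_period);
                return 0;
        }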
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index bb948e52ce20..db58fb66a135 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -202,7 +202,7 @@ static void start_stack_timer(int cpu) | |||
202 | 202 | ||
203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
204 | hrtimer->function = stack_trace_timer_fn; | 204 | hrtimer->function = stack_trace_timer_fn; |
205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
206 | 206 | ||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); | 207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); |
208 | } | 208 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0f1f7a7374ba..36896f3eb7f5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -250,6 +250,14 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) | |||
250 | 250 | ||
251 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) | 251 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) |
252 | { | 252 | { |
253 | /* | ||
254 | * mm_update_next_owner() may clear mm->owner to NULL | ||
255 | * if it races with swapoff, page migration, etc. | ||
256 | * So this can be called with p == NULL. | ||
257 | */ | ||
258 | if (unlikely(!p)) | ||
259 | return NULL; | ||
260 | |||
253 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), | 261 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), |
254 | struct mem_cgroup, css); | 262 | struct mem_cgroup, css); |
255 | } | 263 | } |
@@ -549,6 +557,11 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
549 | if (likely(!memcg)) { | 557 | if (likely(!memcg)) { |
550 | rcu_read_lock(); | 558 | rcu_read_lock(); |
551 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 559 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
560 | if (unlikely(!mem)) { | ||
561 | rcu_read_unlock(); | ||
562 | kmem_cache_free(page_cgroup_cache, pc); | ||
563 | return 0; | ||
564 | } | ||
552 | /* | 565 | /* |
553 | * For every charge from the cgroup, increment reference count | 566 | * For every charge from the cgroup, increment reference count |
554 | */ | 567 | */ |
@@ -801,11 +814,16 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask) | |||
801 | 814 | ||
802 | rcu_read_lock(); | 815 | rcu_read_lock(); |
803 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 816 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
817 | if (unlikely(!mem)) { | ||
818 | rcu_read_unlock(); | ||
819 | return 0; | ||
820 | } | ||
804 | css_get(&mem->css); | 821 | css_get(&mem->css); |
805 | rcu_read_unlock(); | 822 | rcu_read_unlock(); |
806 | 823 | ||
807 | do { | 824 | do { |
808 | progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); | 825 | progress = try_to_free_mem_cgroup_pages(mem, gfp_mask); |
826 | progress += res_counter_check_under_limit(&mem->res); | ||
809 | } while (!progress && --retry); | 827 | } while (!progress && --retry); |
810 | 828 | ||
811 | css_put(&mem->css); | 829 | css_put(&mem->css); |
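The memcontrol hunks make mem_cgroup_from_task() return NULL when mm->owner has been cleared by a racing mm_update_next_owner(), and teach both callers to bail out gracefully instead of dereferencing a bogus pointer. A compact user-space sketch of that "lookup may return NULL, callers treat it as a no-op" shape; the struct names and helpers below are made up for the example.

        #include <stdio.h>

        struct group { long usage; };
        struct task  { struct group *owner; };

        static struct group *lookup_group(struct task *t)
        {
                if (!t || !t->owner)    /* racing owner teardown: report "no group" */
                        return NULL;
                return t->owner;
        }

        static int charge(struct task *t, long bytes)
        {
                struct group *g = lookup_group(t);

                if (!g)                 /* nothing to charge against; treat as success */
                        return 0;
                g->usage += bytes;
                return 0;
        }

        int main(void)
        {
                struct group g = { 0 };
                struct task with_owner = { &g }, orphaned = { NULL };

                charge(&with_owner, 4096);
                charge(&orphaned, 4096);        /* must not crash */
                printf("group usage: %ld\n", g.usage);
                return 0;
        }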
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e293c58bea58..27b8681139fd 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -268,13 +268,14 @@ void prep_compound_page(struct page *page, unsigned long order) | |||
268 | { | 268 | { |
269 | int i; | 269 | int i; |
270 | int nr_pages = 1 << order; | 270 | int nr_pages = 1 << order; |
271 | struct page *p = page + 1; | ||
271 | 272 | ||
272 | set_compound_page_dtor(page, free_compound_page); | 273 | set_compound_page_dtor(page, free_compound_page); |
273 | set_compound_order(page, order); | 274 | set_compound_order(page, order); |
274 | __SetPageHead(page); | 275 | __SetPageHead(page); |
275 | for (i = 1; i < nr_pages; i++) { | 276 | for (i = 1; i < nr_pages; i++, p++) { |
276 | struct page *p = page + i; | 277 | if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) |
277 | 278 | p = pfn_to_page(page_to_pfn(page) + i); | |
278 | __SetPageTail(p); | 279 | __SetPageTail(p); |
279 | p->first_page = page; | 280 | p->first_page = page; |
280 | } | 281 | } |
@@ -284,6 +285,7 @@ static void destroy_compound_page(struct page *page, unsigned long order) | |||
284 | { | 285 | { |
285 | int i; | 286 | int i; |
286 | int nr_pages = 1 << order; | 287 | int nr_pages = 1 << order; |
288 | struct page *p = page + 1; | ||
287 | 289 | ||
288 | if (unlikely(compound_order(page) != order)) | 290 | if (unlikely(compound_order(page) != order)) |
289 | bad_page(page); | 291 | bad_page(page); |
@@ -291,8 +293,9 @@ static void destroy_compound_page(struct page *page, unsigned long order) | |||
291 | if (unlikely(!PageHead(page))) | 293 | if (unlikely(!PageHead(page))) |
292 | bad_page(page); | 294 | bad_page(page); |
293 | __ClearPageHead(page); | 295 | __ClearPageHead(page); |
294 | for (i = 1; i < nr_pages; i++) { | 296 | for (i = 1; i < nr_pages; i++, p++) { |
295 | struct page *p = page + i; | 297 | if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0)) |
298 | p = pfn_to_page(page_to_pfn(page) + i); | ||
296 | 299 | ||
297 | if (unlikely(!PageTail(p) | | 300 | if (unlikely(!PageTail(p) | |
298 | (p->first_page != page))) | 301 | (p->first_page != page))) |
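The page_alloc.c hunks stop assuming that the struct pages of one compound page are virtually contiguous: whenever the tail-page loop crosses a MAX_ORDER_NR_PAGES boundary it re-derives the pointer from the pfn. The toy program below models that with two separate "sections"; the sizes and names are invented, and pfn_to_page() here is just an array lookup.

        #include <stdio.h>

        #define SECTION_PAGES 4         /* stands in for MAX_ORDER_NR_PAGES */
        #define NR_SECTIONS   2

        struct page { int pfn; struct page *first_page; };

        static struct page section0[SECTION_PAGES];
        static struct page section1[SECTION_PAGES];

        static struct page *pfn_to_page(int pfn)
        {
                struct page *sec = (pfn / SECTION_PAGES) ? section1 : section0;
                return &sec[pfn % SECTION_PAGES];
        }

        static void prep_compound(struct page *head, int nr_pages)
        {
                struct page *p = head + 1;
                int i;

                for (i = 1; i < nr_pages; i++, p++) {
                        if ((i % SECTION_PAGES) == 0)   /* crossed a section boundary */
                                p = pfn_to_page(head->pfn + i);
                        p->first_page = head;
                }
        }

        int main(void)
        {
                int pfn;

                for (pfn = 0; pfn < SECTION_PAGES * NR_SECTIONS; pfn++)
                        pfn_to_page(pfn)->pfn = pfn;

                prep_compound(pfn_to_page(0), 8);       /* order-3 "compound page" */
                printf("tail pfn 6 points back to head pfn %d\n",
                       pfn_to_page(6)->first_page->pfn);
                return 0;
        }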
diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c69f84fe038d..b70a7fec1ff6 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c | |||
@@ -114,8 +114,10 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) | |||
114 | 114 | ||
115 | int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) | 115 | int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) |
116 | { | 116 | { |
117 | unsigned long pfn; | 117 | unsigned long pfn, flags; |
118 | struct page *page; | 118 | struct page *page; |
119 | struct zone *zone; | ||
120 | int ret; | ||
119 | 121 | ||
120 | pfn = start_pfn; | 122 | pfn = start_pfn; |
121 | /* | 123 | /* |
@@ -131,7 +133,9 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) | |||
131 | if (pfn < end_pfn) | 133 | if (pfn < end_pfn) |
132 | return -EBUSY; | 134 | return -EBUSY; |
133 | /* Check all pages are free or Marked as ISOLATED */ | 135 | /* Check all pages are free or Marked as ISOLATED */ |
134 | if (__test_page_isolated_in_pageblock(start_pfn, end_pfn)) | 136 | zone = page_zone(pfn_to_page(pfn)); |
135 | return 0; | 137 | spin_lock_irqsave(&zone->lock, flags); |
136 | return -EBUSY; | 138 | ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn); |
139 | spin_unlock_irqrestore(&zone->lock, flags); | ||
140 | return ret ? 0 : -EBUSY; | ||
137 | } | 141 | } |
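The page_isolation.c hunk moves the final "is the whole range still free or isolated?" scan under zone->lock so the allocator cannot hand out pages from the range mid-scan. A user-space analogue with a pthread mutex standing in for the zone lock and a plain array standing in for the page states.

        #include <pthread.h>
        #include <stdio.h>

        #define NPAGES 8

        static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
        static int page_state[NPAGES];          /* 0 = free/isolated, 1 = allocated */

        static int test_range_isolated(int start, int end)
        {
                int i, ok = 1;

                pthread_mutex_lock(&zone_lock); /* nothing changes while we scan */
                for (i = start; i < end; i++) {
                        if (page_state[i]) {
                                ok = 0;
                                break;
                        }
                }
                pthread_mutex_unlock(&zone_lock);
                return ok ? 0 : -1;             /* mirrors the 0 / -EBUSY convention */
        }

        int main(void)
        {
                page_state[5] = 1;
                printf("range [0,4): %d\n", test_range_isolated(0, 4));
                printf("range [0,8): %d\n", test_range_isolated(0, 8));
                return 0;
        }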
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c index ae532f501943..8d7a27a6335c 100644 --- a/mm/tiny-shmem.c +++ b/mm/tiny-shmem.c | |||
@@ -65,31 +65,31 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags) | |||
65 | if (!dentry) | 65 | if (!dentry) |
66 | goto put_memory; | 66 | goto put_memory; |
67 | 67 | ||
68 | error = -ENFILE; | ||
69 | file = get_empty_filp(); | ||
70 | if (!file) | ||
71 | goto put_dentry; | ||
72 | |||
68 | error = -ENOSPC; | 73 | error = -ENOSPC; |
69 | inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); | 74 | inode = ramfs_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0); |
70 | if (!inode) | 75 | if (!inode) |
71 | goto put_dentry; | 76 | goto close_file; |
72 | 77 | ||
73 | d_instantiate(dentry, inode); | 78 | d_instantiate(dentry, inode); |
74 | error = -ENFILE; | 79 | inode->i_size = size; |
75 | file = alloc_file(shm_mnt, dentry, FMODE_WRITE | FMODE_READ, | ||
76 | &ramfs_file_operations); | ||
77 | if (!file) | ||
78 | goto put_dentry; | ||
79 | |||
80 | inode->i_nlink = 0; /* It is unlinked */ | 80 | inode->i_nlink = 0; /* It is unlinked */ |
81 | init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ, | ||
82 | &ramfs_file_operations); | ||
81 | 83 | ||
82 | /* notify everyone as to the change of file size */ | 84 | #ifndef CONFIG_MMU |
83 | error = do_truncate(dentry, size, 0, file); | 85 | error = ramfs_nommu_expand_for_mapping(inode, size); |
84 | if (error < 0) | 86 | if (error) |
85 | goto close_file; | 87 | goto close_file; |
86 | 88 | #endif | |
87 | return file; | 89 | return file; |
88 | 90 | ||
89 | close_file: | 91 | close_file: |
90 | put_filp(file); | 92 | put_filp(file); |
91 | return ERR_PTR(error); | ||
92 | |||
93 | put_dentry: | 93 | put_dentry: |
94 | dput(dentry); | 94 | dput(dentry); |
95 | put_memory: | 95 | put_memory: |
diff --git a/net/9p/client.c b/net/9p/client.c index 2ffe40cf2f01..10e320307ec0 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -75,7 +75,6 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
75 | int option; | 75 | int option; |
76 | int ret = 0; | 76 | int ret = 0; |
77 | 77 | ||
78 | clnt->trans_mod = v9fs_default_trans(); | ||
79 | clnt->dotu = 1; | 78 | clnt->dotu = 1; |
80 | clnt->msize = 8192; | 79 | clnt->msize = 8192; |
81 | 80 | ||
@@ -108,7 +107,7 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
108 | clnt->msize = option; | 107 | clnt->msize = option; |
109 | break; | 108 | break; |
110 | case Opt_trans: | 109 | case Opt_trans: |
111 | clnt->trans_mod = v9fs_match_trans(&args[0]); | 110 | clnt->trans_mod = v9fs_get_trans_by_name(&args[0]); |
112 | break; | 111 | break; |
113 | case Opt_legacy: | 112 | case Opt_legacy: |
114 | clnt->dotu = 0; | 113 | clnt->dotu = 0; |
@@ -117,6 +116,10 @@ static int parse_opts(char *opts, struct p9_client *clnt) | |||
117 | continue; | 116 | continue; |
118 | } | 117 | } |
119 | } | 118 | } |
119 | |||
120 | if (!clnt->trans_mod) | ||
121 | clnt->trans_mod = v9fs_get_default_trans(); | ||
122 | |||
120 | kfree(options); | 123 | kfree(options); |
121 | return ret; | 124 | return ret; |
122 | } | 125 | } |
@@ -150,6 +153,7 @@ struct p9_client *p9_client_create(const char *dev_name, char *options) | |||
150 | if (!clnt) | 153 | if (!clnt) |
151 | return ERR_PTR(-ENOMEM); | 154 | return ERR_PTR(-ENOMEM); |
152 | 155 | ||
156 | clnt->trans_mod = NULL; | ||
153 | clnt->trans = NULL; | 157 | clnt->trans = NULL; |
154 | spin_lock_init(&clnt->lock); | 158 | spin_lock_init(&clnt->lock); |
155 | INIT_LIST_HEAD(&clnt->fidlist); | 159 | INIT_LIST_HEAD(&clnt->fidlist); |
@@ -235,6 +239,8 @@ void p9_client_destroy(struct p9_client *clnt) | |||
235 | clnt->trans = NULL; | 239 | clnt->trans = NULL; |
236 | } | 240 | } |
237 | 241 | ||
242 | v9fs_put_trans(clnt->trans_mod); | ||
243 | |||
238 | list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) | 244 | list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) |
239 | p9_fid_destroy(fid); | 245 | p9_fid_destroy(fid); |
240 | 246 | ||
diff --git a/net/9p/conv.c b/net/9p/conv.c index 44547201f5bc..5ad3a3bd73b2 100644 --- a/net/9p/conv.c +++ b/net/9p/conv.c | |||
@@ -451,8 +451,10 @@ p9_put_data(struct cbuf *bufp, const char *data, int count, | |||
451 | unsigned char **pdata) | 451 | unsigned char **pdata) |
452 | { | 452 | { |
453 | *pdata = buf_alloc(bufp, count); | 453 | *pdata = buf_alloc(bufp, count); |
454 | if (*pdata == NULL) | ||
455 | return -ENOMEM; | ||
454 | memmove(*pdata, data, count); | 456 | memmove(*pdata, data, count); |
455 | return count; | 457 | return 0; |
456 | } | 458 | } |
457 | 459 | ||
458 | static int | 460 | static int |
@@ -460,6 +462,8 @@ p9_put_user_data(struct cbuf *bufp, const char __user *data, int count, | |||
460 | unsigned char **pdata) | 462 | unsigned char **pdata) |
461 | { | 463 | { |
462 | *pdata = buf_alloc(bufp, count); | 464 | *pdata = buf_alloc(bufp, count); |
465 | if (*pdata == NULL) | ||
466 | return -ENOMEM; | ||
463 | return copy_from_user(*pdata, data, count); | 467 | return copy_from_user(*pdata, data, count); |
464 | } | 468 | } |
465 | 469 | ||
diff --git a/net/9p/mod.c b/net/9p/mod.c index bdee1fb7cc62..1084feb24cb0 100644 --- a/net/9p/mod.c +++ b/net/9p/mod.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/parser.h> | 31 | #include <linux/parser.h> |
32 | #include <net/9p/transport.h> | 32 | #include <net/9p/transport.h> |
33 | #include <linux/list.h> | 33 | #include <linux/list.h> |
34 | #include <linux/spinlock.h> | ||
34 | 35 | ||
35 | #ifdef CONFIG_NET_9P_DEBUG | 36 | #ifdef CONFIG_NET_9P_DEBUG |
36 | unsigned int p9_debug_level = 0; /* feature-rific global debug level */ | 37 | unsigned int p9_debug_level = 0; /* feature-rific global debug level */ |
@@ -44,8 +45,8 @@ MODULE_PARM_DESC(debug, "9P debugging level"); | |||
44 | * | 45 | * |
45 | */ | 46 | */ |
46 | 47 | ||
48 | static DEFINE_SPINLOCK(v9fs_trans_lock); | ||
47 | static LIST_HEAD(v9fs_trans_list); | 49 | static LIST_HEAD(v9fs_trans_list); |
48 | static struct p9_trans_module *v9fs_default_transport; | ||
49 | 50 | ||
50 | /** | 51 | /** |
51 | * v9fs_register_trans - register a new transport with 9p | 52 | * v9fs_register_trans - register a new transport with 9p |
@@ -54,48 +55,87 @@ static struct p9_trans_module *v9fs_default_transport; | |||
54 | */ | 55 | */ |
55 | void v9fs_register_trans(struct p9_trans_module *m) | 56 | void v9fs_register_trans(struct p9_trans_module *m) |
56 | { | 57 | { |
58 | spin_lock(&v9fs_trans_lock); | ||
57 | list_add_tail(&m->list, &v9fs_trans_list); | 59 | list_add_tail(&m->list, &v9fs_trans_list); |
58 | if (m->def) | 60 | spin_unlock(&v9fs_trans_lock); |
59 | v9fs_default_transport = m; | ||
60 | } | 61 | } |
61 | EXPORT_SYMBOL(v9fs_register_trans); | 62 | EXPORT_SYMBOL(v9fs_register_trans); |
62 | 63 | ||
63 | /** | 64 | /** |
64 | * v9fs_match_trans - match transport versus registered transports | 65 | * v9fs_unregister_trans - unregister a 9p transport |
66 | * @m: the transport to remove | ||
67 | * | ||
68 | */ | ||
69 | void v9fs_unregister_trans(struct p9_trans_module *m) | ||
70 | { | ||
71 | spin_lock(&v9fs_trans_lock); | ||
72 | list_del_init(&m->list); | ||
73 | spin_unlock(&v9fs_trans_lock); | ||
74 | } | ||
75 | EXPORT_SYMBOL(v9fs_unregister_trans); | ||
76 | |||
77 | /** | ||
78 | * v9fs_get_trans_by_name - get transport with the matching name | ||
65 | * @name: string identifying transport | 79 | * @name: string identifying transport |
66 | * | 80 | * |
67 | */ | 81 | */ |
68 | struct p9_trans_module *v9fs_match_trans(const substring_t *name) | 82 | struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name) |
69 | { | 83 | { |
70 | struct list_head *p; | 84 | struct p9_trans_module *t, *found = NULL; |
71 | struct p9_trans_module *t = NULL; | 85 | |
72 | 86 | spin_lock(&v9fs_trans_lock); | |
73 | list_for_each(p, &v9fs_trans_list) { | 87 | |
74 | t = list_entry(p, struct p9_trans_module, list); | 88 | list_for_each_entry(t, &v9fs_trans_list, list) |
75 | if (strncmp(t->name, name->from, name->to-name->from) == 0) | 89 | if (strncmp(t->name, name->from, name->to-name->from) == 0 && |
76 | return t; | 90 | try_module_get(t->owner)) { |
77 | } | 91 | found = t; |
78 | return NULL; | 92 | break; |
93 | } | ||
94 | |||
95 | spin_unlock(&v9fs_trans_lock); | ||
96 | return found; | ||
79 | } | 97 | } |
80 | EXPORT_SYMBOL(v9fs_match_trans); | 98 | EXPORT_SYMBOL(v9fs_get_trans_by_name); |
81 | 99 | ||
82 | /** | 100 | /** |
83 | * v9fs_default_trans - returns pointer to default transport | 101 | * v9fs_get_default_trans - get the default transport |
84 | * | 102 | * |
85 | */ | 103 | */ |
86 | 104 | ||
87 | struct p9_trans_module *v9fs_default_trans(void) | 105 | struct p9_trans_module *v9fs_get_default_trans(void) |
88 | { | 106 | { |
89 | if (v9fs_default_transport) | 107 | struct p9_trans_module *t, *found = NULL; |
90 | return v9fs_default_transport; | 108 | |
91 | else if (!list_empty(&v9fs_trans_list)) | 109 | spin_lock(&v9fs_trans_lock); |
92 | return list_first_entry(&v9fs_trans_list, | 110 | |
93 | struct p9_trans_module, list); | 111 | list_for_each_entry(t, &v9fs_trans_list, list) |
94 | else | 112 | if (t->def && try_module_get(t->owner)) { |
95 | return NULL; | 113 | found = t; |
114 | break; | ||
115 | } | ||
116 | |||
117 | if (!found) | ||
118 | list_for_each_entry(t, &v9fs_trans_list, list) | ||
119 | if (try_module_get(t->owner)) { | ||
120 | found = t; | ||
121 | break; | ||
122 | } | ||
123 | |||
124 | spin_unlock(&v9fs_trans_lock); | ||
125 | return found; | ||
96 | } | 126 | } |
97 | EXPORT_SYMBOL(v9fs_default_trans); | 127 | EXPORT_SYMBOL(v9fs_get_default_trans); |
98 | 128 | ||
129 | /** | ||
130 | * v9fs_put_trans - put trans | ||
131 | * @m: transport to put | ||
132 | * | ||
133 | */ | ||
134 | void v9fs_put_trans(struct p9_trans_module *m) | ||
135 | { | ||
136 | if (m) | ||
137 | module_put(m->owner); | ||
138 | } | ||
99 | 139 | ||
100 | /** | 140 | /** |
101 | * v9fs_init - Initialize module | 141 | * v9fs_init - Initialize module |
@@ -120,6 +160,8 @@ static int __init init_p9(void) | |||
120 | static void __exit exit_p9(void) | 160 | static void __exit exit_p9(void) |
121 | { | 161 | { |
122 | printk(KERN_INFO "Unloading 9P2000 support\n"); | 162 | printk(KERN_INFO "Unloading 9P2000 support\n"); |
163 | |||
164 | p9_trans_fd_exit(); | ||
123 | } | 165 | } |
124 | 166 | ||
125 | module_init(init_p9) | 167 | module_init(init_p9) |
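The net/9p/mod.c rewrite turns the transport list into a properly locked registry whose lookups pin the chosen transport with try_module_get() and whose users drop it again via v9fs_put_trans(). A rough user-space sketch of that pattern, with a pthread mutex and a plain counter standing in for the spinlock and the module refcount.

        #include <pthread.h>
        #include <stdio.h>
        #include <string.h>

        struct transport {
                const char *name;
                int refs;
                struct transport *next;
        };

        static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct transport *trans_list;

        static void register_trans(struct transport *t)
        {
                pthread_mutex_lock(&trans_lock);
                t->next = trans_list;
                trans_list = t;
                pthread_mutex_unlock(&trans_lock);
        }

        static struct transport *get_trans_by_name(const char *name)
        {
                struct transport *t, *found = NULL;

                pthread_mutex_lock(&trans_lock);
                for (t = trans_list; t; t = t->next) {
                        if (strcmp(t->name, name) == 0) {
                                t->refs++;      /* stands in for try_module_get() */
                                found = t;
                                break;
                        }
                }
                pthread_mutex_unlock(&trans_lock);
                return found;
        }

        static void put_trans(struct transport *t)
        {
                if (!t)
                        return;
                pthread_mutex_lock(&trans_lock);
                t->refs--;                      /* stands in for module_put() */
                pthread_mutex_unlock(&trans_lock);
        }

        int main(void)
        {
                struct transport tcp = { "tcp", 0, NULL }, virtio = { "virtio", 0, NULL };
                struct transport *t;

                register_trans(&tcp);
                register_trans(&virtio);

                t = get_trans_by_name("virtio");
                printf("got %s, refs=%d\n", t->name, t->refs);
                put_trans(t);
                printf("after put, refs=%d\n", t->refs);
                return 0;
        }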
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index cdf137af7adc..d652baf5ff91 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -151,7 +151,6 @@ struct p9_mux_poll_task { | |||
151 | * @trans: reference to transport instance for this connection | 151 | * @trans: reference to transport instance for this connection |
152 | * @tagpool: id accounting for transactions | 152 | * @tagpool: id accounting for transactions |
153 | * @err: error state | 153 | * @err: error state |
154 | * @equeue: event wait_q (?) | ||
155 | * @req_list: accounting for requests which have been sent | 154 | * @req_list: accounting for requests which have been sent |
156 | * @unsent_req_list: accounting for requests that haven't been sent | 155 | * @unsent_req_list: accounting for requests that haven't been sent |
157 | * @rcall: current response &p9_fcall structure | 156 | * @rcall: current response &p9_fcall structure |
@@ -178,7 +177,6 @@ struct p9_conn { | |||
178 | struct p9_trans *trans; | 177 | struct p9_trans *trans; |
179 | struct p9_idpool *tagpool; | 178 | struct p9_idpool *tagpool; |
180 | int err; | 179 | int err; |
181 | wait_queue_head_t equeue; | ||
182 | struct list_head req_list; | 180 | struct list_head req_list; |
183 | struct list_head unsent_req_list; | 181 | struct list_head unsent_req_list; |
184 | struct p9_fcall *rcall; | 182 | struct p9_fcall *rcall; |
@@ -240,22 +238,6 @@ static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, | |||
240 | 238 | ||
241 | static void p9_conn_cancel(struct p9_conn *m, int err); | 239 | static void p9_conn_cancel(struct p9_conn *m, int err); |
242 | 240 | ||
243 | static int p9_mux_global_init(void) | ||
244 | { | ||
245 | int i; | ||
246 | |||
247 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) | ||
248 | p9_mux_poll_tasks[i].task = NULL; | ||
249 | |||
250 | p9_mux_wq = create_workqueue("v9fs"); | ||
251 | if (!p9_mux_wq) { | ||
252 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
253 | return -ENOMEM; | ||
254 | } | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static u16 p9_mux_get_tag(struct p9_conn *m) | 241 | static u16 p9_mux_get_tag(struct p9_conn *m) |
260 | { | 242 | { |
261 | int tag; | 243 | int tag; |
@@ -409,11 +391,11 @@ static void p9_mux_poll_stop(struct p9_conn *m) | |||
409 | static struct p9_conn *p9_conn_create(struct p9_trans *trans) | 391 | static struct p9_conn *p9_conn_create(struct p9_trans *trans) |
410 | { | 392 | { |
411 | int i, n; | 393 | int i, n; |
412 | struct p9_conn *m, *mtmp; | 394 | struct p9_conn *m; |
413 | 395 | ||
414 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, | 396 | P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, |
415 | trans->msize); | 397 | trans->msize); |
416 | m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL); | 398 | m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); |
417 | if (!m) | 399 | if (!m) |
418 | return ERR_PTR(-ENOMEM); | 400 | return ERR_PTR(-ENOMEM); |
419 | 401 | ||
@@ -424,25 +406,14 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans) | |||
424 | m->trans = trans; | 406 | m->trans = trans; |
425 | m->tagpool = p9_idpool_create(); | 407 | m->tagpool = p9_idpool_create(); |
426 | if (IS_ERR(m->tagpool)) { | 408 | if (IS_ERR(m->tagpool)) { |
427 | mtmp = ERR_PTR(-ENOMEM); | ||
428 | kfree(m); | 409 | kfree(m); |
429 | return mtmp; | 410 | return ERR_PTR(-ENOMEM); |
430 | } | 411 | } |
431 | 412 | ||
432 | m->err = 0; | ||
433 | init_waitqueue_head(&m->equeue); | ||
434 | INIT_LIST_HEAD(&m->req_list); | 413 | INIT_LIST_HEAD(&m->req_list); |
435 | INIT_LIST_HEAD(&m->unsent_req_list); | 414 | INIT_LIST_HEAD(&m->unsent_req_list); |
436 | m->rcall = NULL; | ||
437 | m->rpos = 0; | ||
438 | m->rbuf = NULL; | ||
439 | m->wpos = m->wsize = 0; | ||
440 | m->wbuf = NULL; | ||
441 | INIT_WORK(&m->rq, p9_read_work); | 415 | INIT_WORK(&m->rq, p9_read_work); |
442 | INIT_WORK(&m->wq, p9_write_work); | 416 | INIT_WORK(&m->wq, p9_write_work); |
443 | m->wsched = 0; | ||
444 | memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); | ||
445 | m->poll_task = NULL; | ||
446 | n = p9_mux_poll_start(m); | 417 | n = p9_mux_poll_start(m); |
447 | if (n) { | 418 | if (n) { |
448 | kfree(m); | 419 | kfree(m); |
@@ -463,10 +434,8 @@ static struct p9_conn *p9_conn_create(struct p9_trans *trans) | |||
463 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { | 434 | for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { |
464 | if (IS_ERR(m->poll_waddr[i])) { | 435 | if (IS_ERR(m->poll_waddr[i])) { |
465 | p9_mux_poll_stop(m); | 436 | p9_mux_poll_stop(m); |
466 | mtmp = (void *)m->poll_waddr; /* the error code */ | ||
467 | kfree(m); | 437 | kfree(m); |
468 | m = mtmp; | 438 | return (void *)m->poll_waddr; /* the error code */ |
469 | break; | ||
470 | } | 439 | } |
471 | } | 440 | } |
472 | 441 | ||
@@ -483,18 +452,13 @@ static void p9_conn_destroy(struct p9_conn *m) | |||
483 | { | 452 | { |
484 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, | 453 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, |
485 | m->mux_list.prev, m->mux_list.next); | 454 | m->mux_list.prev, m->mux_list.next); |
486 | p9_conn_cancel(m, -ECONNRESET); | ||
487 | |||
488 | if (!list_empty(&m->req_list)) { | ||
489 | /* wait until all processes waiting on this session exit */ | ||
490 | P9_DPRINTK(P9_DEBUG_MUX, | ||
491 | "mux %p waiting for empty request queue\n", m); | ||
492 | wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000); | ||
493 | P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m, | ||
494 | list_empty(&m->req_list)); | ||
495 | } | ||
496 | 455 | ||
497 | p9_mux_poll_stop(m); | 456 | p9_mux_poll_stop(m); |
457 | cancel_work_sync(&m->rq); | ||
458 | cancel_work_sync(&m->wq); | ||
459 | |||
460 | p9_conn_cancel(m, -ECONNRESET); | ||
461 | |||
498 | m->trans = NULL; | 462 | m->trans = NULL; |
499 | p9_idpool_destroy(m->tagpool); | 463 | p9_idpool_destroy(m->tagpool); |
500 | kfree(m); | 464 | kfree(m); |
@@ -840,8 +804,6 @@ static void p9_read_work(struct work_struct *work) | |||
840 | (*req->cb) (req, req->cba); | 804 | (*req->cb) (req, req->cba); |
841 | else | 805 | else |
842 | kfree(req->rcall); | 806 | kfree(req->rcall); |
843 | |||
844 | wake_up(&m->equeue); | ||
845 | } | 807 | } |
846 | } else { | 808 | } else { |
847 | if (err >= 0 && rcall->id != P9_RFLUSH) | 809 | if (err >= 0 && rcall->id != P9_RFLUSH) |
@@ -908,8 +870,10 @@ static struct p9_req *p9_send_request(struct p9_conn *m, | |||
908 | else | 870 | else |
909 | n = p9_mux_get_tag(m); | 871 | n = p9_mux_get_tag(m); |
910 | 872 | ||
911 | if (n < 0) | 873 | if (n < 0) { |
874 | kfree(req); | ||
912 | return ERR_PTR(-ENOMEM); | 875 | return ERR_PTR(-ENOMEM); |
876 | } | ||
913 | 877 | ||
914 | p9_set_tag(tc, n); | 878 | p9_set_tag(tc, n); |
915 | 879 | ||
@@ -984,8 +948,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a) | |||
984 | (*req->cb) (req, req->cba); | 948 | (*req->cb) (req, req->cba); |
985 | else | 949 | else |
986 | kfree(req->rcall); | 950 | kfree(req->rcall); |
987 | |||
988 | wake_up(&m->equeue); | ||
989 | } | 951 | } |
990 | 952 | ||
991 | kfree(freq->tcall); | 953 | kfree(freq->tcall); |
@@ -1191,8 +1153,6 @@ void p9_conn_cancel(struct p9_conn *m, int err) | |||
1191 | else | 1153 | else |
1192 | kfree(req->rcall); | 1154 | kfree(req->rcall); |
1193 | } | 1155 | } |
1194 | |||
1195 | wake_up(&m->equeue); | ||
1196 | } | 1156 | } |
1197 | 1157 | ||
1198 | /** | 1158 | /** |
@@ -1370,7 +1330,6 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt) | |||
1370 | { | 1330 | { |
1371 | int ret, n; | 1331 | int ret, n; |
1372 | struct p9_trans_fd *ts = NULL; | 1332 | struct p9_trans_fd *ts = NULL; |
1373 | mm_segment_t oldfs; | ||
1374 | 1333 | ||
1375 | if (trans && trans->status == Connected) | 1334 | if (trans && trans->status == Connected) |
1376 | ts = trans->priv; | 1335 | ts = trans->priv; |
@@ -1384,24 +1343,17 @@ p9_fd_poll(struct p9_trans *trans, struct poll_table_struct *pt) | |||
1384 | if (!ts->wr->f_op || !ts->wr->f_op->poll) | 1343 | if (!ts->wr->f_op || !ts->wr->f_op->poll) |
1385 | return -EIO; | 1344 | return -EIO; |
1386 | 1345 | ||
1387 | oldfs = get_fs(); | ||
1388 | set_fs(get_ds()); | ||
1389 | |||
1390 | ret = ts->rd->f_op->poll(ts->rd, pt); | 1346 | ret = ts->rd->f_op->poll(ts->rd, pt); |
1391 | if (ret < 0) | 1347 | if (ret < 0) |
1392 | goto end; | 1348 | return ret; |
1393 | 1349 | ||
1394 | if (ts->rd != ts->wr) { | 1350 | if (ts->rd != ts->wr) { |
1395 | n = ts->wr->f_op->poll(ts->wr, pt); | 1351 | n = ts->wr->f_op->poll(ts->wr, pt); |
1396 | if (n < 0) { | 1352 | if (n < 0) |
1397 | ret = n; | 1353 | return n; |
1398 | goto end; | ||
1399 | } | ||
1400 | ret = (ret & ~POLLOUT) | (n & ~POLLIN); | 1354 | ret = (ret & ~POLLOUT) | (n & ~POLLIN); |
1401 | } | 1355 | } |
1402 | 1356 | ||
1403 | end: | ||
1404 | set_fs(oldfs); | ||
1405 | return ret; | 1357 | return ret; |
1406 | } | 1358 | } |
1407 | 1359 | ||
@@ -1629,6 +1581,7 @@ static struct p9_trans_module p9_tcp_trans = { | |||
1629 | .maxsize = MAX_SOCK_BUF, | 1581 | .maxsize = MAX_SOCK_BUF, |
1630 | .def = 1, | 1582 | .def = 1, |
1631 | .create = p9_trans_create_tcp, | 1583 | .create = p9_trans_create_tcp, |
1584 | .owner = THIS_MODULE, | ||
1632 | }; | 1585 | }; |
1633 | 1586 | ||
1634 | static struct p9_trans_module p9_unix_trans = { | 1587 | static struct p9_trans_module p9_unix_trans = { |
@@ -1636,6 +1589,7 @@ static struct p9_trans_module p9_unix_trans = { | |||
1636 | .maxsize = MAX_SOCK_BUF, | 1589 | .maxsize = MAX_SOCK_BUF, |
1637 | .def = 0, | 1590 | .def = 0, |
1638 | .create = p9_trans_create_unix, | 1591 | .create = p9_trans_create_unix, |
1592 | .owner = THIS_MODULE, | ||
1639 | }; | 1593 | }; |
1640 | 1594 | ||
1641 | static struct p9_trans_module p9_fd_trans = { | 1595 | static struct p9_trans_module p9_fd_trans = { |
@@ -1643,14 +1597,20 @@ static struct p9_trans_module p9_fd_trans = { | |||
1643 | .maxsize = MAX_SOCK_BUF, | 1597 | .maxsize = MAX_SOCK_BUF, |
1644 | .def = 0, | 1598 | .def = 0, |
1645 | .create = p9_trans_create_fd, | 1599 | .create = p9_trans_create_fd, |
1600 | .owner = THIS_MODULE, | ||
1646 | }; | 1601 | }; |
1647 | 1602 | ||
1648 | int p9_trans_fd_init(void) | 1603 | int p9_trans_fd_init(void) |
1649 | { | 1604 | { |
1650 | int ret = p9_mux_global_init(); | 1605 | int i; |
1651 | if (ret) { | 1606 | |
1652 | printk(KERN_WARNING "9p: starting mux failed\n"); | 1607 | for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) |
1653 | return ret; | 1608 | p9_mux_poll_tasks[i].task = NULL; |
1609 | |||
1610 | p9_mux_wq = create_workqueue("v9fs"); | ||
1611 | if (!p9_mux_wq) { | ||
1612 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
1613 | return -ENOMEM; | ||
1654 | } | 1614 | } |
1655 | 1615 | ||
1656 | v9fs_register_trans(&p9_tcp_trans); | 1616 | v9fs_register_trans(&p9_tcp_trans); |
@@ -1659,4 +1619,12 @@ int p9_trans_fd_init(void) | |||
1659 | 1619 | ||
1660 | return 0; | 1620 | return 0; |
1661 | } | 1621 | } |
1662 | EXPORT_SYMBOL(p9_trans_fd_init); | 1622 | |
1623 | void p9_trans_fd_exit(void) | ||
1624 | { | ||
1625 | v9fs_unregister_trans(&p9_tcp_trans); | ||
1626 | v9fs_unregister_trans(&p9_unix_trans); | ||
1627 | v9fs_unregister_trans(&p9_fd_trans); | ||
1628 | |||
1629 | destroy_workqueue(p9_mux_wq); | ||
1630 | } | ||
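With the trans_fd.c changes, everything p9_trans_fd_init() sets up (the mux workqueue and the tcp/unix/fd transports) now has a matching p9_trans_fd_exit() that the 9p core can call on unload. A minimal stand-in showing that symmetric init/exit shape; the "workqueue" below is just a malloc'd blob and the counters are placeholders.

        #include <stdio.h>
        #include <stdlib.h>

        static void *mux_wq;            /* pretend workqueue handle */
        static int   registered;

        static int trans_fd_init(void)
        {
                mux_wq = malloc(64);    /* stands in for create_workqueue("v9fs") */
                if (!mux_wq) {
                        fprintf(stderr, "creating workqueue failed\n");
                        return -1;
                }
                registered = 3;         /* tcp, unix and fd transports */
                return 0;
        }

        static void trans_fd_exit(void)
        {
                registered = 0;         /* the three v9fs_unregister_trans() calls */
                free(mux_wq);           /* destroy_workqueue(p9_mux_wq) */
                mux_wq = NULL;
        }

        int main(void)
        {
                if (trans_fd_init())
                        return 1;
                printf("transports registered: %d\n", registered);
                trans_fd_exit();
                printf("after exit: %d, wq=%p\n", registered, mux_wq);
                return 0;
        }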
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 42adc052b149..94912e077a55 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -528,6 +528,7 @@ static struct p9_trans_module p9_virtio_trans = { | |||
528 | .create = p9_virtio_create, | 528 | .create = p9_virtio_create, |
529 | .maxsize = PAGE_SIZE*16, | 529 | .maxsize = PAGE_SIZE*16, |
530 | .def = 0, | 530 | .def = 0, |
531 | .owner = THIS_MODULE, | ||
531 | }; | 532 | }; |
532 | 533 | ||
533 | /* The standard init function */ | 534 | /* The standard init function */ |
@@ -545,6 +546,7 @@ static int __init p9_virtio_init(void) | |||
545 | static void __exit p9_virtio_cleanup(void) | 546 | static void __exit p9_virtio_cleanup(void) |
546 | { | 547 | { |
547 | unregister_virtio_driver(&p9_virtio_drv); | 548 | unregister_virtio_driver(&p9_virtio_drv); |
549 | v9fs_unregister_trans(&p9_virtio_trans); | ||
548 | } | 550 | } |
549 | 551 | ||
550 | module_init(p9_virtio_init); | 552 | module_init(p9_virtio_init); |
diff --git a/net/core/dev.c b/net/core/dev.c index e719ed29310f..e8eb2b478344 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -122,6 +122,7 @@ | |||
122 | #include <linux/if_arp.h> | 122 | #include <linux/if_arp.h> |
123 | #include <linux/if_vlan.h> | 123 | #include <linux/if_vlan.h> |
124 | #include <linux/ip.h> | 124 | #include <linux/ip.h> |
125 | #include <net/ip.h> | ||
125 | #include <linux/ipv6.h> | 126 | #include <linux/ipv6.h> |
126 | #include <linux/in.h> | 127 | #include <linux/in.h> |
127 | #include <linux/jhash.h> | 128 | #include <linux/jhash.h> |
@@ -1667,7 +1668,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) | |||
1667 | { | 1668 | { |
1668 | u32 addr1, addr2, ports; | 1669 | u32 addr1, addr2, ports; |
1669 | u32 hash, ihl; | 1670 | u32 hash, ihl; |
1670 | u8 ip_proto; | 1671 | u8 ip_proto = 0; |
1671 | 1672 | ||
1672 | if (unlikely(!simple_tx_hashrnd_initialized)) { | 1673 | if (unlikely(!simple_tx_hashrnd_initialized)) { |
1673 | get_random_bytes(&simple_tx_hashrnd, 4); | 1674 | get_random_bytes(&simple_tx_hashrnd, 4); |
@@ -1676,7 +1677,8 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb) | |||
1676 | 1677 | ||
1677 | switch (skb->protocol) { | 1678 | switch (skb->protocol) { |
1678 | case __constant_htons(ETH_P_IP): | 1679 | case __constant_htons(ETH_P_IP): |
1679 | ip_proto = ip_hdr(skb)->protocol; | 1680 | if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))) |
1681 | ip_proto = ip_hdr(skb)->protocol; | ||
1680 | addr1 = ip_hdr(skb)->saddr; | 1682 | addr1 = ip_hdr(skb)->saddr; |
1681 | addr2 = ip_hdr(skb)->daddr; | 1683 | addr2 = ip_hdr(skb)->daddr; |
1682 | ihl = ip_hdr(skb)->ihl; | 1684 | ihl = ip_hdr(skb)->ihl; |
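The net/core/dev.c hunk only reads the transport protocol when the IPv4 header says the packet is not a fragment (IP_MF clear and offset zero): a non-first fragment has no L4 header to read, and leaving the protocol out for every fragment keeps all fragments of one flow hashing the same way. A small sketch of that check, using a trimmed-down header struct rather than the kernel's struct iphdr.

        #include <stdio.h>
        #include <stdint.h>
        #include <arpa/inet.h>

        #define IP_MF     0x2000        /* "more fragments" flag */
        #define IP_OFFSET 0x1FFF        /* 13-bit fragment offset mask */

        struct iphdr_lite {
                uint8_t  protocol;
                uint16_t frag_off;      /* network byte order, as on the wire */
                uint32_t saddr, daddr;
        };

        static uint32_t hash_input(const struct iphdr_lite *iph)
        {
                uint8_t ip_proto = 0;

                /* Only an unfragmented datagram is guaranteed to carry the
                 * transport header; fragments fall back to addresses only. */
                if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)))
                        ip_proto = iph->protocol;

                return ntohl(iph->saddr) ^ ntohl(iph->daddr) ^ ip_proto;
        }

        int main(void)
        {
                struct iphdr_lite plain = { 6, 0,             htonl(0x0a000001), htonl(0x0a000002) };
                struct iphdr_lite frag1 = { 6, htons(IP_MF),  htonl(0x0a000001), htonl(0x0a000002) };
                struct iphdr_lite fragN = { 6, htons(0x0123), htonl(0x0a000001), htonl(0x0a000002) };

                printf("unfragmented: %u\n", (unsigned)hash_input(&plain));
                printf("first frag  : %u\n", (unsigned)hash_input(&frag1));
                printf("later frag  : %u\n", (unsigned)hash_input(&fragN));
                return 0;
        }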
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 1b4fee20fc93..011478e46c40 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -618,7 +618,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
618 | ]; | 618 | ]; |
619 | } rep; | 619 | } rep; |
620 | struct ip_reply_arg arg; | 620 | struct ip_reply_arg arg; |
621 | struct net *net = dev_net(skb->dev); | 621 | struct net *net = dev_net(skb->dst->dev); |
622 | 622 | ||
623 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 623 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
624 | memset(&arg, 0, sizeof(arg)); | 624 | memset(&arg, 0, sizeof(arg)); |
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 62e39ace0588..26654b26d7fa 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c | |||
@@ -97,8 +97,6 @@ hbh_mt6(const struct sk_buff *skb, const struct net_device *in, | |||
97 | hdrlen -= 2; | 97 | hdrlen -= 2; |
98 | if (!(optinfo->flags & IP6T_OPTS_OPTS)) { | 98 | if (!(optinfo->flags & IP6T_OPTS_OPTS)) { |
99 | return ret; | 99 | return ret; |
100 | } else if (optinfo->flags & IP6T_OPTS_NSTRICT) { | ||
101 | pr_debug("Not strict - not implemented"); | ||
102 | } else { | 100 | } else { |
103 | pr_debug("Strict "); | 101 | pr_debug("Strict "); |
104 | pr_debug("#%d ", optinfo->optsnr); | 102 | pr_debug("#%d ", optinfo->optsnr); |
@@ -177,6 +175,12 @@ hbh_mt6_check(const char *tablename, const void *entry, | |||
177 | pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags); | 175 | pr_debug("ip6t_opts: unknown flags %X\n", optsinfo->invflags); |
178 | return false; | 176 | return false; |
179 | } | 177 | } |
178 | |||
179 | if (optsinfo->flags & IP6T_OPTS_NSTRICT) { | ||
180 | pr_debug("ip6t_opts: Not strict - not implemented"); | ||
181 | return false; | ||
182 | } | ||
183 | |||
180 | return true; | 184 | return true; |
181 | } | 185 | } |
182 | 186 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9af6115f0f50..63442a1e741c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -2688,6 +2688,8 @@ int __init ip6_route_init(void) | |||
2688 | if (ret) | 2688 | if (ret) |
2689 | goto out_kmem_cache; | 2689 | goto out_kmem_cache; |
2690 | 2690 | ||
2691 | ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep; | ||
2692 | |||
2691 | /* Registering of the loopback is done before this portion of code, | 2693 | /* Registering of the loopback is done before this portion of code, |
2692 | * the loopback reference in rt6_info will not be taken, do it | 2694 | * the loopback reference in rt6_info will not be taken, do it |
2693 | * manually for init_net */ | 2695 | * manually for init_net */ |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index b585c850a89a..10e22fd48222 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1050,7 +1050,7 @@ static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 | |||
1050 | struct tcphdr *th = tcp_hdr(skb), *t1; | 1050 | struct tcphdr *th = tcp_hdr(skb), *t1; |
1051 | struct sk_buff *buff; | 1051 | struct sk_buff *buff; |
1052 | struct flowi fl; | 1052 | struct flowi fl; |
1053 | struct net *net = dev_net(skb->dev); | 1053 | struct net *net = dev_net(skb->dst->dev); |
1054 | struct sock *ctl_sk = net->ipv6.tcp_sk; | 1054 | struct sock *ctl_sk = net->ipv6.tcp_sk; |
1055 | unsigned int tot_len = sizeof(struct tcphdr); | 1055 | unsigned int tot_len = sizeof(struct tcphdr); |
1056 | __be32 *topt; | 1056 | __be32 *topt; |
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 705959b31e24..d7b54b5bfa69 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -524,7 +524,6 @@ static int iucv_enable(void) | |||
524 | get_online_cpus(); | 524 | get_online_cpus(); |
525 | for_each_online_cpu(cpu) | 525 | for_each_online_cpu(cpu) |
526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); | 526 | smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1); |
527 | preempt_enable(); | ||
528 | if (cpus_empty(iucv_buffer_cpumask)) | 527 | if (cpus_empty(iucv_buffer_cpumask)) |
529 | /* No cpu could declare an iucv buffer. */ | 528 | /* No cpu could declare an iucv buffer. */ |
530 | goto out_path; | 529 | goto out_path; |
@@ -547,7 +546,9 @@ out: | |||
547 | */ | 546 | */ |
548 | static void iucv_disable(void) | 547 | static void iucv_disable(void) |
549 | { | 548 | { |
549 | get_online_cpus(); | ||
550 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); | 550 | on_each_cpu(iucv_retrieve_cpu, NULL, 1); |
551 | put_online_cpus(); | ||
551 | kfree(iucv_path_table); | 552 | kfree(iucv_path_table); |
552 | } | 553 | } |
553 | 554 | ||
diff --git a/net/key/af_key.c b/net/key/af_key.c index d628df97e02e..b7f5a1c353ee 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -73,22 +73,18 @@ static int pfkey_can_dump(struct sock *sk) | |||
73 | return 0; | 73 | return 0; |
74 | } | 74 | } |
75 | 75 | ||
76 | static int pfkey_do_dump(struct pfkey_sock *pfk) | 76 | static void pfkey_terminate_dump(struct pfkey_sock *pfk) |
77 | { | 77 | { |
78 | int rc; | 78 | if (pfk->dump.dump) { |
79 | 79 | pfk->dump.done(pfk); | |
80 | rc = pfk->dump.dump(pfk); | 80 | pfk->dump.dump = NULL; |
81 | if (rc == -ENOBUFS) | 81 | pfk->dump.done = NULL; |
82 | return 0; | 82 | } |
83 | |||
84 | pfk->dump.done(pfk); | ||
85 | pfk->dump.dump = NULL; | ||
86 | pfk->dump.done = NULL; | ||
87 | return rc; | ||
88 | } | 83 | } |
89 | 84 | ||
90 | static void pfkey_sock_destruct(struct sock *sk) | 85 | static void pfkey_sock_destruct(struct sock *sk) |
91 | { | 86 | { |
87 | pfkey_terminate_dump(pfkey_sk(sk)); | ||
92 | skb_queue_purge(&sk->sk_receive_queue); | 88 | skb_queue_purge(&sk->sk_receive_queue); |
93 | 89 | ||
94 | if (!sock_flag(sk, SOCK_DEAD)) { | 90 | if (!sock_flag(sk, SOCK_DEAD)) { |
@@ -310,6 +306,18 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
310 | return err; | 306 | return err; |
311 | } | 307 | } |
312 | 308 | ||
309 | static int pfkey_do_dump(struct pfkey_sock *pfk) | ||
310 | { | ||
311 | int rc; | ||
312 | |||
313 | rc = pfk->dump.dump(pfk); | ||
314 | if (rc == -ENOBUFS) | ||
315 | return 0; | ||
316 | |||
317 | pfkey_terminate_dump(pfk); | ||
318 | return rc; | ||
319 | } | ||
320 | |||
313 | static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig) | 321 | static inline void pfkey_hdr_dup(struct sadb_msg *new, struct sadb_msg *orig) |
314 | { | 322 | { |
315 | *new = *orig; | 323 | *new = *orig; |
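The af_key.c hunks factor the dump-state teardown into pfkey_terminate_dump(), which is idempotent and is now also called from the socket destructor, so a PF_KEY socket closed mid-dump no longer leaks its dump callbacks. A simplified sketch of that idempotent-cleanup pattern; the types and helpers are stand-ins, not the pfkey structures.

        #include <stdio.h>

        struct dump_state {
                int  (*dump)(struct dump_state *);
                void (*done)(struct dump_state *);
        };

        static void terminate_dump(struct dump_state *d)
        {
                if (d->dump) {          /* only if a dump is actually in flight */
                        d->done(d);
                        d->dump = NULL;
                        d->done = NULL;
                }
        }

        static int  my_dump(struct dump_state *d) { (void)d; return 0; }
        static void my_done(struct dump_state *d) { (void)d; printf("dump state released\n"); }

        int main(void)
        {
                struct dump_state d = { my_dump, my_done };

                terminate_dump(&d);     /* e.g. from the dump completion path */
                terminate_dump(&d);     /* again from the socket destructor: harmless */
                return 0;
        }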
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index b599cbba4fbe..d68869f966c3 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -1012,6 +1012,29 @@ end: | |||
1012 | return retval; | 1012 | return retval; |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | struct sctp_chunk *sctp_make_violation_paramlen( | ||
1016 | const struct sctp_association *asoc, | ||
1017 | const struct sctp_chunk *chunk, | ||
1018 | struct sctp_paramhdr *param) | ||
1019 | { | ||
1020 | struct sctp_chunk *retval; | ||
1021 | static const char error[] = "The following parameter had invalid length:"; | ||
1022 | size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + | ||
1023 | sizeof(sctp_paramhdr_t); | ||
1024 | |||
1025 | retval = sctp_make_abort(asoc, chunk, payload_len); | ||
1026 | if (!retval) | ||
1027 | goto nodata; | ||
1028 | |||
1029 | sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, | ||
1030 | sizeof(error) + sizeof(sctp_paramhdr_t)); | ||
1031 | sctp_addto_chunk(retval, sizeof(error), error); | ||
1032 | sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); | ||
1033 | |||
1034 | nodata: | ||
1035 | return retval; | ||
1036 | } | ||
1037 | |||
1015 | /* Make a HEARTBEAT chunk. */ | 1038 | /* Make a HEARTBEAT chunk. */ |
1016 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, | 1039 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, |
1017 | const struct sctp_transport *transport, | 1040 | const struct sctp_transport *transport, |
@@ -1782,11 +1805,6 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc, | |||
1782 | const struct sctp_chunk *chunk, | 1805 | const struct sctp_chunk *chunk, |
1783 | struct sctp_chunk **errp) | 1806 | struct sctp_chunk **errp) |
1784 | { | 1807 | { |
1785 | static const char error[] = "The following parameter had invalid length:"; | ||
1786 | size_t payload_len = WORD_ROUND(sizeof(error)) + | ||
1787 | sizeof(sctp_paramhdr_t); | ||
1788 | |||
1789 | |||
1790 | /* This is a fatal error. Any accumulated non-fatal errors are | 1808 | /* This is a fatal error. Any accumulated non-fatal errors are |
1791 | * not reported. | 1809 | * not reported. |
1792 | */ | 1810 | */ |
@@ -1794,14 +1812,7 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc, | |||
1794 | sctp_chunk_free(*errp); | 1812 | sctp_chunk_free(*errp); |
1795 | 1813 | ||
1796 | /* Create an error chunk and fill it in with our payload. */ | 1814 | /* Create an error chunk and fill it in with our payload. */ |
1797 | *errp = sctp_make_op_error_space(asoc, chunk, payload_len); | 1815 | *errp = sctp_make_violation_paramlen(asoc, chunk, param); |
1798 | |||
1799 | if (*errp) { | ||
1800 | sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, | ||
1801 | sizeof(error) + sizeof(sctp_paramhdr_t)); | ||
1802 | sctp_addto_chunk(*errp, sizeof(error), error); | ||
1803 | sctp_addto_param(*errp, sizeof(sctp_paramhdr_t), param); | ||
1804 | } | ||
1805 | 1816 | ||
1806 | return 0; | 1817 | return 0; |
1807 | } | 1818 | } |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8848d329aa2c..7c622af2ce55 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -119,7 +119,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( | |||
119 | const struct sctp_endpoint *ep, | 119 | const struct sctp_endpoint *ep, |
120 | const struct sctp_association *asoc, | 120 | const struct sctp_association *asoc, |
121 | const sctp_subtype_t type, | 121 | const sctp_subtype_t type, |
122 | void *arg, | 122 | void *arg, void *ext, |
123 | sctp_cmd_seq_t *commands); | 123 | sctp_cmd_seq_t *commands); |
124 | 124 | ||
125 | static sctp_disposition_t sctp_sf_violation_ctsn( | 125 | static sctp_disposition_t sctp_sf_violation_ctsn( |
@@ -3425,7 +3425,7 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3425 | addr_param = (union sctp_addr_param *)hdr->params; | 3425 | addr_param = (union sctp_addr_param *)hdr->params; |
3426 | length = ntohs(addr_param->p.length); | 3426 | length = ntohs(addr_param->p.length); |
3427 | if (length < sizeof(sctp_paramhdr_t)) | 3427 | if (length < sizeof(sctp_paramhdr_t)) |
3428 | return sctp_sf_violation_paramlen(ep, asoc, type, | 3428 | return sctp_sf_violation_paramlen(ep, asoc, type, arg, |
3429 | (void *)addr_param, commands); | 3429 | (void *)addr_param, commands); |
3430 | 3430 | ||
3431 | /* Verify the ASCONF chunk before processing it. */ | 3431 | /* Verify the ASCONF chunk before processing it. */ |
@@ -3433,8 +3433,8 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | |||
3433 | (sctp_paramhdr_t *)((void *)addr_param + length), | 3433 | (sctp_paramhdr_t *)((void *)addr_param + length), |
3434 | (void *)chunk->chunk_end, | 3434 | (void *)chunk->chunk_end, |
3435 | &err_param)) | 3435 | &err_param)) |
3436 | return sctp_sf_violation_paramlen(ep, asoc, type, | 3436 | return sctp_sf_violation_paramlen(ep, asoc, type, arg, |
3437 | (void *)&err_param, commands); | 3437 | (void *)err_param, commands); |
3438 | 3438 | ||
3439 | /* ADDIP 5.2 E1) Compare the value of the serial number to the value | 3439 | /* ADDIP 5.2 E1) Compare the value of the serial number to the value |
3440 | * the endpoint stored in a new association variable | 3440 | * the endpoint stored in a new association variable |
@@ -3542,8 +3542,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3542 | (sctp_paramhdr_t *)addip_hdr->params, | 3542 | (sctp_paramhdr_t *)addip_hdr->params, |
3543 | (void *)asconf_ack->chunk_end, | 3543 | (void *)asconf_ack->chunk_end, |
3544 | &err_param)) | 3544 | &err_param)) |
3545 | return sctp_sf_violation_paramlen(ep, asoc, type, | 3545 | return sctp_sf_violation_paramlen(ep, asoc, type, arg, |
3546 | (void *)&err_param, commands); | 3546 | (void *)err_param, commands); |
3547 | 3547 | ||
3548 | if (last_asconf) { | 3548 | if (last_asconf) { |
3549 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; | 3549 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; |
@@ -4240,12 +4240,38 @@ static sctp_disposition_t sctp_sf_violation_paramlen( | |||
4240 | const struct sctp_endpoint *ep, | 4240 | const struct sctp_endpoint *ep, |
4241 | const struct sctp_association *asoc, | 4241 | const struct sctp_association *asoc, |
4242 | const sctp_subtype_t type, | 4242 | const sctp_subtype_t type, |
4243 | void *arg, | 4243 | void *arg, void *ext, |
4244 | sctp_cmd_seq_t *commands) { | 4244 | sctp_cmd_seq_t *commands) |
4245 | static const char err_str[] = "The following parameter had invalid length:"; | 4245 | { |
4246 | struct sctp_chunk *chunk = arg; | ||
4247 | struct sctp_paramhdr *param = ext; | ||
4248 | struct sctp_chunk *abort = NULL; | ||
4246 | 4249 | ||
4247 | return sctp_sf_abort_violation(ep, asoc, arg, commands, err_str, | 4250 | if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) |
4248 | sizeof(err_str)); | 4251 | goto discard; |
4252 | |||
4253 | /* Make the abort chunk. */ | ||
4254 | abort = sctp_make_violation_paramlen(asoc, chunk, param); | ||
4255 | if (!abort) | ||
4256 | goto nomem; | ||
4257 | |||
4258 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | ||
4259 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
4260 | |||
4261 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4262 | SCTP_ERROR(ECONNABORTED)); | ||
4263 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4264 | SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); | ||
4265 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
4266 | |||
4267 | discard: | ||
4268 | sctp_sf_pdiscard(ep, asoc, SCTP_ST_CHUNK(0), arg, commands); | ||
4269 | |||
4270 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4271 | |||
4272 | return SCTP_DISPOSITION_ABORT; | ||
4273 | nomem: | ||
4274 | return SCTP_DISPOSITION_NOMEM; | ||
4249 | } | 4275 | } |
4250 | 4276 | ||
4251 | /* Handle a protocol violation when the peer trying to advance the | 4277 | /* Handle a protocol violation when the peer trying to advance the |
diff --git a/net/socket.c b/net/socket.c index 8ef8ba81b9e2..3e8d4e35c08f 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1511,6 +1511,7 @@ out_fd: | |||
1511 | goto out_put; | 1511 | goto out_put; |
1512 | } | 1512 | } |
1513 | 1513 | ||
1514 | #if 0 | ||
1514 | #ifdef HAVE_SET_RESTORE_SIGMASK | 1515 | #ifdef HAVE_SET_RESTORE_SIGMASK |
1515 | asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, | 1516 | asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, |
1516 | int __user *upeer_addrlen, | 1517 | int __user *upeer_addrlen, |
@@ -1564,6 +1565,7 @@ asmlinkage long sys_paccept(int fd, struct sockaddr __user *upeer_sockaddr, | |||
1564 | return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); | 1565 | return do_accept(fd, upeer_sockaddr, upeer_addrlen, flags); |
1565 | } | 1566 | } |
1566 | #endif | 1567 | #endif |
1568 | #endif | ||
1567 | 1569 | ||
1568 | asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, | 1570 | asmlinkage long sys_accept(int fd, struct sockaddr __user *upeer_sockaddr, |
1569 | int __user *upeer_addrlen) | 1571 | int __user *upeer_addrlen) |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index ac25b4c0e982..dc50f1e71f76 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -27,10 +27,14 @@ static int xfrm_state_check_space(struct xfrm_state *x, struct sk_buff *skb) | |||
27 | - skb_headroom(skb); | 27 | - skb_headroom(skb); |
28 | int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); | 28 | int ntail = dst->dev->needed_tailroom - skb_tailroom(skb); |
29 | 29 | ||
30 | if (nhead > 0 || ntail > 0) | 30 | if (nhead <= 0) { |
31 | return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); | 31 | if (ntail <= 0) |
32 | 32 | return 0; | |
33 | return 0; | 33 | nhead = 0; |
34 | } else if (ntail < 0) | ||
35 | ntail = 0; | ||
36 | |||
37 | return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC); | ||
34 | } | 38 | } |
35 | 39 | ||
36 | static int xfrm_output_one(struct sk_buff *skb, int err) | 40 | static int xfrm_output_one(struct sk_buff *skb, int err) |
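The xfrm_output.c hunk still expands the skb when either headroom or tailroom is short, but clamps the side that already fits to zero instead of passing a negative count to pskb_expand_head(). The same decision logic in a self-contained form, with grow_buffer() faking the expand call.

        #include <stdio.h>

        static int grow_buffer(int nhead, int ntail)
        {
                printf("expanding: +%d head, +%d tail\n", nhead, ntail);
                return 0;
        }

        static int check_space(int need_head, int have_head, int need_tail, int have_tail)
        {
                int nhead = need_head - have_head;
                int ntail = need_tail - have_tail;

                if (nhead <= 0) {
                        if (ntail <= 0)
                                return 0;       /* both fit, nothing to do */
                        nhead = 0;              /* head already fits, never shrink it */
                } else if (ntail < 0) {
                        ntail = 0;              /* tail already fits */
                }
                return grow_buffer(nhead, ntail);
        }

        int main(void)
        {
                check_space(16, 32,  8,  8);    /* enough room everywhere */
                check_space(48, 32,  8, 64);    /* only headroom is short */
                check_space(48, 32, 96, 64);    /* both short */
                return 0;
        }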
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 36b5eedcdc75..3e1057f885c6 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c | |||
@@ -32,6 +32,7 @@ char *defconfig_file; | |||
32 | 32 | ||
33 | static int indent = 1; | 33 | static int indent = 1; |
34 | static int valid_stdin = 1; | 34 | static int valid_stdin = 1; |
35 | static int sync_kconfig; | ||
35 | static int conf_cnt; | 36 | static int conf_cnt; |
36 | static char line[128]; | 37 | static char line[128]; |
37 | static struct menu *rootEntry; | 38 | static struct menu *rootEntry; |
@@ -65,7 +66,7 @@ static void strip(char *str) | |||
65 | 66 | ||
66 | static void check_stdin(void) | 67 | static void check_stdin(void) |
67 | { | 68 | { |
68 | if (!valid_stdin && input_mode == ask_silent) { | 69 | if (!valid_stdin) { |
69 | printf(_("aborted!\n\n")); | 70 | printf(_("aborted!\n\n")); |
70 | printf(_("Console input/output is redirected. ")); | 71 | printf(_("Console input/output is redirected. ")); |
71 | printf(_("Run 'make oldconfig' to update configuration.\n\n")); | 72 | printf(_("Run 'make oldconfig' to update configuration.\n\n")); |
@@ -427,43 +428,6 @@ static void check_conf(struct menu *menu) | |||
427 | check_conf(child); | 428 | check_conf(child); |
428 | } | 429 | } |
429 | 430 | ||
430 | static void conf_do_update(void) | ||
431 | { | ||
432 | /* Update until a loop caused no more changes */ | ||
433 | do { | ||
434 | conf_cnt = 0; | ||
435 | check_conf(&rootmenu); | ||
436 | } while (conf_cnt); | ||
437 | } | ||
438 | |||
439 | static int conf_silent_update(void) | ||
440 | { | ||
441 | const char *name; | ||
442 | |||
443 | if (conf_get_changed()) { | ||
444 | name = getenv("KCONFIG_NOSILENTUPDATE"); | ||
445 | if (name && *name) { | ||
446 | fprintf(stderr, | ||
447 | _("\n*** Kernel configuration requires explicit update.\n\n")); | ||
448 | return 1; | ||
449 | } | ||
450 | conf_do_update(); | ||
451 | } | ||
452 | return 0; | ||
453 | } | ||
454 | |||
455 | static int conf_update(void) | ||
456 | { | ||
457 | rootEntry = &rootmenu; | ||
458 | conf(&rootmenu); | ||
459 | if (input_mode == ask_all) { | ||
460 | input_mode = ask_silent; | ||
461 | valid_stdin = 1; | ||
462 | } | ||
463 | conf_do_update(); | ||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | int main(int ac, char **av) | 431 | int main(int ac, char **av) |
468 | { | 432 | { |
469 | int opt; | 433 | int opt; |
@@ -477,11 +441,11 @@ int main(int ac, char **av) | |||
477 | while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) { | 441 | while ((opt = getopt(ac, av, "osdD:nmyrh")) != -1) { |
478 | switch (opt) { | 442 | switch (opt) { |
479 | case 'o': | 443 | case 'o': |
480 | input_mode = ask_new; | 444 | input_mode = ask_silent; |
481 | break; | 445 | break; |
482 | case 's': | 446 | case 's': |
483 | input_mode = ask_silent; | 447 | input_mode = ask_silent; |
484 | valid_stdin = isatty(0) && isatty(1) && isatty(2); | 448 | sync_kconfig = 1; |
485 | break; | 449 | break; |
486 | case 'd': | 450 | case 'd': |
487 | input_mode = set_default; | 451 | input_mode = set_default; |
@@ -519,6 +483,19 @@ int main(int ac, char **av) | |||
519 | name = av[optind]; | 483 | name = av[optind]; |
520 | conf_parse(name); | 484 | conf_parse(name); |
521 | //zconfdump(stdout); | 485 | //zconfdump(stdout); |
486 | if (sync_kconfig) { | ||
487 | if (stat(".config", &tmpstat)) { | ||
488 | fprintf(stderr, _("***\n" | ||
489 | "*** You have not yet configured your kernel!\n" | ||
490 | "*** (missing kernel .config file)\n" | ||
491 | "***\n" | ||
492 | "*** Please run some configurator (e.g. \"make oldconfig\" or\n" | ||
493 | "*** \"make menuconfig\" or \"make xconfig\").\n" | ||
494 | "***\n")); | ||
495 | exit(1); | ||
496 | } | ||
497 | } | ||
498 | |||
522 | switch (input_mode) { | 499 | switch (input_mode) { |
523 | case set_default: | 500 | case set_default: |
524 | if (!defconfig_file) | 501 | if (!defconfig_file) |
@@ -531,16 +508,6 @@ int main(int ac, char **av) | |||
531 | } | 508 | } |
532 | break; | 509 | break; |
533 | case ask_silent: | 510 | case ask_silent: |
534 | if (stat(".config", &tmpstat)) { | ||
535 | printf(_("***\n" | ||
536 | "*** You have not yet configured your kernel!\n" | ||
537 | "*** (missing kernel .config file)\n" | ||
538 | "***\n" | ||
539 | "*** Please run some configurator (e.g. \"make oldconfig\" or\n" | ||
540 | "*** \"make menuconfig\" or \"make xconfig\").\n" | ||
541 | "***\n")); | ||
542 | exit(1); | ||
543 | } | ||
544 | case ask_all: | 511 | case ask_all: |
545 | case ask_new: | 512 | case ask_new: |
546 | conf_read(NULL); | 513 | conf_read(NULL); |
@@ -569,6 +536,19 @@ int main(int ac, char **av) | |||
569 | default: | 536 | default: |
570 | break; | 537 | break; |
571 | } | 538 | } |
539 | |||
540 | if (sync_kconfig) { | ||
541 | if (conf_get_changed()) { | ||
542 | name = getenv("KCONFIG_NOSILENTUPDATE"); | ||
543 | if (name && *name) { | ||
544 | fprintf(stderr, | ||
545 | _("\n*** Kernel configuration requires explicit update.\n\n")); | ||
546 | return 1; | ||
547 | } | ||
548 | } | ||
549 | valid_stdin = isatty(0) && isatty(1) && isatty(2); | ||
550 | } | ||
551 | |||
572 | switch (input_mode) { | 552 | switch (input_mode) { |
573 | case set_no: | 553 | case set_no: |
574 | conf_set_all_new_symbols(def_no); | 554 | conf_set_all_new_symbols(def_no); |
@@ -585,27 +565,38 @@ int main(int ac, char **av) | |||
585 | case set_default: | 565 | case set_default: |
586 | conf_set_all_new_symbols(def_default); | 566 | conf_set_all_new_symbols(def_default); |
587 | break; | 567 | break; |
588 | case ask_silent: | ||
589 | case ask_new: | 568 | case ask_new: |
590 | if (conf_silent_update()) | ||
591 | exit(1); | ||
592 | break; | ||
593 | case ask_all: | 569 | case ask_all: |
594 | if (conf_update()) | 570 | rootEntry = &rootmenu; |
595 | exit(1); | 571 | conf(&rootmenu); |
572 | input_mode = ask_silent; | ||
573 | /* fall through */ | ||
574 | case ask_silent: | ||
575 | /* Update until a loop caused no more changes */ | ||
576 | do { | ||
577 | conf_cnt = 0; | ||
578 | check_conf(&rootmenu); | ||
579 | } while (conf_cnt); | ||
596 | break; | 580 | break; |
597 | } | 581 | } |
598 | 582 | ||
599 | if (conf_write(NULL)) { | 583 | if (sync_kconfig) { |
600 | fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); | 584 | /* silentoldconfig is used during the build so we shall update autoconf. |
601 | exit(1); | 585 | * All other commands are only used to generate a config. |
602 | } | 586 | */ |
603 | /* ask_silent is used during the build so we shall update autoconf. | 587 | if (conf_get_changed() && conf_write(NULL)) { |
604 | * All other commands are only used to generate a config. | 588 | fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); |
605 | */ | 589 | exit(1); |
606 | if (input_mode == ask_silent && conf_write_autoconf()) { | 590 | } |
607 | fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); | 591 | if (conf_write_autoconf()) { |
608 | return 1; | 592 | fprintf(stderr, _("\n*** Error during update of the kernel configuration.\n\n")); |
593 | return 1; | ||
594 | } | ||
595 | } else { | ||
596 | if (conf_write(NULL)) { | ||
597 | fprintf(stderr, _("\n*** Error during writing of the kernel configuration.\n\n")); | ||
598 | exit(1); | ||
599 | } | ||
609 | } | 600 | } |
610 | return 0; | 601 | return 0; |
611 | } | 602 | } |
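
The conf.c rework above folds silentoldconfig's special cases behind the new sync_kconfig flag: the missing-.config check now runs right after conf_parse(), and KCONFIG_NOSILENTUPDATE blocks a silent rewrite when the configuration has pending changes. A minimal, self-contained sketch of those two guards in plain host-tool C (config_changed() is a hypothetical stand-in for conf_get_changed(), not the real kconfig code):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>

/* Hypothetical stand-in for conf_get_changed(). */
static int config_changed(void) { return 1; /* assume pending changes */ }

int main(void)
{
	struct stat st;
	const char *no_silent;

	/* Guard 1: sync mode refuses to run without an existing .config. */
	if (stat(".config", &st)) {
		fprintf(stderr, "*** You have not yet configured your kernel!\n");
		return 1;
	}

	/* Guard 2: an explicit update is required if the environment says so
	 * and the configuration actually changed. */
	no_silent = getenv("KCONFIG_NOSILENTUPDATE");
	if (no_silent && *no_silent && config_changed()) {
		fprintf(stderr, "*** Kernel configuration requires explicit update.\n");
		return 1;
	}
	return 0;
}

In the patched tool the first guard runs before any questions are asked, and the second fires only in sync_kconfig mode (silentoldconfig), never for the interactive front ends.
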
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index df6a188b9930..b91cf241a539 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c | |||
@@ -222,8 +222,10 @@ load: | |||
222 | continue; | 222 | continue; |
223 | if (def == S_DEF_USER) { | 223 | if (def == S_DEF_USER) { |
224 | sym = sym_find(line + 9); | 224 | sym = sym_find(line + 9); |
225 | if (!sym) | 225 | if (!sym) { |
226 | sym_add_change_count(1); | ||
226 | break; | 227 | break; |
228 | } | ||
227 | } else { | 229 | } else { |
228 | sym = sym_lookup(line + 9, 0); | 230 | sym = sym_lookup(line + 9, 0); |
229 | if (sym->type == S_UNKNOWN) | 231 | if (sym->type == S_UNKNOWN) |
@@ -259,8 +261,10 @@ load: | |||
259 | } | 261 | } |
260 | if (def == S_DEF_USER) { | 262 | if (def == S_DEF_USER) { |
261 | sym = sym_find(line + 7); | 263 | sym = sym_find(line + 7); |
262 | if (!sym) | 264 | if (!sym) { |
265 | sym_add_change_count(1); | ||
263 | break; | 266 | break; |
267 | } | ||
264 | } else { | 268 | } else { |
265 | sym = sym_lookup(line + 7, 0); | 269 | sym = sym_lookup(line + 7, 0); |
266 | if (sym->type == S_UNKNOWN) | 270 | if (sym->type == S_UNKNOWN) |
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index ff787e6ff8ed..44ee94d2ab76 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
@@ -781,6 +781,7 @@ sub output_struct_xml(%) { | |||
781 | print " <refsect1>\n"; | 781 | print " <refsect1>\n"; |
782 | print " <title>Members</title>\n"; | 782 | print " <title>Members</title>\n"; |
783 | 783 | ||
784 | if ($#{$args{'parameterlist'}} >= 0) { | ||
784 | print " <variablelist>\n"; | 785 | print " <variablelist>\n"; |
785 | foreach $parameter (@{$args{'parameterlist'}}) { | 786 | foreach $parameter (@{$args{'parameterlist'}}) { |
786 | ($parameter =~ /^#/) && next; | 787 | ($parameter =~ /^#/) && next; |
@@ -798,6 +799,9 @@ sub output_struct_xml(%) { | |||
798 | print " </varlistentry>\n"; | 799 | print " </varlistentry>\n"; |
799 | } | 800 | } |
800 | print " </variablelist>\n"; | 801 | print " </variablelist>\n"; |
802 | } else { | ||
803 | print " <para>\n None\n </para>\n"; | ||
804 | } | ||
801 | print " </refsect1>\n"; | 805 | print " </refsect1>\n"; |
802 | 806 | ||
803 | output_section_xml(@_); | 807 | output_section_xml(@_); |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index d11a8154500f..8551952ef329 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -2737,6 +2737,7 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, | |||
2737 | if (ctx == NULL) | 2737 | if (ctx == NULL) |
2738 | goto netlbl_secattr_to_sid_return; | 2738 | goto netlbl_secattr_to_sid_return; |
2739 | 2739 | ||
2740 | context_init(&ctx_new); | ||
2740 | ctx_new.user = ctx->user; | 2741 | ctx_new.user = ctx->user; |
2741 | ctx_new.role = ctx->role; | 2742 | ctx_new.role = ctx->role; |
2742 | ctx_new.type = ctx->type; | 2743 | ctx_new.type = ctx->type; |
@@ -2745,13 +2746,9 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, | |||
2745 | if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, | 2746 | if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, |
2746 | secattr->attr.mls.cat) != 0) | 2747 | secattr->attr.mls.cat) != 0) |
2747 | goto netlbl_secattr_to_sid_return; | 2748 | goto netlbl_secattr_to_sid_return; |
2748 | ctx_new.range.level[1].cat.highbit = | 2749 | memcpy(&ctx_new.range.level[1].cat, |
2749 | ctx_new.range.level[0].cat.highbit; | 2750 | &ctx_new.range.level[0].cat, |
2750 | ctx_new.range.level[1].cat.node = | 2751 | sizeof(ctx_new.range.level[0].cat)); |
2751 | ctx_new.range.level[0].cat.node; | ||
2752 | } else { | ||
2753 | ebitmap_init(&ctx_new.range.level[0].cat); | ||
2754 | ebitmap_init(&ctx_new.range.level[1].cat); | ||
2755 | } | 2752 | } |
2756 | if (mls_context_isvalid(&policydb, &ctx_new) != 1) | 2753 | if (mls_context_isvalid(&policydb, &ctx_new) != 1) |
2757 | goto netlbl_secattr_to_sid_return_cleanup; | 2754 | goto netlbl_secattr_to_sid_return_cleanup; |
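
The services.c fix calls context_init() on ctx_new before any fields are filled in, so nothing in the new context is left in an undefined state, and it duplicates the whole level-0 category bitmap into level 1 with a single memcpy instead of copying individual members. The same init-then-copy discipline in ordinary C, using a made-up struct rather than the real SELinux context type:

#include <stdio.h>
#include <string.h>

struct level {
	unsigned int highbit;
	void *node;
};

struct ctx {
	unsigned int user, role, type;
	struct level lvl[2];
};

int main(void)
{
	struct ctx src = { .user = 1, .role = 2, .type = 3,
			   .lvl = { { .highbit = 64, .node = NULL } } };
	struct ctx new_ctx;

	memset(&new_ctx, 0, sizeof(new_ctx));	/* known state first, like context_init() */
	new_ctx.user = src.user;
	new_ctx.role = src.role;
	new_ctx.type = src.type;
	new_ctx.lvl[0] = src.lvl[0];
	/* copy the whole sub-structure in one go rather than member by member */
	memcpy(&new_ctx.lvl[1], &new_ctx.lvl[0], sizeof(new_ctx.lvl[0]));

	printf("user=%u highbit=%u\n", new_ctx.user, new_ctx.lvl[1].highbit);
	return 0;
}
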
diff --git a/sound/core/pcm.c b/sound/core/pcm.c index 9dd9bc73fe1d..ece25c718e95 100644 --- a/sound/core/pcm.c +++ b/sound/core/pcm.c | |||
@@ -781,7 +781,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, | |||
781 | return -ENODEV; | 781 | return -ENODEV; |
782 | 782 | ||
783 | card = pcm->card; | 783 | card = pcm->card; |
784 | down_read(&card->controls_rwsem); | 784 | read_lock(&card->ctl_files_rwlock); |
785 | list_for_each_entry(kctl, &card->ctl_files, list) { | 785 | list_for_each_entry(kctl, &card->ctl_files, list) { |
786 | if (kctl->pid == current->pid) { | 786 | if (kctl->pid == current->pid) { |
787 | prefer_subdevice = kctl->prefer_pcm_subdevice; | 787 | prefer_subdevice = kctl->prefer_pcm_subdevice; |
@@ -789,7 +789,7 @@ int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, | |||
789 | break; | 789 | break; |
790 | } | 790 | } |
791 | } | 791 | } |
792 | up_read(&card->controls_rwsem); | 792 | read_unlock(&card->ctl_files_rwlock); |
793 | 793 | ||
794 | switch (stream) { | 794 | switch (stream) { |
795 | case SNDRV_PCM_STREAM_PLAYBACK: | 795 | case SNDRV_PCM_STREAM_PLAYBACK: |
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index c49b9d9e303c..c487025d3457 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -1546,16 +1546,10 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream) | |||
1546 | card = substream->pcm->card; | 1546 | card = substream->pcm->card; |
1547 | 1547 | ||
1548 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN || | 1548 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN || |
1549 | runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED) | 1549 | runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || |
1550 | runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) | ||
1550 | return -EBADFD; | 1551 | return -EBADFD; |
1551 | 1552 | ||
1552 | snd_power_lock(card); | ||
1553 | if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { | ||
1554 | result = snd_power_wait(card, SNDRV_CTL_POWER_D0); | ||
1555 | if (result < 0) | ||
1556 | goto _unlock; | ||
1557 | } | ||
1558 | |||
1559 | snd_pcm_stream_lock_irq(substream); | 1553 | snd_pcm_stream_lock_irq(substream); |
1560 | /* resume pause */ | 1554 | /* resume pause */ |
1561 | if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) | 1555 | if (runtime->status->state == SNDRV_PCM_STATE_PAUSED) |
@@ -1564,8 +1558,7 @@ static int snd_pcm_drop(struct snd_pcm_substream *substream) | |||
1564 | snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); | 1558 | snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP); |
1565 | /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ | 1559 | /* runtime->control->appl_ptr = runtime->status->hw_ptr; */ |
1566 | snd_pcm_stream_unlock_irq(substream); | 1560 | snd_pcm_stream_unlock_irq(substream); |
1567 | _unlock: | 1561 | |
1568 | snd_power_unlock(card); | ||
1569 | return result; | 1562 | return result; |
1570 | } | 1563 | } |
1571 | 1564 | ||
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index f7ea7287c59c..b917a9f981c7 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c | |||
@@ -418,7 +418,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) | |||
418 | mutex_lock(&rmidi->open_mutex); | 418 | mutex_lock(&rmidi->open_mutex); |
419 | while (1) { | 419 | while (1) { |
420 | subdevice = -1; | 420 | subdevice = -1; |
421 | down_read(&card->controls_rwsem); | 421 | read_lock(&card->ctl_files_rwlock); |
422 | list_for_each_entry(kctl, &card->ctl_files, list) { | 422 | list_for_each_entry(kctl, &card->ctl_files, list) { |
423 | if (kctl->pid == current->pid) { | 423 | if (kctl->pid == current->pid) { |
424 | subdevice = kctl->prefer_rawmidi_subdevice; | 424 | subdevice = kctl->prefer_rawmidi_subdevice; |
@@ -426,7 +426,7 @@ static int snd_rawmidi_open(struct inode *inode, struct file *file) | |||
426 | break; | 426 | break; |
427 | } | 427 | } |
428 | } | 428 | } |
429 | up_read(&card->controls_rwsem); | 429 | read_unlock(&card->ctl_files_rwlock); |
430 | err = snd_rawmidi_kernel_open(rmidi->card, rmidi->device, | 430 | err = snd_rawmidi_kernel_open(rmidi->card, rmidi->device, |
431 | subdevice, fflags, rawmidi_file); | 431 | subdevice, fflags, rawmidi_file); |
432 | if (err >= 0) | 432 | if (err >= 0) |
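
Both the pcm.c and rawmidi.c hunks above replace down_read(&card->controls_rwsem) with read_lock(&card->ctl_files_rwlock) around the walk of card->ctl_files, so the short read-only traversal uses a dedicated non-sleeping rwlock instead of the sleeping rwsem. The reader pattern, expressed here with POSIX rwlocks so the sketch is self-contained (all names invented for the example):

#include <pthread.h>
#include <stdio.h>

struct ctl_file {
	int pid;
	int prefer_subdevice;
	struct ctl_file *next;
};

static pthread_rwlock_t ctl_files_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static struct ctl_file *ctl_files;

static int lookup_prefer_subdevice(int pid)
{
	struct ctl_file *f;
	int subdevice = -1;

	pthread_rwlock_rdlock(&ctl_files_rwlock);	/* plays the role of read_lock() */
	for (f = ctl_files; f; f = f->next) {
		if (f->pid == pid) {
			subdevice = f->prefer_subdevice;
			break;
		}
	}
	pthread_rwlock_unlock(&ctl_files_rwlock);	/* plays the role of read_unlock() */
	return subdevice;
}

int main(void)
{
	struct ctl_file f = { .pid = 42, .prefer_subdevice = 1, .next = NULL };

	ctl_files = &f;
	printf("prefer_subdevice=%d\n", lookup_prefer_subdevice(42));
	return 0;
}
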
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index ad994fcab725..f3da621f25c5 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -1683,8 +1683,8 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = { | |||
1683 | /* Dell 3 stack systems with verb table in BIOS */ | 1683 | /* Dell 3 stack systems with verb table in BIOS */ |
1684 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), | 1684 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), |
1685 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), | 1685 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), |
1686 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell ", STAC_DELL_BIOS), | ||
1687 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), | 1686 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), |
1687 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_3ST), | ||
1688 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), | 1688 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0242, "Dell ", STAC_DELL_BIOS), |
1689 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), | 1689 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0243, "Dell ", STAC_DELL_BIOS), |
1690 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), | 1690 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ff, "Dell ", STAC_DELL_BIOS), |
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c index 566a6d0daf4a..106c48225bba 100644 --- a/sound/ppc/awacs.c +++ b/sound/ppc/awacs.c | |||
@@ -621,6 +621,13 @@ static struct snd_kcontrol_new snd_pmac_screamer_mixers_imac[] __initdata = { | |||
621 | AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), | 621 | AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), |
622 | }; | 622 | }; |
623 | 623 | ||
624 | static struct snd_kcontrol_new snd_pmac_screamer_mixers_g4agp[] __initdata = { | ||
625 | AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), | ||
626 | AWACS_VOLUME("Master Playback Volume", 5, 6, 1), | ||
627 | AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), | ||
628 | AWACS_SWITCH("Line Capture Switch", 0, SHIFT_MUX_MIC, 0), | ||
629 | }; | ||
630 | |||
624 | static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = { | 631 | static struct snd_kcontrol_new snd_pmac_awacs_mixers_pmac7500[] __initdata = { |
625 | AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), | 632 | AWACS_VOLUME("Line out Playback Volume", 2, 6, 1), |
626 | AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), | 633 | AWACS_SWITCH("CD Capture Switch", 0, SHIFT_MUX_CD, 0), |
@@ -688,7 +695,10 @@ static struct snd_kcontrol_new snd_pmac_awacs_speaker_vol[] __initdata = { | |||
688 | static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata = | 695 | static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw __initdata = |
689 | AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1); | 696 | AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_SPKMUTE, 1); |
690 | 697 | ||
691 | static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac __initdata = | 698 | static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac1 __initdata = |
699 | AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 1); | ||
700 | |||
701 | static struct snd_kcontrol_new snd_pmac_awacs_speaker_sw_imac2 __initdata = | ||
692 | AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0); | 702 | AWACS_SWITCH("PC Speaker Playback Switch", 1, SHIFT_PAROUT1, 0); |
693 | 703 | ||
694 | 704 | ||
@@ -765,11 +775,12 @@ static void snd_pmac_awacs_resume(struct snd_pmac *chip) | |||
765 | 775 | ||
766 | #define IS_PM7500 (machine_is_compatible("AAPL,7500")) | 776 | #define IS_PM7500 (machine_is_compatible("AAPL,7500")) |
767 | #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer")) | 777 | #define IS_BEIGE (machine_is_compatible("AAPL,Gossamer")) |
768 | #define IS_IMAC (machine_is_compatible("PowerMac2,1") \ | 778 | #define IS_IMAC1 (machine_is_compatible("PowerMac2,1")) |
769 | || machine_is_compatible("PowerMac2,2") \ | 779 | #define IS_IMAC2 (machine_is_compatible("PowerMac2,2") \ |
770 | || machine_is_compatible("PowerMac4,1")) | 780 | || machine_is_compatible("PowerMac4,1")) |
781 | #define IS_G4AGP (machine_is_compatible("PowerMac3,1")) | ||
771 | 782 | ||
772 | static int imac; | 783 | static int imac1, imac2; |
773 | 784 | ||
774 | #ifdef PMAC_SUPPORT_AUTOMUTE | 785 | #ifdef PMAC_SUPPORT_AUTOMUTE |
775 | /* | 786 | /* |
@@ -815,13 +826,18 @@ static void snd_pmac_awacs_update_automute(struct snd_pmac *chip, int do_notify) | |||
815 | { | 826 | { |
816 | int reg = chip->awacs_reg[1] | 827 | int reg = chip->awacs_reg[1] |
817 | | (MASK_HDMUTE | MASK_SPKMUTE); | 828 | | (MASK_HDMUTE | MASK_SPKMUTE); |
818 | if (imac) { | 829 | if (imac1) { |
830 | reg &= ~MASK_SPKMUTE; | ||
831 | reg |= MASK_PAROUT1; | ||
832 | } else if (imac2) { | ||
819 | reg &= ~MASK_SPKMUTE; | 833 | reg &= ~MASK_SPKMUTE; |
820 | reg &= ~MASK_PAROUT1; | 834 | reg &= ~MASK_PAROUT1; |
821 | } | 835 | } |
822 | if (snd_pmac_awacs_detect_headphone(chip)) | 836 | if (snd_pmac_awacs_detect_headphone(chip)) |
823 | reg &= ~MASK_HDMUTE; | 837 | reg &= ~MASK_HDMUTE; |
824 | else if (imac) | 838 | else if (imac1) |
839 | reg &= ~MASK_PAROUT1; | ||
840 | else if (imac2) | ||
825 | reg |= MASK_PAROUT1; | 841 | reg |= MASK_PAROUT1; |
826 | else | 842 | else |
827 | reg &= ~MASK_SPKMUTE; | 843 | reg &= ~MASK_SPKMUTE; |
@@ -850,9 +866,13 @@ snd_pmac_awacs_init(struct snd_pmac *chip) | |||
850 | { | 866 | { |
851 | int pm7500 = IS_PM7500; | 867 | int pm7500 = IS_PM7500; |
852 | int beige = IS_BEIGE; | 868 | int beige = IS_BEIGE; |
869 | int g4agp = IS_G4AGP; | ||
870 | int imac; | ||
853 | int err, vol; | 871 | int err, vol; |
854 | 872 | ||
855 | imac = IS_IMAC; | 873 | imac1 = IS_IMAC1; |
874 | imac2 = IS_IMAC2; | ||
875 | imac = imac1 || imac2; | ||
856 | /* looks like MASK_GAINLINE triggers something, so we set here | 876 | /* looks like MASK_GAINLINE triggers something, so we set here |
857 | * as start-up | 877 | * as start-up |
858 | */ | 878 | */ |
@@ -939,7 +959,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip) | |||
939 | snd_pmac_awacs_mixers); | 959 | snd_pmac_awacs_mixers); |
940 | if (err < 0) | 960 | if (err < 0) |
941 | return err; | 961 | return err; |
942 | if (beige) | 962 | if (beige || g4agp) |
943 | ; | 963 | ; |
944 | else if (chip->model == PMAC_SCREAMER) | 964 | else if (chip->model == PMAC_SCREAMER) |
945 | err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers2), | 965 | err = build_mixers(chip, ARRAY_SIZE(snd_pmac_screamer_mixers2), |
@@ -961,13 +981,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip) | |||
961 | err = build_mixers(chip, | 981 | err = build_mixers(chip, |
962 | ARRAY_SIZE(snd_pmac_screamer_mixers_imac), | 982 | ARRAY_SIZE(snd_pmac_screamer_mixers_imac), |
963 | snd_pmac_screamer_mixers_imac); | 983 | snd_pmac_screamer_mixers_imac); |
984 | else if (g4agp) | ||
985 | err = build_mixers(chip, | ||
986 | ARRAY_SIZE(snd_pmac_screamer_mixers_g4agp), | ||
987 | snd_pmac_screamer_mixers_g4agp); | ||
964 | else | 988 | else |
965 | err = build_mixers(chip, | 989 | err = build_mixers(chip, |
966 | ARRAY_SIZE(snd_pmac_awacs_mixers_pmac), | 990 | ARRAY_SIZE(snd_pmac_awacs_mixers_pmac), |
967 | snd_pmac_awacs_mixers_pmac); | 991 | snd_pmac_awacs_mixers_pmac); |
968 | if (err < 0) | 992 | if (err < 0) |
969 | return err; | 993 | return err; |
970 | chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac) | 994 | chip->master_sw_ctl = snd_ctl_new1((pm7500 || imac || g4agp) |
971 | ? &snd_pmac_awacs_master_sw_imac | 995 | ? &snd_pmac_awacs_master_sw_imac |
972 | : &snd_pmac_awacs_master_sw, chip); | 996 | : &snd_pmac_awacs_master_sw, chip); |
973 | err = snd_ctl_add(chip->card, chip->master_sw_ctl); | 997 | err = snd_ctl_add(chip->card, chip->master_sw_ctl); |
@@ -1004,15 +1028,17 @@ snd_pmac_awacs_init(struct snd_pmac *chip) | |||
1004 | snd_pmac_awacs_speaker_vol); | 1028 | snd_pmac_awacs_speaker_vol); |
1005 | if (err < 0) | 1029 | if (err < 0) |
1006 | return err; | 1030 | return err; |
1007 | chip->speaker_sw_ctl = snd_ctl_new1(imac | 1031 | chip->speaker_sw_ctl = snd_ctl_new1(imac1 |
1008 | ? &snd_pmac_awacs_speaker_sw_imac | 1032 | ? &snd_pmac_awacs_speaker_sw_imac1 |
1033 | : imac2 | ||
1034 | ? &snd_pmac_awacs_speaker_sw_imac2 | ||
1009 | : &snd_pmac_awacs_speaker_sw, chip); | 1035 | : &snd_pmac_awacs_speaker_sw, chip); |
1010 | err = snd_ctl_add(chip->card, chip->speaker_sw_ctl); | 1036 | err = snd_ctl_add(chip->card, chip->speaker_sw_ctl); |
1011 | if (err < 0) | 1037 | if (err < 0) |
1012 | return err; | 1038 | return err; |
1013 | } | 1039 | } |
1014 | 1040 | ||
1015 | if (beige) | 1041 | if (beige || g4agp) |
1016 | err = build_mixers(chip, | 1042 | err = build_mixers(chip, |
1017 | ARRAY_SIZE(snd_pmac_screamer_mic_boost_beige), | 1043 | ARRAY_SIZE(snd_pmac_screamer_mic_boost_beige), |
1018 | snd_pmac_screamer_mic_boost_beige); | 1044 | snd_pmac_screamer_mic_boost_beige); |
diff --git a/sound/soc/at32/at32-pcm.c b/sound/soc/at32/at32-pcm.c index 435f1daf177c..c83584f989a9 100644 --- a/sound/soc/at32/at32-pcm.c +++ b/sound/soc/at32/at32-pcm.c | |||
@@ -434,7 +434,8 @@ static int at32_pcm_suspend(struct platform_device *pdev, | |||
434 | params = prtd->params; | 434 | params = prtd->params; |
435 | 435 | ||
436 | /* Disable the PDC and save the PDC registers */ | 436 | /* Disable the PDC and save the PDC registers */ |
437 | ssc_writex(params->ssc->regs, PDC_PTCR, params->mask->pdc_disable); | 437 | ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR, |
438 | params->mask->pdc_disable); | ||
438 | 439 | ||
439 | prtd->pdc_xpr_save = ssc_readx(params->ssc->regs, params->pdc->xpr); | 440 | prtd->pdc_xpr_save = ssc_readx(params->ssc->regs, params->pdc->xpr); |
440 | prtd->pdc_xcr_save = ssc_readx(params->ssc->regs, params->pdc->xcr); | 441 | prtd->pdc_xcr_save = ssc_readx(params->ssc->regs, params->pdc->xcr); |
@@ -464,7 +465,7 @@ static int at32_pcm_resume(struct platform_device *pdev, | |||
464 | ssc_writex(params->ssc->regs, params->pdc->xnpr, prtd->pdc_xnpr_save); | 465 | ssc_writex(params->ssc->regs, params->pdc->xnpr, prtd->pdc_xnpr_save); |
465 | ssc_writex(params->ssc->regs, params->pdc->xncr, prtd->pdc_xncr_save); | 466 | ssc_writex(params->ssc->regs, params->pdc->xncr, prtd->pdc_xncr_save); |
466 | 467 | ||
467 | ssc_writex(params->ssc->regs, PDC_PTCR, params->mask->pdc_enable); | 468 | ssc_writex(params->ssc->regs, ATMEL_PDC_PTCR, params->mask->pdc_enable); |
468 | return 0; | 469 | return 0; |
469 | } | 470 | } |
470 | #else /* CONFIG_PM */ | 471 | #else /* CONFIG_PM */ |
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c index 9deb8c74fdfd..0bbd94501d7e 100644 --- a/sound/soc/codecs/cs4270.c +++ b/sound/soc/codecs/cs4270.c | |||
@@ -490,34 +490,7 @@ static int cs4270_mute(struct snd_soc_dai *dai, int mute) | |||
490 | 490 | ||
491 | #endif | 491 | #endif |
492 | 492 | ||
493 | static int cs4270_i2c_probe(struct i2c_adapter *adap, int addr, int kind); | 493 | static int cs4270_i2c_probe(struct i2c_client *, const struct i2c_device_id *); |
494 | |||
495 | /* | ||
496 | * Notify the driver that a new I2C bus has been found. | ||
497 | * | ||
498 | * This function is called for each I2C bus in the system. The function | ||
499 | * then asks the I2C subsystem to probe that bus at the addresses on which | ||
500 | * our device (the CS4270) could exist. If a device is found at one of | ||
501 | * those addresses, then our probe function (cs4270_i2c_probe) is called. | ||
502 | */ | ||
503 | static int cs4270_i2c_attach(struct i2c_adapter *adapter) | ||
504 | { | ||
505 | return i2c_probe(adapter, &addr_data, cs4270_i2c_probe); | ||
506 | } | ||
507 | |||
508 | static int cs4270_i2c_detach(struct i2c_client *client) | ||
509 | { | ||
510 | struct snd_soc_codec *codec = i2c_get_clientdata(client); | ||
511 | |||
512 | i2c_detach_client(client); | ||
513 | codec->control_data = NULL; | ||
514 | |||
515 | kfree(codec->reg_cache); | ||
516 | codec->reg_cache = NULL; | ||
517 | |||
518 | kfree(client); | ||
519 | return 0; | ||
520 | } | ||
521 | 494 | ||
522 | /* A list of non-DAPM controls that the CS4270 supports */ | 495 | /* A list of non-DAPM controls that the CS4270 supports */ |
523 | static const struct snd_kcontrol_new cs4270_snd_controls[] = { | 496 | static const struct snd_kcontrol_new cs4270_snd_controls[] = { |
@@ -525,14 +498,19 @@ static const struct snd_kcontrol_new cs4270_snd_controls[] = { | |||
525 | CS4270_VOLA, CS4270_VOLB, 0, 0xFF, 1) | 498 | CS4270_VOLA, CS4270_VOLB, 0, 0xFF, 1) |
526 | }; | 499 | }; |
527 | 500 | ||
501 | static const struct i2c_device_id cs4270_id[] = { | ||
502 | {"cs4270", 0}, | ||
503 | {} | ||
504 | }; | ||
505 | MODULE_DEVICE_TABLE(i2c, cs4270_id); | ||
506 | |||
528 | static struct i2c_driver cs4270_i2c_driver = { | 507 | static struct i2c_driver cs4270_i2c_driver = { |
529 | .driver = { | 508 | .driver = { |
530 | .name = "CS4270 I2C", | 509 | .name = "CS4270 I2C", |
531 | .owner = THIS_MODULE, | 510 | .owner = THIS_MODULE, |
532 | }, | 511 | }, |
533 | .id = I2C_DRIVERID_CS4270, | 512 | .id_table = cs4270_id, |
534 | .attach_adapter = cs4270_i2c_attach, | 513 | .probe = cs4270_i2c_probe, |
535 | .detach_client = cs4270_i2c_detach, | ||
536 | }; | 514 | }; |
537 | 515 | ||
538 | /* | 516 | /* |
@@ -561,11 +539,11 @@ static struct snd_soc_device *cs4270_socdev; | |||
561 | * Note: snd_soc_new_pcms() must be called before this function can be called, | 539 | * Note: snd_soc_new_pcms() must be called before this function can be called, |
562 | * because of snd_ctl_add(). | 540 | * because of snd_ctl_add(). |
563 | */ | 541 | */ |
564 | static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) | 542 | static int cs4270_i2c_probe(struct i2c_client *i2c_client, |
543 | const struct i2c_device_id *id) | ||
565 | { | 544 | { |
566 | struct snd_soc_device *socdev = cs4270_socdev; | 545 | struct snd_soc_device *socdev = cs4270_socdev; |
567 | struct snd_soc_codec *codec = socdev->codec; | 546 | struct snd_soc_codec *codec = socdev->codec; |
568 | struct i2c_client *i2c_client = NULL; | ||
569 | int i; | 547 | int i; |
570 | int ret = 0; | 548 | int ret = 0; |
571 | 549 | ||
@@ -578,12 +556,6 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) | |||
578 | 556 | ||
579 | /* Note: codec_dai->codec is NULL here */ | 557 | /* Note: codec_dai->codec is NULL here */ |
580 | 558 | ||
581 | i2c_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL); | ||
582 | if (!i2c_client) { | ||
583 | printk(KERN_ERR "cs4270: could not allocate I2C client\n"); | ||
584 | return -ENOMEM; | ||
585 | } | ||
586 | |||
587 | codec->reg_cache = kzalloc(CS4270_NUMREGS, GFP_KERNEL); | 559 | codec->reg_cache = kzalloc(CS4270_NUMREGS, GFP_KERNEL); |
588 | if (!codec->reg_cache) { | 560 | if (!codec->reg_cache) { |
589 | printk(KERN_ERR "cs4270: could not allocate register cache\n"); | 561 | printk(KERN_ERR "cs4270: could not allocate register cache\n"); |
@@ -591,13 +563,6 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) | |||
591 | goto error; | 563 | goto error; |
592 | } | 564 | } |
593 | 565 | ||
594 | i2c_set_clientdata(i2c_client, codec); | ||
595 | strcpy(i2c_client->name, "CS4270"); | ||
596 | |||
597 | i2c_client->driver = &cs4270_i2c_driver; | ||
598 | i2c_client->adapter = adapter; | ||
599 | i2c_client->addr = addr; | ||
600 | |||
601 | /* Verify that we have a CS4270 */ | 566 | /* Verify that we have a CS4270 */ |
602 | 567 | ||
603 | ret = i2c_smbus_read_byte_data(i2c_client, CS4270_CHIPID); | 568 | ret = i2c_smbus_read_byte_data(i2c_client, CS4270_CHIPID); |
@@ -612,18 +577,10 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) | |||
612 | goto error; | 577 | goto error; |
613 | } | 578 | } |
614 | 579 | ||
615 | printk(KERN_INFO "cs4270: found device at I2C address %X\n", addr); | 580 | printk(KERN_INFO "cs4270: found device at I2C address %X\n", |
581 | i2c_client->addr); | ||
616 | printk(KERN_INFO "cs4270: hardware revision %X\n", ret & 0xF); | 582 | printk(KERN_INFO "cs4270: hardware revision %X\n", ret & 0xF); |
617 | 583 | ||
618 | /* Tell the I2C layer a new client has arrived */ | ||
619 | |||
620 | ret = i2c_attach_client(i2c_client); | ||
621 | if (ret) { | ||
622 | printk(KERN_ERR "cs4270: could not attach codec, " | ||
623 | "I2C address %x, error code %i\n", addr, ret); | ||
624 | goto error; | ||
625 | } | ||
626 | |||
627 | codec->control_data = i2c_client; | 584 | codec->control_data = i2c_client; |
628 | codec->read = cs4270_read_reg_cache; | 585 | codec->read = cs4270_read_reg_cache; |
629 | codec->write = cs4270_i2c_write; | 586 | codec->write = cs4270_i2c_write; |
@@ -648,20 +605,17 @@ static int cs4270_i2c_probe(struct i2c_adapter *adapter, int addr, int kind) | |||
648 | goto error; | 605 | goto error; |
649 | } | 606 | } |
650 | 607 | ||
608 | i2c_set_clientdata(i2c_client, codec); | ||
609 | |||
651 | return 0; | 610 | return 0; |
652 | 611 | ||
653 | error: | 612 | error: |
654 | if (codec->control_data) { | 613 | codec->control_data = NULL; |
655 | i2c_detach_client(i2c_client); | ||
656 | codec->control_data = NULL; | ||
657 | } | ||
658 | 614 | ||
659 | kfree(codec->reg_cache); | 615 | kfree(codec->reg_cache); |
660 | codec->reg_cache = NULL; | 616 | codec->reg_cache = NULL; |
661 | codec->reg_cache_size = 0; | 617 | codec->reg_cache_size = 0; |
662 | 618 | ||
663 | kfree(i2c_client); | ||
664 | |||
665 | return ret; | 619 | return ret; |
666 | } | 620 | } |
667 | 621 | ||
@@ -727,7 +681,7 @@ static int cs4270_probe(struct platform_device *pdev) | |||
727 | ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); | 681 | ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1); |
728 | if (ret < 0) { | 682 | if (ret < 0) { |
729 | printk(KERN_ERR "cs4270: failed to create PCMs\n"); | 683 | printk(KERN_ERR "cs4270: failed to create PCMs\n"); |
730 | return ret; | 684 | goto error_free_codec; |
731 | } | 685 | } |
732 | 686 | ||
733 | #ifdef USE_I2C | 687 | #ifdef USE_I2C |
@@ -736,8 +690,7 @@ static int cs4270_probe(struct platform_device *pdev) | |||
736 | ret = i2c_add_driver(&cs4270_i2c_driver); | 690 | ret = i2c_add_driver(&cs4270_i2c_driver); |
737 | if (ret) { | 691 | if (ret) { |
738 | printk(KERN_ERR "cs4270: failed to attach driver"); | 692 | printk(KERN_ERR "cs4270: failed to attach driver"); |
739 | snd_soc_free_pcms(socdev); | 693 | goto error_free_pcms; |
740 | return ret; | ||
741 | } | 694 | } |
742 | 695 | ||
743 | /* Did we find a CS4270 on the I2C bus? */ | 696 | /* Did we find a CS4270 on the I2C bus? */ |
@@ -759,10 +712,23 @@ static int cs4270_probe(struct platform_device *pdev) | |||
759 | ret = snd_soc_register_card(socdev); | 712 | ret = snd_soc_register_card(socdev); |
760 | if (ret < 0) { | 713 | if (ret < 0) { |
761 | printk(KERN_ERR "cs4270: failed to register card\n"); | 714 | printk(KERN_ERR "cs4270: failed to register card\n"); |
762 | snd_soc_free_pcms(socdev); | 715 | goto error_del_driver; |
763 | return ret; | ||
764 | } | 716 | } |
765 | 717 | ||
718 | return 0; | ||
719 | |||
720 | error_del_driver: | ||
721 | #ifdef USE_I2C | ||
722 | i2c_del_driver(&cs4270_i2c_driver); | ||
723 | |||
724 | error_free_pcms: | ||
725 | #endif | ||
726 | snd_soc_free_pcms(socdev); | ||
727 | |||
728 | error_free_codec: | ||
729 | kfree(socdev->codec); | ||
730 | socdev->codec = NULL; | ||
731 | |||
766 | return ret; | 732 | return ret; |
767 | } | 733 | } |
768 | 734 | ||
@@ -773,8 +739,7 @@ static int cs4270_remove(struct platform_device *pdev) | |||
773 | snd_soc_free_pcms(socdev); | 739 | snd_soc_free_pcms(socdev); |
774 | 740 | ||
775 | #ifdef USE_I2C | 741 | #ifdef USE_I2C |
776 | if (socdev->codec->control_data) | 742 | i2c_del_driver(&cs4270_i2c_driver); |
777 | i2c_del_driver(&cs4270_i2c_driver); | ||
778 | #endif | 743 | #endif |
779 | 744 | ||
780 | kfree(socdev->codec); | 745 | kfree(socdev->codec); |
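
The cs4270.c changes port the codec from the legacy attach_adapter/detach_client binding to the new-style i2c driver model: the driver advertises an i2c_device_id table, the core hands probe() a ready-made i2c_client instead of the driver allocating one, and cs4270_probe() now unwinds partial initialisation through goto labels rather than returning early. A schematic new-style i2c driver skeleton of that era (not the actual codec driver; names and the probe body are placeholders):

#include <linux/module.h>
#include <linux/i2c.h>

static int example_i2c_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
{
	/* client->adapter and client->addr are already set up by the core */
	dev_info(&client->dev, "probed at 0x%02x\n", client->addr);
	return 0;
}

static int example_i2c_remove(struct i2c_client *client)
{
	return 0;
}

static const struct i2c_device_id example_id[] = {
	{ "example-codec", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_i2c_driver = {
	.driver = {
		.name	= "example-codec",
		.owner	= THIS_MODULE,
	},
	.probe		= example_i2c_probe,
	.remove		= example_i2c_remove,
	.id_table	= example_id,
};

static int __init example_init(void)
{
	return i2c_add_driver(&example_i2c_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	i2c_del_driver(&example_i2c_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");

The error-path change in cs4270_probe() follows the usual kernel unwind idiom: each failure jumps to the label that frees only what was allocated before it.
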
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index 5761164fe16d..e873414840c8 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c | |||
@@ -583,7 +583,7 @@ static const struct snd_soc_dapm_route audio_map[] = { | |||
583 | 583 | ||
584 | /* out 4 */ | 584 | /* out 4 */ |
585 | {"Out4 Mux", "VREF", "VREF"}, | 585 | {"Out4 Mux", "VREF", "VREF"}, |
586 | {"Out4 Mux", "Capture ST", "Capture ST Mixer"}, | 586 | {"Out4 Mux", "Capture ST", "Playback Mixer"}, |
587 | {"Out4 Mux", "LOUT2", "LOUT2"}, | 587 | {"Out4 Mux", "LOUT2", "LOUT2"}, |
588 | {"Out 4", NULL, "Out4 Mux"}, | 588 | {"Out 4", NULL, "Out4 Mux"}, |
589 | {"OUT4", NULL, "Out 4"}, | 589 | {"OUT4", NULL, "Out 4"}, |
@@ -607,7 +607,7 @@ static const struct snd_soc_dapm_route audio_map[] = { | |||
607 | /* Capture Right Mux */ | 607 | /* Capture Right Mux */ |
608 | {"Capture Right Mux", "PGA", "Right Capture Volume"}, | 608 | {"Capture Right Mux", "PGA", "Right Capture Volume"}, |
609 | {"Capture Right Mux", "Line or RXP-RXN", "Line Right Mux"}, | 609 | {"Capture Right Mux", "Line or RXP-RXN", "Line Right Mux"}, |
610 | {"Capture Right Mux", "Sidetone", "Capture ST Mixer"}, | 610 | {"Capture Right Mux", "Sidetone", "Playback Mixer"}, |
611 | 611 | ||
612 | /* Mono Capture mixer-mux */ | 612 | /* Mono Capture mixer-mux */ |
613 | {"Capture Right Mixer", "Stereo", "Capture Right Mux"}, | 613 | {"Capture Right Mixer", "Stereo", "Capture Right Mux"}, |