37 files changed, 596 insertions, 509 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 2
+PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index ebafe8aa8956..0c4b76ab4d8e 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -202,7 +202,6 @@ static struct irda_platform_data assabet_irda_data = {
 static struct mcp_plat_data assabet_mcp_data = {
 	.mccr0 = MCCR0_ADM,
 	.sclk_rate = 11981000,
-	.codec = "ucb1x00",
 };
 
 static void __init assabet_init(void)
@@ -253,17 +252,6 @@ static void __init assabet_init(void)
 	sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources,
 			    ARRAY_SIZE(assabet_flash_resources));
 	sa11x0_register_irda(&assabet_irda_data);
-
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
-	ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
 	sa11x0_register_mcp(&assabet_mcp_data);
 }
 
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index d12d0f48b1dc..11bb6d0b9be3 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -124,23 +124,12 @@ static void __init cerf_map_io(void)
 static struct mcp_plat_data cerf_mcp_data = {
 	.mccr0 = MCCR0_ADM,
 	.sclk_rate = 11981000,
-	.codec = "ucb1x00",
 };
 
 static void __init cerf_init(void)
 {
 	platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices));
 	sa11x0_register_mtd(&cerf_flash_data, &cerf_flash_resource, 1);
-
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
 	sa11x0_register_mcp(&cerf_mcp_data);
 }
 
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index cce8763d0839..fd5652118ed1 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -27,7 +27,6 @@
 #include <linux/timer.h>
 #include <linux/gpio.h>
 #include <linux/pda_power.h>
-#include <linux/mfd/ucb1x00.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -86,15 +85,10 @@ static struct scoop_pcmcia_config collie_pcmcia_config = {
 	.num_devs = 1,
 };
 
-static struct ucb1x00_plat_data collie_ucb1x00_data = {
-	.gpio_base = COLLIE_TC35143_GPIO_BASE,
-};
-
 static struct mcp_plat_data collie_mcp_data = {
 	.mccr0 = MCCR0_ADM | MCCR0_ExtClk,
 	.sclk_rate = 9216000,
-	.codec = "ucb1x00",
-	.codec_pdata = &collie_ucb1x00_data,
+	.gpio_base = COLLIE_TC35143_GPIO_BASE,
 };
 
 /*
@@ -356,16 +350,6 @@ static void __init collie_init(void)
 
 	sa11x0_register_mtd(&collie_flash_data, collie_flash_resources,
 			    ARRAY_SIZE(collie_flash_resources));
-
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
 	sa11x0_register_mcp(&collie_mcp_data);
 
 	sharpsl_save_param();
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index a7c0df6e670c..bb10ee2cb89f 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -217,15 +217,10 @@ static struct platform_device sa11x0uart3_device = {
 static struct resource sa11x0mcp_resources[] = {
 	[0] = {
 		.start = __PREG(Ser4MCCR0),
-		.end = __PREG(Ser4MCCR0) + 0x1C - 1,
+		.end = __PREG(Ser4MCCR0) + 0xffff,
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
-		.start = __PREG(Ser4MCCR1),
-		.end = __PREG(Ser4MCCR1) + 0x4 - 1,
-		.flags = IORESOURCE_MEM,
-	},
-	[2] = {
 		.start = IRQ_Ser4MCP,
 		.end = IRQ_Ser4MCP,
 		.flags = IORESOURCE_IRQ,
diff --git a/arch/arm/mach-sa1100/include/mach/mcp.h b/arch/arm/mach-sa1100/include/mach/mcp.h
index 586cec898b35..ed1a331508a7 100644
--- a/arch/arm/mach-sa1100/include/mach/mcp.h
+++ b/arch/arm/mach-sa1100/include/mach/mcp.h
@@ -17,8 +17,6 @@ struct mcp_plat_data {
 	u32 mccr1;
 	unsigned int sclk_rate;
 	int gpio_base;
-	const char *codec;
-	void *codec_pdata;
 };
 
 #endif
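Taken together with the collie.c and simpad.c hunks above, this mcp.h change means a board file now hands the UCB1x00 GPIO base straight to the MCP platform data instead of wrapping it in a separate ucb1x00_plat_data plus a codec string. A minimal stand-alone sketch of that shape, compilable outside the kernel; the MCCR0_ADM and COLLIE_TC35143_GPIO_BASE values below are made-up stand-ins, not the real <mach/mcp.h> or <mach/collie.h> definitions:

#include <stdio.h>

/* trimmed-down mirror of the post-patch struct mcp_plat_data */
struct mcp_plat_data {
	unsigned int mccr0;
	unsigned int mccr1;
	unsigned int sclk_rate;
	int gpio_base;
};

#define MCCR0_ADM			(1u << 6)	/* stand-in bit value */
#define COLLIE_TC35143_GPIO_BASE	12		/* stand-in GPIO base */

/* roughly what a board file now passes to sa11x0_register_mcp() */
static struct mcp_plat_data collie_mcp_data = {
	.mccr0     = MCCR0_ADM,
	.sclk_rate = 9216000,
	.gpio_base = COLLIE_TC35143_GPIO_BASE,
};

int main(void)
{
	printf("sclk %u Hz, gpio base %d\n",
	       collie_mcp_data.sclk_rate, collie_mcp_data.gpio_base);
	return 0;
}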
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index d117ceab6215..af4e2761f3db 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -24,20 +24,10 @@
 static struct mcp_plat_data lart_mcp_data = {
 	.mccr0 = MCCR0_ADM,
 	.sclk_rate = 11981000,
-	.codec = "ucb1x00",
 };
 
 static void __init lart_init(void)
 {
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
 	sa11x0_register_mcp(&lart_mcp_data);
 }
 
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index 748d34435b3f..318b2b766a0b 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -55,22 +55,11 @@ static struct resource shannon_flash_resource = {
 static struct mcp_plat_data shannon_mcp_data = {
 	.mccr0 = MCCR0_ADM,
 	.sclk_rate = 11981000,
-	.codec = "ucb1x00",
 };
 
 static void __init shannon_init(void)
 {
 	sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1);
-
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
 	sa11x0_register_mcp(&shannon_mcp_data);
 }
 
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 458ececefa58..e17c04d6e324 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -14,7 +14,6 @@
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/mfd/ucb1x00.h>
 
 #include <asm/irq.h>
 #include <mach/hardware.h>
@@ -188,15 +187,10 @@ static struct resource simpad_flash_resources [] = {
 	}
 };
 
-static struct ucb1x00_plat_data simpad_ucb1x00_data = {
-	.gpio_base = SIMPAD_UCB1X00_GPIO_BASE,
-};
-
 static struct mcp_plat_data simpad_mcp_data = {
 	.mccr0 = MCCR0_ADM,
 	.sclk_rate = 11981000,
-	.codec = "ucb1300",
-	.codec_pdata = &simpad_ucb1x00_data,
+	.gpio_base = SIMPAD_UCB1X00_GPIO_BASE,
 };
 
 
@@ -384,16 +378,6 @@ static int __init simpad_init(void)
 
 	sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
 			    ARRAY_SIZE(simpad_flash_resources));
-
-	/*
-	 * Setup the PPC unit correctly.
-	 */
-	PPDR &= ~PPC_RXD4;
-	PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-	PSDR |= PPC_RXD4;
-	PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-	PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
 	sa11x0_register_mcp(&simpad_mcp_data);
 
 	ret = platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore
index 028079065af6..7cab8c08e6d1 100644
--- a/arch/x86/.gitignore
+++ b/arch/x86/.gitignore
@@ -1,3 +1,4 @@
 boot/compressed/vmlinux
 tools/test_get_len
+tools/insn_sanity
 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c14ecd851d0..864cc6e6ac8e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -125,16 +125,6 @@ config HAVE_LATENCYTOP_SUPPORT
 config MMU
 	def_bool y
 
-config ZONE_DMA
-	bool "DMA memory allocation support" if EXPERT
-	default y
-	help
-	  DMA memory allocation support allows devices with less than 32-bit
-	  addressing to allocate within the first 16MB of address space.
-	  Disable if no such devices will be used.
-
-	  If unsure, say Y.
-
 config SBUS
 	bool
 
@@ -255,6 +245,16 @@ source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
+config ZONE_DMA
+	bool "DMA memory allocation support" if EXPERT
+	default y
+	help
+	  DMA memory allocation support allows devices with less than 32-bit
+	  addressing to allocate within the first 16MB of address space.
+	  Disable if no such devices will be used.
+
+	  If unsure, say Y.
+
 source "kernel/time/Kconfig"
 
 config SMP
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index b4a3db7ce140..21f77b89e47a 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -7,6 +7,7 @@
 # include <asm/unistd_32.h>
 # define __ARCH_WANT_IPC_PARSE_VERSION
 # define __ARCH_WANT_STAT64
+# define __ARCH_WANT_SYS_IPC
 # define __ARCH_WANT_SYS_OLD_MMAP
 # define __ARCH_WANT_SYS_OLD_SELECT
 
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 8e862aaf0d90..becf47b81735 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -65,7 +65,7 @@
  * UV2: Bit 19 selects between
  * (0): 10 microsecond timebase and
  * (1): 80 microseconds
- * we're using 655us, similar to UV1: 65 units of 10us
+ * we're using 560us, similar to UV1: 65 units of 10us
  */
 #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
 #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
@@ -167,6 +167,7 @@
 #define FLUSH_RETRY_TIMEOUT 2
 #define FLUSH_GIVEUP 3
 #define FLUSH_COMPLETE 4
+#define FLUSH_RETRY_BUSYBUG 5
 
 /*
  * tuning the action when the numalink network is extremely delayed
@@ -235,10 +236,10 @@ struct bau_msg_payload {
 
 
 /*
- * Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * UV1 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
  * see table 4.2.3.0.1 in broacast_assist spec.
  */
-struct bau_msg_header {
+struct uv1_bau_msg_header {
 	unsigned int dest_subnodeid:6;	/* must be 0x10, for the LB */
 	/* bits 5:0 */
 	unsigned int base_dest_nasid:15; /* nasid of the first bit */
@@ -318,19 +319,87 @@ struct bau_msg_header {
 };
 
 /*
+ * UV2 Message header: 16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see figure 9-2 of harp_sys.pdf
+ */
+struct uv2_bau_msg_header {
+	unsigned int base_dest_nasid:15;	/* nasid of the first bit */
+	/* bits 14:0 */				/* in uvhub map */
+	unsigned int dest_subnodeid:5;		/* must be 0x10, for the LB */
+	/* bits 19:15 */
+	unsigned int rsvd_1:1;			/* must be zero */
+	/* bit 20 */
+	/* Address bits 59:21 */
+	/* bits 25:2 of address (44:21) are payload */
+	/* these next 24 bits become bytes 12-14 of msg */
+	/* bits 28:21 land in byte 12 */
+	unsigned int replied_to:1;		/* sent as 0 by the source to
+						   byte 12 */
+	/* bit 21 */
+	unsigned int msg_type:3;		/* software type of the
+						   message */
+	/* bits 24:22 */
+	unsigned int canceled:1;		/* message canceled, resource
+						   is to be freed*/
+	/* bit 25 */
+	unsigned int payload_1:3;		/* not currently used */
+	/* bits 28:26 */
+
+	/* bits 36:29 land in byte 13 */
+	unsigned int payload_2a:3;		/* not currently used */
+	unsigned int payload_2b:5;		/* not currently used */
+	/* bits 36:29 */
+
+	/* bits 44:37 land in byte 14 */
+	unsigned int payload_3:8;		/* not currently used */
+	/* bits 44:37 */
+
+	unsigned int rsvd_2:7;			/* reserved */
+	/* bits 51:45 */
+	unsigned int swack_flag:1;		/* software acknowledge flag */
+	/* bit 52 */
+	unsigned int rsvd_3a:3;			/* must be zero */
+	unsigned int rsvd_3b:8;			/* must be zero */
+	unsigned int rsvd_3c:8;			/* must be zero */
+	unsigned int rsvd_3d:3;			/* must be zero */
+	/* bits 74:53 */
+	unsigned int fairness:3;		/* usually zero */
+	/* bits 77:75 */
+
+	unsigned int sequence:16;		/* message sequence number */
+	/* bits 93:78 Suppl_A */
+	unsigned int chaining:1;		/* next descriptor is part of
+						   this activation*/
+	/* bit 94 */
+	unsigned int multilevel:1;		/* multi-level multicast
+						   format */
+	/* bit 95 */
+	unsigned int rsvd_4:24;			/* ordered / source node /
+						   source subnode / aging
+						   must be zero */
+	/* bits 119:96 */
+	unsigned int command:8;			/* message type */
+	/* bits 127:120 */
+};
+
+/*
  * The activation descriptor:
  * The format of the message to send, plus all accompanying control
  * Should be 64 bytes
  */
 struct bau_desc {
 	struct pnmask distribution;
 	/*
 	 * message template, consisting of header and payload:
 	 */
-	struct bau_msg_header header;
-	struct bau_msg_payload payload;
+	union bau_msg_header {
+		struct uv1_bau_msg_header uv1_hdr;
+		struct uv2_bau_msg_header uv2_hdr;
+	} header;
+
+	struct bau_msg_payload payload;
 };
-/*
+/* UV1:
  * -payload-- ---------header------
  * bytes 0-11 bits 41-56 bits 58-81
  * A B (2) C (3)
@@ -340,6 +409,16 @@ struct bau_desc {
  * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
  * ------------payload queue-----------
  */
+/* UV2:
+ * -payload-- ---------header------
+ * bytes 0-11 bits 70-78 bits 21-44
+ * A B (2) C (3)
+ *
+ * A/B/C are moved to:
+ * A C B
+ * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
+ * ------------payload queue-----------
+ */
 
 /*
  * The payload queue on the destination side is an array of these.
@@ -385,7 +464,6 @@ struct bau_pq_entry {
 struct msg_desc {
 	struct bau_pq_entry *msg;
 	int msg_slot;
-	int swack_slot;
 	struct bau_pq_entry *queue_first;
 	struct bau_pq_entry *queue_last;
 };
@@ -405,6 +483,7 @@ struct ptc_stats {
						requests */
 	unsigned long s_stimeout;	/* source side timeouts */
 	unsigned long s_dtimeout;	/* destination side timeouts */
+	unsigned long s_strongnacks;	/* number of strong nack's */
 	unsigned long s_time;		/* time spent in sending side */
 	unsigned long s_retriesok;	/* successful retries */
 	unsigned long s_ntargcpu;	/* total number of cpu's
@@ -439,6 +518,9 @@ struct ptc_stats {
 	unsigned long s_retry_messages;	/* retry broadcasts */
 	unsigned long s_bau_reenabled;	/* for bau enable/disable */
 	unsigned long s_bau_disabled;	/* for bau enable/disable */
+	unsigned long s_uv2_wars;	/* uv2 workaround, perm. busy */
+	unsigned long s_uv2_wars_hw;	/* uv2 workaround, hiwater */
+	unsigned long s_uv2_war_waits;	/* uv2 workaround, long waits */
 	/* destination statistics */
 	unsigned long d_alltlb;		/* times all tlb's on this
					   cpu were flushed */
@@ -511,9 +593,12 @@ struct bau_control {
 	short osnode;
 	short uvhub_cpu;
 	short uvhub;
+	short uvhub_version;
 	short cpus_in_socket;
 	short cpus_in_uvhub;
 	short partition_base_pnode;
+	short using_desc;	/* an index, like uvhub_cpu */
+	unsigned int inuse_map;
 	unsigned short message_number;
 	unsigned short uvhub_quiesce;
 	short socket_acknowledge_count[DEST_Q_SIZE];
@@ -531,6 +616,7 @@ struct bau_control {
 	int cong_response_us;
 	int cong_reps;
 	int cong_period;
+	unsigned long clocks_per_100_usec;
 	cycles_t period_time;
 	long period_requests;
 	struct hub_and_pnode *thp;
@@ -591,6 +677,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
 	uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
 }
 
+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+	write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
 static inline unsigned long read_mmr_sw_ack(void)
 {
 	return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
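The header change above replaces the single bau_msg_header in bau_desc with a union of the UV1 and UV2 layouts, selected at run time by hub version (the uvhub_version checks added in tlb_uv.c below). A stand-alone sketch of that pattern with trimmed, made-up field sets rather than the real 128-bit layouts:

#include <stdio.h>
#include <string.h>

/* stand-in headers: only a few fields, not the real bit-field layouts */
struct uv1_hdr { unsigned int msg_type:3, swack_flag:1, sequence:16; };
struct uv2_hdr { unsigned int msg_type:3, swack_flag:1, sequence:16; };

struct demo_desc {
	union {
		struct uv1_hdr uv1_hdr;
		struct uv2_hdr uv2_hdr;
	} header;			/* both layouts share the same bytes */
};

/* pick the layout by hub version, as the patched tlb_uv.c does */
static void set_sequence(struct demo_desc *d, int uvhub_version, unsigned int seq)
{
	if (uvhub_version == 1)
		d->header.uv1_hdr.sequence = seq;
	else
		d->header.uv2_hdr.sequence = seq;
}

int main(void)
{
	struct demo_desc d;

	memset(&d, 0, sizeof(d));
	set_sequence(&d, 2, 42);
	printf("uv2 sequence = %u\n", d.header.uv2_hdr.sequence);
	return 0;
}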
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index c0dd5b603749..a62c201c97ec 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -290,14 +290,15 @@ static inline int pit_verify_msb(unsigned char val)
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
 	int count;
-	u64 tsc = 0;
+	u64 tsc = 0, prev_tsc = 0;
 
 	for (count = 0; count < 50000; count++) {
 		if (!pit_verify_msb(val))
 			break;
+		prev_tsc = tsc;
 		tsc = get_cycles();
 	}
-	*deltap = get_cycles() - tsc;
+	*deltap = get_cycles() - prev_tsc;
 	*tscp = tsc;
 
 	/*
@@ -311,9 +312,9 @@ static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *de
  * How many MSB values do we want to see? We aim for
  * a maximum error rate of 500ppm (in practice the
  * real error is much smaller), but refuse to spend
- * more than 25ms on it.
+ * more than 50ms on it.
  */
-#define MAX_QUICK_PIT_MS 25
+#define MAX_QUICK_PIT_MS 50
 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 
 static unsigned long quick_pit_calibrate(void)
@@ -383,15 +384,12 @@ success:
 	 *
 	 * As a result, we can depend on there not being
 	 * any odd delays anywhere, and the TSC reads are
-	 * reliable (within the error). We also adjust the
-	 * delta to the middle of the error bars, just
-	 * because it looks nicer.
+	 * reliable (within the error).
 	 *
 	 * kHz = ticks / time-in-seconds / 1000;
 	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
 	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
 	 */
-	delta += (long)(d2 - d1)/2;
 	delta *= PIT_TICK_RATE;
 	do_div(delta, i*256*1000);
 	printk("Fast TSC calibration using PIT\n");
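Dropping the "middle of the error bars" adjustment leaves the calibration as the plain formula in the comment: kHz = (t2 - t1) * PIT_TICK_RATE / (i * 256 * 1000). A stand-alone worked example of that arithmetic; the t1, t2 and i values below are invented for illustration, not measurements:

#include <stdio.h>

#define PIT_TICK_RATE 1193182UL		/* 8254 PIT input clock, Hz */

int main(void)
{
	unsigned long long t1 = 0, t2 = 33300000;	/* hypothetical TSC reads */
	unsigned long long i = 128;			/* iterations of 256 PIT ticks each */
	unsigned long long delta = t2 - t1;

	delta *= PIT_TICK_RATE;		/* same scaling as the kernel code above */
	delta /= i * 256 * 1000;	/* stands in for do_div() */
	printf("Fast TSC calibration: %llu kHz\n", delta);	/* roughly 1.2 GHz here */
	return 0;
}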
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 5b83c51c12e0..819137904428 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -219,7 +219,9 @@ ab: STOS/W/D/Q Yv,rAX
 ac: LODS/B AL,Xb
 ad: LODS/W/D/Q rAX,Xv
 ae: SCAS/B AL,Yb
-af: SCAS/W/D/Q rAX,Xv
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
 # 0xb0 - 0xbf
 b0: MOV AL/R8L,Ib
 b1: MOV CL/R9L,Ib
@@ -729,8 +731,8 @@ de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
 df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
 f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
 f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
-f3: ANDN Gy,By,Ey (v)
-f4: Grp17 (1A)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
 f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
 f6: MULX By,Gy,rDX,Ey (F2),(v)
 f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 5b552198f774..9be4cff00a2d 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
  * clear of the Timeout bit (as well) will free the resource. No reply will
  * be sent (the hardware will only do one reply per message).
  */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
+						int do_acknowledge)
 {
 	unsigned long dw;
 	struct bau_pq_entry *msg;
 
 	msg = mdp->msg;
-	if (!msg->canceled) {
+	if (!msg->canceled && do_acknowledge) {
 		dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
 		write_mmr_sw_ack(dw);
 	}
@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
 		if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
 			unsigned long mr;
 			/*
-			 * is the resource timed out?
-			 * make everyone ignore the cancelled message.
+			 * Is the resource timed out?
+			 * Make everyone ignore the cancelled message.
 			 */
 			msg2->canceled = 1;
 			stat->d_canceled++;
@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
  * Do all the things a cpu should do for a TLB shootdown message.
  * Other cpu's may come here at the same time for this message.
  */
-static void bau_process_message(struct msg_desc *mdp,
-				struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
+				int do_acknowledge)
 {
 	short socket_ack_count = 0;
 	short *sp;
@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
 	if (msg_ack_count == bcp->cpus_in_uvhub) {
 		/*
 		 * All cpus in uvhub saw it; reply
+		 * (unless we are in the UV2 workaround)
 		 */
-		reply_to_message(mdp, bcp);
+		reply_to_message(mdp, bcp, do_acknowledge);
 	}
 }
 
@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
 /*
  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
  */
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
 {
 	unsigned long descriptor_status;
 	unsigned long descriptor_status2;
 
 	descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
-	descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+	descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
 	descriptor_status = (descriptor_status << 1) | descriptor_status2;
 	return descriptor_status;
 }
 
+/*
+ * Return whether the status of the descriptor that is normally used for this
+ * cpu (the one indexed by its hub-relative cpu number) is busy.
+ * The status of the original 32 descriptors is always reflected in the 64
+ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
+ * The bit provided by the activation_status_2 register is irrelevant to
+ * the status if it is only being tested for busy or not busy.
+ */
+int normal_busy(struct bau_control *bcp)
+{
+	int cpu = bcp->uvhub_cpu;
+	int mmr_offset;
+	int right_shift;
+
+	mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+	right_shift = cpu * UV_ACT_STATUS_SIZE;
+	return (((((read_lmmr(mmr_offset) >> right_shift) &
+		UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
+}
+
+/*
+ * Entered when a bau descriptor has gone into a permanent busy wait because
+ * of a hardware bug.
+ * Workaround the bug.
+ */
+int handle_uv2_busy(struct bau_control *bcp)
+{
+	int busy_one = bcp->using_desc;
+	int normal = bcp->uvhub_cpu;
+	int selected = -1;
+	int i;
+	unsigned long descriptor_status;
+	unsigned long status;
+	int mmr_offset;
+	struct bau_desc *bau_desc_old;
+	struct bau_desc *bau_desc_new;
+	struct bau_control *hmaster = bcp->uvhub_master;
+	struct ptc_stats *stat = bcp->statp;
+	cycles_t ttm;
+
+	stat->s_uv2_wars++;
+	spin_lock(&hmaster->uvhub_lock);
+	/* try for the original first */
+	if (busy_one != normal) {
+		if (!normal_busy(bcp))
+			selected = normal;
+	}
+	if (selected < 0) {
+		/* can't use the normal, select an alternate */
+		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+		descriptor_status = read_lmmr(mmr_offset);
+
+		/* scan available descriptors 32-63 */
+		for (i = 0; i < UV_CPUS_PER_AS; i++) {
+			if ((hmaster->inuse_map & (1 << i)) == 0) {
+				status = ((descriptor_status >>
+						(i * UV_ACT_STATUS_SIZE)) &
+						UV_ACT_STATUS_MASK) << 1;
+				if (status != UV2H_DESC_BUSY) {
+					selected = i + UV_CPUS_PER_AS;
+					break;
+				}
+			}
+		}
+	}
+
+	if (busy_one != normal)
+		/* mark the busy alternate as not in-use */
+		hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
+
+	if (selected >= 0) {
+		/* switch to the selected descriptor */
+		if (selected != normal) {
+			/* set the selected alternate as in-use */
+			hmaster->inuse_map |=
+					(1 << (selected - UV_CPUS_PER_AS));
+			if (selected > stat->s_uv2_wars_hw)
+				stat->s_uv2_wars_hw = selected;
+		}
+		bau_desc_old = bcp->descriptor_base;
+		bau_desc_old += (ITEMS_PER_DESC * busy_one);
+		bcp->using_desc = selected;
+		bau_desc_new = bcp->descriptor_base;
+		bau_desc_new += (ITEMS_PER_DESC * selected);
+		*bau_desc_new = *bau_desc_old;
+	} else {
+		/*
+		 * All are busy. Wait for the normal one for this cpu to
+		 * free up.
+		 */
+		stat->s_uv2_war_waits++;
+		spin_unlock(&hmaster->uvhub_lock);
+		ttm = get_cycles();
+		do {
+			cpu_relax();
+		} while (normal_busy(bcp));
+		spin_lock(&hmaster->uvhub_lock);
+		/* switch to the original descriptor */
+		bcp->using_desc = normal;
+		bau_desc_old = bcp->descriptor_base;
+		bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
+		bcp->using_desc = (ITEMS_PER_DESC * normal);
+		bau_desc_new = bcp->descriptor_base;
+		bau_desc_new += (ITEMS_PER_DESC * normal);
+		*bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
+	}
+	spin_unlock(&hmaster->uvhub_lock);
+	return FLUSH_RETRY_BUSYBUG;
+}
+
 static int uv2_wait_completion(struct bau_desc *bau_desc,
 				unsigned long mmr_offset, int right_shift,
 				struct bau_control *bcp, long try)
 {
 	unsigned long descriptor_stat;
 	cycles_t ttm;
-	int cpu = bcp->uvhub_cpu;
+	int desc = bcp->using_desc;
+	long busy_reps = 0;
 	struct ptc_stats *stat = bcp->statp;
 
-	descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+	descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
 
 	/* spin on the status MMR, waiting for it to go idle */
 	while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -522,32 +635,35 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
 		 * our message and its state will stay IDLE.
 		 */
 		if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
-		    (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
 		    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
 			stat->s_stimeout++;
 			return FLUSH_GIVEUP;
+		} else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
+			stat->s_strongnacks++;
+			bcp->conseccompletes = 0;
+			return FLUSH_GIVEUP;
 		} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
 			stat->s_dtimeout++;
-			ttm = get_cycles();
-			/*
-			 * Our retries may be blocked by all destination
-			 * swack resources being consumed, and a timeout
-			 * pending. In that case hardware returns the
-			 * ERROR that looks like a destination timeout.
-			 */
-			if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
-				bcp->conseccompletes = 0;
-				return FLUSH_RETRY_PLUGGED;
-			}
 			bcp->conseccompletes = 0;
 			return FLUSH_RETRY_TIMEOUT;
 		} else {
+			busy_reps++;
+			if (busy_reps > 1000000) {
+				/* not to hammer on the clock */
+				busy_reps = 0;
+				ttm = get_cycles();
+				if ((ttm - bcp->send_message) >
+						(bcp->clocks_per_100_usec)) {
+					return handle_uv2_busy(bcp);
+				}
+			}
 			/*
 			 * descriptor_stat is still BUSY
 			 */
 			cpu_relax();
 		}
-		descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+		descriptor_stat = uv2_read_status(mmr_offset, right_shift,
+								desc);
 	}
 	bcp->conseccompletes++;
 	return FLUSH_COMPLETE;
@@ -563,17 +679,17 @@ static int wait_completion(struct bau_desc *bau_desc,
 {
 	int right_shift;
 	unsigned long mmr_offset;
-	int cpu = bcp->uvhub_cpu;
+	int desc = bcp->using_desc;
 
-	if (cpu < UV_CPUS_PER_AS) {
+	if (desc < UV_CPUS_PER_AS) {
 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
-		right_shift = cpu * UV_ACT_STATUS_SIZE;
+		right_shift = desc * UV_ACT_STATUS_SIZE;
 	} else {
 		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-		right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+		right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
 	}
 
-	if (is_uv1_hub())
+	if (bcp->uvhub_version == 1)
 		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
 							bcp, try);
 	else
@@ -752,19 +868,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
  * Returns 1 if it gives up entirely and the original cpu mask is to be
  * returned to the kernel.
  */
-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
-			struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 {
 	int seq_number = 0;
 	int completion_stat = 0;
+	int uv1 = 0;
 	long try = 0;
 	unsigned long index;
 	cycles_t time1;
 	cycles_t time2;
 	struct ptc_stats *stat = bcp->statp;
 	struct bau_control *hmaster = bcp->uvhub_master;
+	struct uv1_bau_msg_header *uv1_hdr = NULL;
+	struct uv2_bau_msg_header *uv2_hdr = NULL;
+	struct bau_desc *bau_desc;
 
-	if (is_uv1_hub())
+	if (bcp->uvhub_version == 1)
 		uv1_throttle(hmaster, stat);
 
 	while (hmaster->uvhub_quiesce)
@@ -772,22 +891,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 
 	time1 = get_cycles();
 	do {
-		if (try == 0) {
-			bau_desc->header.msg_type = MSG_REGULAR;
+		bau_desc = bcp->descriptor_base;
+		bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+		if (bcp->uvhub_version == 1) {
+			uv1 = 1;
+			uv1_hdr = &bau_desc->header.uv1_hdr;
+		} else
+			uv2_hdr = &bau_desc->header.uv2_hdr;
+		if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+			if (uv1)
+				uv1_hdr->msg_type = MSG_REGULAR;
+			else
+				uv2_hdr->msg_type = MSG_REGULAR;
 			seq_number = bcp->message_number++;
 		} else {
-			bau_desc->header.msg_type = MSG_RETRY;
+			if (uv1)
+				uv1_hdr->msg_type = MSG_RETRY;
+			else
+				uv2_hdr->msg_type = MSG_RETRY;
 			stat->s_retry_messages++;
 		}
 
-		bau_desc->header.sequence = seq_number;
-		index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
+		if (uv1)
+			uv1_hdr->sequence = seq_number;
+		else
+			uv2_hdr->sequence = seq_number;
+		index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
 		bcp->send_message = get_cycles();
 
 		write_mmr_activation(index);
 
 		try++;
 		completion_stat = wait_completion(bau_desc, bcp, try);
+		/* UV2: wait_completion() may change the bcp->using_desc */
 
 		handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
@@ -798,6 +934,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 		}
 		cpu_relax();
 	} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+		 (completion_stat == FLUSH_RETRY_BUSYBUG) ||
 		 (completion_stat == FLUSH_RETRY_TIMEOUT));
 
 	time2 = get_cycles();
@@ -812,6 +949,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 	record_send_stats(time1, time2, bcp, stat, completion_stat, try);
 
 	if (completion_stat == FLUSH_GIVEUP)
+		/* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
 		return 1;
 	return 0;
 }
@@ -967,7 +1105,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 		stat->s_ntargself++;
 
 	bau_desc = bcp->descriptor_base;
-	bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
+	bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
 	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 	if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
 		return NULL;
@@ -980,13 +1118,86 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
 	 * or 1 if it gave up and the original cpumask should be returned.
 	 */
-	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+	if (!uv_flush_send_and_wait(flush_mask, bcp))
 		return NULL;
 	else
 		return cpumask;
 }
 
 /*
+ * Search the message queue for any 'other' message with the same software
+ * acknowledge resource bit vector.
+ */
+struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
+			struct bau_control *bcp, unsigned char swack_vec)
+{
+	struct bau_pq_entry *msg_next = msg + 1;
+
+	if (msg_next > bcp->queue_last)
+		msg_next = bcp->queue_first;
+	while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
+		if (msg_next->swack_vec == swack_vec)
+			return msg_next;
+		msg_next++;
+		if (msg_next > bcp->queue_last)
+			msg_next = bcp->queue_first;
+	}
+	return NULL;
+}
+
+/*
+ * UV2 needs to work around a bug in which an arriving message has not
+ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
+ * Such a message must be ignored.
+ */
+void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+{
+	unsigned long mmr_image;
+	unsigned char swack_vec;
+	struct bau_pq_entry *msg = mdp->msg;
+	struct bau_pq_entry *other_msg;
+
+	mmr_image = read_mmr_sw_ack();
+	swack_vec = msg->swack_vec;
+
+	if ((swack_vec & mmr_image) == 0) {
+		/*
+		 * This message was assigned a swack resource, but no
+		 * reserved acknowlegment is pending.
+		 * The bug has prevented this message from setting the MMR.
+		 * And no other message has used the same sw_ack resource.
+		 * Do the requested shootdown but do not reply to the msg.
+		 * (the 0 means make no acknowledge)
+		 */
+		bau_process_message(mdp, bcp, 0);
+		return;
+	}
+
+	/*
+	 * Some message has set the MMR 'pending' bit; it might have been
+	 * another message. Look for that message.
+	 */
+	other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
+	if (other_msg) {
+		/* There is another. Do not ack the current one. */
+		bau_process_message(mdp, bcp, 0);
+		/*
+		 * Let the natural processing of that message acknowledge
+		 * it. Don't get the processing of sw_ack's out of order.
+		 */
+		return;
+	}
+
+	/*
+	 * There is no other message using this sw_ack, so it is safe to
+	 * acknowledge it.
+	 */
+	bau_process_message(mdp, bcp, 1);
+
+	return;
+}
+
+/*
  * The BAU message interrupt comes here. (registered by set_intr_gate)
  * See entry_64.S
  *
@@ -1009,6 +1220,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 	struct ptc_stats *stat;
 	struct msg_desc msgdesc;
 
+	ack_APIC_irq();
 	time_start = get_cycles();
 
 	bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1022,9 +1234,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 		count++;
 
 		msgdesc.msg_slot = msg - msgdesc.queue_first;
-		msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
 		msgdesc.msg = msg;
-		bau_process_message(&msgdesc, bcp);
+		if (bcp->uvhub_version == 2)
+			process_uv2_message(&msgdesc, bcp);
+		else
+			bau_process_message(&msgdesc, bcp, 1);
 
 		msg++;
 		if (msg > msgdesc.queue_last)
@@ -1036,8 +1250,6 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 		stat->d_nomsg++;
 	else if (count > 1)
 		stat->d_multmsg++;
-
-	ack_APIC_irq();
 }
 
 /*
@@ -1083,7 +1295,7 @@ static void __init enable_timeouts(void)
 		 */
 		mmr_image |= (1L << SOFTACK_MSHIFT);
 		if (is_uv2_hub()) {
-			mmr_image |= (1L << UV2_LEG_SHFT);
+			mmr_image &= ~(1L << UV2_LEG_SHFT);
 			mmr_image |= (1L << UV2_EXT_SHFT);
 		}
 		write_mmr_misc_control(pnode, mmr_image);
@@ -1136,13 +1348,13 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file,
 			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
 		seq_printf(file,
-			"numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
+			"numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
 		seq_printf(file,
 			"resetp resett giveup sto bz throt swack recv rtime ");
 		seq_printf(file,
 			"all one mult none retry canc nocan reset rcan ");
 		seq_printf(file,
-			"disable enable\n");
+			"disable enable wars warshw warwaits\n");
 	}
 	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
 		stat = &per_cpu(ptcstats, cpu);
@@ -1154,10 +1366,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 			stat->s_ntargremotes, stat->s_ntargcpu,
 			stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
 			stat->s_ntarguvhub, stat->s_ntarguvhub16);
-		seq_printf(file, "%ld %ld %ld %ld %ld ",
+		seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
 			stat->s_ntarguvhub8, stat->s_ntarguvhub4,
 			stat->s_ntarguvhub2, stat->s_ntarguvhub1,
-			stat->s_dtimeout);
+			stat->s_dtimeout, stat->s_strongnacks);
 		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
 			stat->s_retry_messages, stat->s_retriesok,
 			stat->s_resets_plug, stat->s_resets_timeout,
@@ -1173,8 +1385,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
 			stat->d_nomsg, stat->d_retries, stat->d_canceled,
 			stat->d_nocanceled, stat->d_resets,
 			stat->d_rcanceled);
-		seq_printf(file, "%ld %ld\n",
-			stat->s_bau_disabled, stat->s_bau_reenabled);
+		seq_printf(file, "%ld %ld %ld %ld %ld\n",
+			stat->s_bau_disabled, stat->s_bau_reenabled,
+			stat->s_uv2_wars, stat->s_uv2_wars_hw,
+			stat->s_uv2_war_waits);
 	}
 	return 0;
 }
@@ -1432,12 +1646,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 {
 	int i;
 	int cpu;
+	int uv1 = 0;
 	unsigned long gpa;
 	unsigned long m;
 	unsigned long n;
 	size_t dsize;
 	struct bau_desc *bau_desc;
 	struct bau_desc *bd2;
+	struct uv1_bau_msg_header *uv1_hdr;
+	struct uv2_bau_msg_header *uv2_hdr;
 	struct bau_control *bcp;
 
 	/*
@@ -1451,6 +1668,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 	gpa = uv_gpa(bau_desc);
 	n = uv_gpa_to_gnode(gpa);
 	m = uv_gpa_to_offset(gpa);
+	if (is_uv1_hub())
+		uv1 = 1;
 
 	/* the 14-bit pnode */
 	write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1461,21 +1680,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 	 */
 	for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
 		memset(bd2, 0, sizeof(struct bau_desc));
-		bd2->header.swack_flag = 1;
-		/*
-		 * The base_dest_nasid set in the message header is the nasid
-		 * of the first uvhub in the partition. The bit map will
-		 * indicate destination pnode numbers relative to that base.
-		 * They may not be consecutive if nasid striding is being used.
-		 */
-		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
-		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
-		bd2->header.command = UV_NET_ENDPOINT_INTD;
-		bd2->header.int_both = 1;
-		/*
-		 * all others need to be set to zero:
-		 * fairness chaining multilevel count replied_to
-		 */
+		if (uv1) {
+			uv1_hdr = &bd2->header.uv1_hdr;
+			uv1_hdr->swack_flag = 1;
+			/*
+			 * The base_dest_nasid set in the message header
+			 * is the nasid of the first uvhub in the partition.
+			 * The bit map will indicate destination pnode numbers
+			 * relative to that base. They may not be consecutive
+			 * if nasid striding is being used.
+			 */
+			uv1_hdr->base_dest_nasid =
+						UV_PNODE_TO_NASID(base_pnode);
+			uv1_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+			uv1_hdr->command = UV_NET_ENDPOINT_INTD;
+			uv1_hdr->int_both = 1;
+			/*
+			 * all others need to be set to zero:
+			 * fairness chaining multilevel count replied_to
+			 */
+		} else {
+			uv2_hdr = &bd2->header.uv2_hdr;
+			uv2_hdr->swack_flag = 1;
+			uv2_hdr->base_dest_nasid =
+						UV_PNODE_TO_NASID(base_pnode);
+			uv2_hdr->dest_subnodeid = UV_LB_SUBNODEID;
+			uv2_hdr->command = UV_NET_ENDPOINT_INTD;
+		}
 	}
 	for_each_present_cpu(cpu) {
 		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1531,6 +1762,7 @@ static void pq_init(int node, int pnode)
 	write_mmr_payload_first(pnode, pn_first);
 	write_mmr_payload_tail(pnode, first);
1533 | write_mmr_payload_last(pnode, last); | 1764 | write_mmr_payload_last(pnode, last); |
1765 | write_gmmr_sw_ack(pnode, 0xffffUL); | ||
1534 | 1766 | ||
1535 | /* in effect, all msg_type's are set to MSG_NOOP */ | 1767 | /* in effect, all msg_type's are set to MSG_NOOP */ |
1536 | memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); | 1768 | memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); |
@@ -1584,14 +1816,14 @@ static int calculate_destination_timeout(void) | |||
1584 | ts_ns = base * mult1 * mult2; | 1816 | ts_ns = base * mult1 * mult2; |
1585 | ret = ts_ns / 1000; | 1817 | ret = ts_ns / 1000; |
1586 | } else { | 1818 | } else { |
1587 | /* 4 bits 0/1 for 10/80us, 3 bits of multiplier */ | 1819 | /* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */ |
1588 | mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL); | 1820 | mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL); |
1589 | mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; | 1821 | mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT; |
1590 | if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) | 1822 | if (mmr_image & (1L << UV2_ACK_UNITS_SHFT)) |
1591 | mult1 = 80; | 1823 | base = 80; |
1592 | else | 1824 | else |
1593 | mult1 = 10; | 1825 | base = 10; |
1594 | base = mmr_image & UV2_ACK_MASK; | 1826 | mult1 = mmr_image & UV2_ACK_MASK; |
1595 | ret = mult1 * base; | 1827 | ret = mult1 * base; |
1596 | } | 1828 | } |
1597 | return ret; | 1829 | return ret; |
@@ -1618,6 +1850,7 @@ static void __init init_per_cpu_tunables(void) | |||
1618 | bcp->cong_response_us = congested_respns_us; | 1850 | bcp->cong_response_us = congested_respns_us; |
1619 | bcp->cong_reps = congested_reps; | 1851 | bcp->cong_reps = congested_reps; |
1620 | bcp->cong_period = congested_period; | 1852 | bcp->cong_period = congested_period; |
1853 | bcp->clocks_per_100_usec = usec_2_cycles(100); | ||
1621 | } | 1854 | } |
1622 | } | 1855 | } |
1623 | 1856 | ||
@@ -1728,8 +1961,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp, | |||
1728 | bcp->cpus_in_socket = sdp->num_cpus; | 1961 | bcp->cpus_in_socket = sdp->num_cpus; |
1729 | bcp->socket_master = *smasterp; | 1962 | bcp->socket_master = *smasterp; |
1730 | bcp->uvhub = bdp->uvhub; | 1963 | bcp->uvhub = bdp->uvhub; |
1964 | if (is_uv1_hub()) | ||
1965 | bcp->uvhub_version = 1; | ||
1966 | else if (is_uv2_hub()) | ||
1967 | bcp->uvhub_version = 2; | ||
1968 | else { | ||
1969 | printk(KERN_EMERG "uvhub version not 1 or 2\n"); | ||
1970 | return 1; | ||
1971 | } | ||
1731 | bcp->uvhub_master = *hmasterp; | 1972 | bcp->uvhub_master = *hmasterp; |
1732 | bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; | 1973 | bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; |
1974 | bcp->using_desc = bcp->uvhub_cpu; | ||
1733 | if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { | 1975 | if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { |
1734 | printk(KERN_EMERG "%d cpus per uvhub invalid\n", | 1976 | printk(KERN_EMERG "%d cpus per uvhub invalid\n", |
1735 | bcp->uvhub_cpu); | 1977 | bcp->uvhub_cpu); |
@@ -1845,6 +2087,8 @@ static int __init uv_bau_init(void) | |||
1845 | uv_base_pnode = uv_blade_to_pnode(uvhub); | 2087 | uv_base_pnode = uv_blade_to_pnode(uvhub); |
1846 | } | 2088 | } |
1847 | 2089 | ||
2090 | enable_timeouts(); | ||
2091 | |||
1848 | if (init_per_cpu(nuvhubs, uv_base_pnode)) { | 2092 | if (init_per_cpu(nuvhubs, uv_base_pnode)) { |
1849 | nobau = 1; | 2093 | nobau = 1; |
1850 | return 0; | 2094 | return 0; |
@@ -1855,7 +2099,6 @@ static int __init uv_bau_init(void) | |||
1855 | if (uv_blade_nr_possible_cpus(uvhub)) | 2099 | if (uv_blade_nr_possible_cpus(uvhub)) |
1856 | init_uvhub(uvhub, vector, uv_base_pnode); | 2100 | init_uvhub(uvhub, vector, uv_base_pnode); |
1857 | 2101 | ||
1858 | enable_timeouts(); | ||
1859 | alloc_intr_gate(vector, uv_bau_message_intr1); | 2102 | alloc_intr_gate(vector, uv_bau_message_intr1); |
1860 | 2103 | ||
1861 | for_each_possible_blade(uvhub) { | 2104 | for_each_possible_blade(uvhub) { |
@@ -1867,7 +2110,8 @@ static int __init uv_bau_init(void) | |||
1867 | val = 1L << 63; | 2110 | val = 1L << 63; |
1868 | write_gmmr_activation(pnode, val); | 2111 | write_gmmr_activation(pnode, val); |
1869 | mmr = 1; /* should be 1 to broadcast to both sockets */ | 2112 | mmr = 1; /* should be 1 to broadcast to both sockets */ |
1870 | write_mmr_data_broadcast(pnode, mmr); | 2113 | if (!is_uv1_hub()) |
2114 | write_mmr_data_broadcast(pnode, mmr); | ||
1871 | } | 2115 | } |
1872 | } | 2116 | } |
1873 | 2117 | ||
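
A minimal sketch of the hub-version gating introduced by the tlb_uv.c changes above: record the hub generation once and branch on it wherever UV1-only or UV2-only register layouts differ (the UV1 INTD header fields, the broadcast MMR write). is_uv1_hub()/is_uv2_hub() come from the patch context; the function itself is illustrative, not kernel code.

        #include <linux/errno.h>
        #include <asm/uv/uv_hub.h>

        static int example_hub_version(void)
        {
                if (is_uv1_hub())
                        return 1;       /* UV1: INTD-style header, broadcast MMR untouched */
                if (is_uv2_hub())
                        return 2;       /* UV2: uv2_bau_msg_header layout, extra stats */
                return -EINVAL;         /* unknown generation, as the patch guards against */
        }
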
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 37c4bd1cacd5..d0c41188d4e5 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM | |||
87 | 87 | ||
88 | config GPIO_IT8761E | 88 | config GPIO_IT8761E |
89 | tristate "IT8761E GPIO support" | 89 | tristate "IT8761E GPIO support" |
90 | depends on X86 # unconditional access to IO space. | ||
90 | help | 91 | help |
91 | Say yes here to support GPIO functionality of IT8761E super I/O chip. | 92 | Say yes here to support GPIO functionality of IT8761E super I/O chip. |
92 | 93 | ||
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c index 461958fc2264..03d6dd5dcb77 100644 --- a/drivers/gpio/gpio-ml-ioh.c +++ b/drivers/gpio/gpio-ml-ioh.c | |||
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) | |||
248 | static int ioh_irq_type(struct irq_data *d, unsigned int type) | 248 | static int ioh_irq_type(struct irq_data *d, unsigned int type) |
249 | { | 249 | { |
250 | u32 im; | 250 | u32 im; |
251 | u32 *im_reg; | 251 | void __iomem *im_reg; |
252 | u32 ien; | 252 | u32 ien; |
253 | u32 im_pos; | 253 | u32 im_pos; |
254 | int ch; | 254 | int ch; |
@@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev, | |||
412 | int i, j; | 412 | int i, j; |
413 | struct ioh_gpio *chip; | 413 | struct ioh_gpio *chip; |
414 | void __iomem *base; | 414 | void __iomem *base; |
415 | void __iomem *chip_save; | 415 | void *chip_save; |
416 | int irq_base; | 416 | int irq_base; |
417 | 417 | ||
418 | ret = pci_enable_device(pdev); | 418 | ret = pci_enable_device(pdev); |
@@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev, | |||
428 | } | 428 | } |
429 | 429 | ||
430 | base = pci_iomap(pdev, 1, 0); | 430 | base = pci_iomap(pdev, 1, 0); |
431 | if (base == 0) { | 431 | if (!base) { |
432 | dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); | 432 | dev_err(&pdev->dev, "%s : pci_iomap failed", __func__); |
433 | ret = -ENOMEM; | 433 | ret = -ENOMEM; |
434 | goto err_iomap; | 434 | goto err_iomap; |
@@ -521,7 +521,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev) | |||
521 | int err; | 521 | int err; |
522 | int i; | 522 | int i; |
523 | struct ioh_gpio *chip = pci_get_drvdata(pdev); | 523 | struct ioh_gpio *chip = pci_get_drvdata(pdev); |
524 | void __iomem *chip_save; | 524 | void *chip_save; |
525 | 525 | ||
526 | chip_save = chip; | 526 | chip_save = chip; |
527 | 527 | ||
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index f0603297f829..68fa55e86eb1 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c | |||
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip) | |||
231 | static int pch_irq_type(struct irq_data *d, unsigned int type) | 231 | static int pch_irq_type(struct irq_data *d, unsigned int type) |
232 | { | 232 | { |
233 | u32 im; | 233 | u32 im; |
234 | u32 *im_reg; | 234 | u32 __iomem *im_reg; |
235 | u32 ien; | 235 | u32 ien; |
236 | u32 im_pos; | 236 | u32 im_pos; |
237 | int ch; | 237 | int ch; |
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, | |||
376 | } | 376 | } |
377 | 377 | ||
378 | chip->base = pci_iomap(pdev, 1, 0); | 378 | chip->base = pci_iomap(pdev, 1, 0); |
379 | if (chip->base == 0) { | 379 | if (!chip->base) { |
380 | dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__); | 380 | dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__); |
381 | ret = -ENOMEM; | 381 | ret = -ENOMEM; |
382 | goto err_iomap; | 382 | goto err_iomap; |
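
The two GPIO driver hunks above make the same pair of cleanups: MMIO pointers keep the __iomem annotation so sparse can flag address-space misuse, and pci_iomap() failure is tested with !ptr rather than comparing a pointer against 0. A minimal sketch assuming only the standard PCI/IO headers; the helper name is hypothetical.

        #include <linux/pci.h>
        #include <linux/io.h>

        /* Hypothetical helper illustrating the pattern; not part of the drivers above. */
        static void __iomem *example_map_bar1(struct pci_dev *pdev)
        {
                void __iomem *base = pci_iomap(pdev, 1, 0);     /* keep the __iomem tag */

                if (!base)                                      /* NULL check, not "== 0" */
                        dev_err(&pdev->dev, "pci_iomap failed\n");
                return base;
        }
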
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index b9c1c297669e..91f45b965d1e 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c | |||
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, | |||
52 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); | 52 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); |
53 | 53 | ||
54 | /* Set the initial value */ | 54 | /* Set the initial value */ |
55 | tps65910_gpio_set(gc, 0, value); | 55 | tps65910_gpio_set(gc, offset, value); |
56 | 56 | ||
57 | return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, | 57 | return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, |
58 | GPIO_CFG_MASK); | 58 | GPIO_CFG_MASK); |
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c index 63be60bc3455..86cc3f7841cd 100644 --- a/drivers/mfd/mcp-core.c +++ b/drivers/mfd/mcp-core.c | |||
@@ -26,35 +26,9 @@ | |||
26 | #define to_mcp(d) container_of(d, struct mcp, attached_device) | 26 | #define to_mcp(d) container_of(d, struct mcp, attached_device) |
27 | #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) | 27 | #define to_mcp_driver(d) container_of(d, struct mcp_driver, drv) |
28 | 28 | ||
29 | static const struct mcp_device_id *mcp_match_id(const struct mcp_device_id *id, | ||
30 | const char *codec) | ||
31 | { | ||
32 | while (id->name[0]) { | ||
33 | if (strcmp(codec, id->name) == 0) | ||
34 | return id; | ||
35 | id++; | ||
36 | } | ||
37 | return NULL; | ||
38 | } | ||
39 | |||
40 | const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp) | ||
41 | { | ||
42 | const struct mcp_driver *driver = | ||
43 | to_mcp_driver(mcp->attached_device.driver); | ||
44 | |||
45 | return mcp_match_id(driver->id_table, mcp->codec); | ||
46 | } | ||
47 | EXPORT_SYMBOL(mcp_get_device_id); | ||
48 | |||
49 | static int mcp_bus_match(struct device *dev, struct device_driver *drv) | 29 | static int mcp_bus_match(struct device *dev, struct device_driver *drv) |
50 | { | 30 | { |
51 | const struct mcp *mcp = to_mcp(dev); | 31 | return 1; |
52 | const struct mcp_driver *driver = to_mcp_driver(drv); | ||
53 | |||
54 | if (driver->id_table) | ||
55 | return !!mcp_match_id(driver->id_table, mcp->codec); | ||
56 | |||
57 | return 0; | ||
58 | } | 32 | } |
59 | 33 | ||
60 | static int mcp_bus_probe(struct device *dev) | 34 | static int mcp_bus_probe(struct device *dev) |
@@ -100,18 +74,9 @@ static int mcp_bus_resume(struct device *dev) | |||
100 | return ret; | 74 | return ret; |
101 | } | 75 | } |
102 | 76 | ||
103 | static int mcp_bus_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
104 | { | ||
105 | struct mcp *mcp = to_mcp(dev); | ||
106 | |||
107 | add_uevent_var(env, "MODALIAS=%s%s", MCP_MODULE_PREFIX, mcp->codec); | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | static struct bus_type mcp_bus_type = { | 77 | static struct bus_type mcp_bus_type = { |
112 | .name = "mcp", | 78 | .name = "mcp", |
113 | .match = mcp_bus_match, | 79 | .match = mcp_bus_match, |
114 | .uevent = mcp_bus_uevent, | ||
115 | .probe = mcp_bus_probe, | 80 | .probe = mcp_bus_probe, |
116 | .remove = mcp_bus_remove, | 81 | .remove = mcp_bus_remove, |
117 | .suspend = mcp_bus_suspend, | 82 | .suspend = mcp_bus_suspend, |
@@ -128,9 +93,11 @@ static struct bus_type mcp_bus_type = { | |||
128 | */ | 93 | */ |
129 | void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div) | 94 | void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div) |
130 | { | 95 | { |
131 | spin_lock_irq(&mcp->lock); | 96 | unsigned long flags; |
97 | |||
98 | spin_lock_irqsave(&mcp->lock, flags); | ||
132 | mcp->ops->set_telecom_divisor(mcp, div); | 99 | mcp->ops->set_telecom_divisor(mcp, div); |
133 | spin_unlock_irq(&mcp->lock); | 100 | spin_unlock_irqrestore(&mcp->lock, flags); |
134 | } | 101 | } |
135 | EXPORT_SYMBOL(mcp_set_telecom_divisor); | 102 | EXPORT_SYMBOL(mcp_set_telecom_divisor); |
136 | 103 | ||
@@ -143,9 +110,11 @@ EXPORT_SYMBOL(mcp_set_telecom_divisor); | |||
143 | */ | 110 | */ |
144 | void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div) | 111 | void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div) |
145 | { | 112 | { |
146 | spin_lock_irq(&mcp->lock); | 113 | unsigned long flags; |
114 | |||
115 | spin_lock_irqsave(&mcp->lock, flags); | ||
147 | mcp->ops->set_audio_divisor(mcp, div); | 116 | mcp->ops->set_audio_divisor(mcp, div); |
148 | spin_unlock_irq(&mcp->lock); | 117 | spin_unlock_irqrestore(&mcp->lock, flags); |
149 | } | 118 | } |
150 | EXPORT_SYMBOL(mcp_set_audio_divisor); | 119 | EXPORT_SYMBOL(mcp_set_audio_divisor); |
151 | 120 | ||
@@ -198,10 +167,11 @@ EXPORT_SYMBOL(mcp_reg_read); | |||
198 | */ | 167 | */ |
199 | void mcp_enable(struct mcp *mcp) | 168 | void mcp_enable(struct mcp *mcp) |
200 | { | 169 | { |
201 | spin_lock_irq(&mcp->lock); | 170 | unsigned long flags; |
171 | spin_lock_irqsave(&mcp->lock, flags); | ||
202 | if (mcp->use_count++ == 0) | 172 | if (mcp->use_count++ == 0) |
203 | mcp->ops->enable(mcp); | 173 | mcp->ops->enable(mcp); |
204 | spin_unlock_irq(&mcp->lock); | 174 | spin_unlock_irqrestore(&mcp->lock, flags); |
205 | } | 175 | } |
206 | EXPORT_SYMBOL(mcp_enable); | 176 | EXPORT_SYMBOL(mcp_enable); |
207 | 177 | ||
@@ -247,14 +217,9 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size) | |||
247 | } | 217 | } |
248 | EXPORT_SYMBOL(mcp_host_alloc); | 218 | EXPORT_SYMBOL(mcp_host_alloc); |
249 | 219 | ||
250 | int mcp_host_register(struct mcp *mcp, void *pdata) | 220 | int mcp_host_register(struct mcp *mcp) |
251 | { | 221 | { |
252 | if (!mcp->codec) | ||
253 | return -EINVAL; | ||
254 | |||
255 | mcp->attached_device.platform_data = pdata; | ||
256 | dev_set_name(&mcp->attached_device, "mcp0"); | 222 | dev_set_name(&mcp->attached_device, "mcp0"); |
257 | request_module("%s%s", MCP_MODULE_PREFIX, mcp->codec); | ||
258 | return device_register(&mcp->attached_device); | 223 | return device_register(&mcp->attached_device); |
259 | } | 224 | } |
260 | EXPORT_SYMBOL(mcp_host_register); | 225 | EXPORT_SYMBOL(mcp_host_register); |
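
The mcp-core hunks above convert spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants so these helpers stay correct when a caller already runs with interrupts disabled; unconditionally re-enabling IRQs on unlock was the problem. A self-contained sketch of the pattern (the struct and its fields are illustrative only):

        #include <linux/spinlock.h>

        struct example_host {
                spinlock_t      lock;
                unsigned int    use_count;
        };

        static void example_enable(struct example_host *h)
        {
                unsigned long flags;

                spin_lock_irqsave(&h->lock, flags);      /* save the caller's IRQ state */
                h->use_count++;                          /* e.g. power up hardware on 0 -> 1 */
                spin_unlock_irqrestore(&h->lock, flags); /* restore it, don't force-enable */
        }
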
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c index 9adc2eb69492..02c53a0766c4 100644 --- a/drivers/mfd/mcp-sa11x0.c +++ b/drivers/mfd/mcp-sa11x0.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/spinlock.h> | 19 | #include <linux/spinlock.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/mfd/mcp.h> | 21 | #include <linux/mfd/mcp.h> |
22 | #include <linux/io.h> | ||
23 | 22 | ||
24 | #include <mach/dma.h> | 23 | #include <mach/dma.h> |
25 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
@@ -27,19 +26,12 @@ | |||
27 | #include <asm/system.h> | 26 | #include <asm/system.h> |
28 | #include <mach/mcp.h> | 27 | #include <mach/mcp.h> |
29 | 28 | ||
30 | /* Register offsets */ | 29 | #include <mach/assabet.h> |
31 | #define MCCR0 0x00 | 30 | |
32 | #define MCDR0 0x08 | ||
33 | #define MCDR1 0x0C | ||
34 | #define MCDR2 0x10 | ||
35 | #define MCSR 0x18 | ||
36 | #define MCCR1 0x00 | ||
37 | 31 | ||
38 | struct mcp_sa11x0 { | 32 | struct mcp_sa11x0 { |
39 | u32 mccr0; | 33 | u32 mccr0; |
40 | u32 mccr1; | 34 | u32 mccr1; |
41 | unsigned char *mccr0_base; | ||
42 | unsigned char *mccr1_base; | ||
43 | }; | 35 | }; |
44 | 36 | ||
45 | #define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp)) | 37 | #define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp)) |
@@ -47,25 +39,25 @@ struct mcp_sa11x0 { | |||
47 | static void | 39 | static void |
48 | mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor) | 40 | mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor) |
49 | { | 41 | { |
50 | struct mcp_sa11x0 *priv = priv(mcp); | 42 | unsigned int mccr0; |
51 | 43 | ||
52 | divisor /= 32; | 44 | divisor /= 32; |
53 | 45 | ||
54 | priv->mccr0 &= ~0x00007f00; | 46 | mccr0 = Ser4MCCR0 & ~0x00007f00; |
55 | priv->mccr0 |= divisor << 8; | 47 | mccr0 |= divisor << 8; |
56 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | 48 | Ser4MCCR0 = mccr0; |
57 | } | 49 | } |
58 | 50 | ||
59 | static void | 51 | static void |
60 | mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor) | 52 | mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor) |
61 | { | 53 | { |
62 | struct mcp_sa11x0 *priv = priv(mcp); | 54 | unsigned int mccr0; |
63 | 55 | ||
64 | divisor /= 32; | 56 | divisor /= 32; |
65 | 57 | ||
66 | priv->mccr0 &= ~0x0000007f; | 58 | mccr0 = Ser4MCCR0 & ~0x0000007f; |
67 | priv->mccr0 |= divisor; | 59 | mccr0 |= divisor; |
68 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | 60 | Ser4MCCR0 = mccr0; |
69 | } | 61 | } |
70 | 62 | ||
71 | /* | 63 | /* |
@@ -79,16 +71,12 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val) | |||
79 | { | 71 | { |
80 | int ret = -ETIME; | 72 | int ret = -ETIME; |
81 | int i; | 73 | int i; |
82 | u32 mcpreg; | ||
83 | struct mcp_sa11x0 *priv = priv(mcp); | ||
84 | 74 | ||
85 | mcpreg = reg << 17 | MCDR2_Wr | (val & 0xffff); | 75 | Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff); |
86 | __raw_writel(mcpreg, priv->mccr0_base + MCDR2); | ||
87 | 76 | ||
88 | for (i = 0; i < 2; i++) { | 77 | for (i = 0; i < 2; i++) { |
89 | udelay(mcp->rw_timeout); | 78 | udelay(mcp->rw_timeout); |
90 | mcpreg = __raw_readl(priv->mccr0_base + MCSR); | 79 | if (Ser4MCSR & MCSR_CWC) { |
91 | if (mcpreg & MCSR_CWC) { | ||
92 | ret = 0; | 80 | ret = 0; |
93 | break; | 81 | break; |
94 | } | 82 | } |
@@ -109,18 +97,13 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg) | |||
109 | { | 97 | { |
110 | int ret = -ETIME; | 98 | int ret = -ETIME; |
111 | int i; | 99 | int i; |
112 | u32 mcpreg; | ||
113 | struct mcp_sa11x0 *priv = priv(mcp); | ||
114 | 100 | ||
115 | mcpreg = reg << 17 | MCDR2_Rd; | 101 | Ser4MCDR2 = reg << 17 | MCDR2_Rd; |
116 | __raw_writel(mcpreg, priv->mccr0_base + MCDR2); | ||
117 | 102 | ||
118 | for (i = 0; i < 2; i++) { | 103 | for (i = 0; i < 2; i++) { |
119 | udelay(mcp->rw_timeout); | 104 | udelay(mcp->rw_timeout); |
120 | mcpreg = __raw_readl(priv->mccr0_base + MCSR); | 105 | if (Ser4MCSR & MCSR_CRC) { |
121 | if (mcpreg & MCSR_CRC) { | 106 | ret = Ser4MCDR2 & 0xffff; |
122 | ret = __raw_readl(priv->mccr0_base + MCDR2) | ||
123 | & 0xffff; | ||
124 | break; | 107 | break; |
125 | } | 108 | } |
126 | } | 109 | } |
@@ -133,19 +116,13 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg) | |||
133 | 116 | ||
134 | static void mcp_sa11x0_enable(struct mcp *mcp) | 117 | static void mcp_sa11x0_enable(struct mcp *mcp) |
135 | { | 118 | { |
136 | struct mcp_sa11x0 *priv = priv(mcp); | 119 | Ser4MCSR = -1; |
137 | 120 | Ser4MCCR0 |= MCCR0_MCE; | |
138 | __raw_writel(-1, priv->mccr0_base + MCSR); | ||
139 | priv->mccr0 |= MCCR0_MCE; | ||
140 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | ||
141 | } | 121 | } |
142 | 122 | ||
143 | static void mcp_sa11x0_disable(struct mcp *mcp) | 123 | static void mcp_sa11x0_disable(struct mcp *mcp) |
144 | { | 124 | { |
145 | struct mcp_sa11x0 *priv = priv(mcp); | 125 | Ser4MCCR0 &= ~MCCR0_MCE; |
146 | |||
147 | priv->mccr0 &= ~MCCR0_MCE; | ||
148 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | ||
149 | } | 126 | } |
150 | 127 | ||
151 | /* | 128 | /* |
@@ -165,69 +142,50 @@ static int mcp_sa11x0_probe(struct platform_device *pdev) | |||
165 | struct mcp_plat_data *data = pdev->dev.platform_data; | 142 | struct mcp_plat_data *data = pdev->dev.platform_data; |
166 | struct mcp *mcp; | 143 | struct mcp *mcp; |
167 | int ret; | 144 | int ret; |
168 | struct mcp_sa11x0 *priv; | ||
169 | struct resource *res_mem0, *res_mem1; | ||
170 | u32 size0, size1; | ||
171 | 145 | ||
172 | if (!data) | 146 | if (!data) |
173 | return -ENODEV; | 147 | return -ENODEV; |
174 | 148 | ||
175 | if (!data->codec) | 149 | if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp")) |
176 | return -ENODEV; | ||
177 | |||
178 | res_mem0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
179 | if (!res_mem0) | ||
180 | return -ENODEV; | ||
181 | size0 = res_mem0->end - res_mem0->start + 1; | ||
182 | |||
183 | res_mem1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
184 | if (!res_mem1) | ||
185 | return -ENODEV; | ||
186 | size1 = res_mem1->end - res_mem1->start + 1; | ||
187 | |||
188 | if (!request_mem_region(res_mem0->start, size0, "sa11x0-mcp")) | ||
189 | return -EBUSY; | 150 | return -EBUSY; |
190 | 151 | ||
191 | if (!request_mem_region(res_mem1->start, size1, "sa11x0-mcp")) { | ||
192 | ret = -EBUSY; | ||
193 | goto release; | ||
194 | } | ||
195 | |||
196 | mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0)); | 152 | mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0)); |
197 | if (!mcp) { | 153 | if (!mcp) { |
198 | ret = -ENOMEM; | 154 | ret = -ENOMEM; |
199 | goto release2; | 155 | goto release; |
200 | } | 156 | } |
201 | 157 | ||
202 | priv = priv(mcp); | ||
203 | |||
204 | mcp->owner = THIS_MODULE; | 158 | mcp->owner = THIS_MODULE; |
205 | mcp->ops = &mcp_sa11x0; | 159 | mcp->ops = &mcp_sa11x0; |
206 | mcp->sclk_rate = data->sclk_rate; | 160 | mcp->sclk_rate = data->sclk_rate; |
207 | mcp->dma_audio_rd = DDAR_DevAdd(res_mem0->start + MCDR0) | 161 | mcp->dma_audio_rd = DMA_Ser4MCP0Rd; |
208 | + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev; | 162 | mcp->dma_audio_wr = DMA_Ser4MCP0Wr; |
209 | mcp->dma_audio_wr = DDAR_DevAdd(res_mem0->start + MCDR0) | 163 | mcp->dma_telco_rd = DMA_Ser4MCP1Rd; |
210 | + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev; | 164 | mcp->dma_telco_wr = DMA_Ser4MCP1Wr; |
211 | mcp->dma_telco_rd = DDAR_DevAdd(res_mem0->start + MCDR1) | 165 | mcp->gpio_base = data->gpio_base; |
212 | + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev; | ||
213 | mcp->dma_telco_wr = DDAR_DevAdd(res_mem0->start + MCDR1) | ||
214 | + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev; | ||
215 | mcp->codec = data->codec; | ||
216 | 166 | ||
217 | platform_set_drvdata(pdev, mcp); | 167 | platform_set_drvdata(pdev, mcp); |
218 | 168 | ||
169 | if (machine_is_assabet()) { | ||
170 | ASSABET_BCR_set(ASSABET_BCR_CODEC_RST); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * Setup the PPC unit correctly. | ||
175 | */ | ||
176 | PPDR &= ~PPC_RXD4; | ||
177 | PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM; | ||
178 | PSDR |= PPC_RXD4; | ||
179 | PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); | ||
180 | PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM); | ||
181 | |||
219 | /* | 182 | /* |
220 | * Initialise device. Note that we initially | 183 | * Initialise device. Note that we initially |
221 | * set the sampling rate to minimum. | 184 | * set the sampling rate to minimum. |
222 | */ | 185 | */ |
223 | priv->mccr0_base = ioremap(res_mem0->start, size0); | 186 | Ser4MCSR = -1; |
224 | priv->mccr1_base = ioremap(res_mem1->start, size1); | 187 | Ser4MCCR1 = data->mccr1; |
225 | 188 | Ser4MCCR0 = data->mccr0 | 0x7f7f; | |
226 | __raw_writel(-1, priv->mccr0_base + MCSR); | ||
227 | priv->mccr1 = data->mccr1; | ||
228 | priv->mccr0 = data->mccr0 | 0x7f7f; | ||
229 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | ||
230 | __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1); | ||
231 | 189 | ||
232 | /* | 190 | /* |
233 | * Calculate the read/write timeout (us) from the bit clock | 191 | * Calculate the read/write timeout (us) from the bit clock |
@@ -237,53 +195,36 @@ static int mcp_sa11x0_probe(struct platform_device *pdev) | |||
237 | mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) / | 195 | mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) / |
238 | mcp->sclk_rate; | 196 | mcp->sclk_rate; |
239 | 197 | ||
240 | ret = mcp_host_register(mcp, data->codec_pdata); | 198 | ret = mcp_host_register(mcp); |
241 | if (ret == 0) | 199 | if (ret == 0) |
242 | goto out; | 200 | goto out; |
243 | 201 | ||
244 | release2: | ||
245 | release_mem_region(res_mem1->start, size1); | ||
246 | release: | 202 | release: |
247 | release_mem_region(res_mem0->start, size0); | 203 | release_mem_region(0x80060000, 0x60); |
248 | platform_set_drvdata(pdev, NULL); | 204 | platform_set_drvdata(pdev, NULL); |
249 | 205 | ||
250 | out: | 206 | out: |
251 | return ret; | 207 | return ret; |
252 | } | 208 | } |
253 | 209 | ||
254 | static int mcp_sa11x0_remove(struct platform_device *pdev) | 210 | static int mcp_sa11x0_remove(struct platform_device *dev) |
255 | { | 211 | { |
256 | struct mcp *mcp = platform_get_drvdata(pdev); | 212 | struct mcp *mcp = platform_get_drvdata(dev); |
257 | struct mcp_sa11x0 *priv = priv(mcp); | ||
258 | struct resource *res_mem; | ||
259 | u32 size; | ||
260 | 213 | ||
261 | platform_set_drvdata(pdev, NULL); | 214 | platform_set_drvdata(dev, NULL); |
262 | mcp_host_unregister(mcp); | 215 | mcp_host_unregister(mcp); |
216 | release_mem_region(0x80060000, 0x60); | ||
263 | 217 | ||
264 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
265 | if (res_mem) { | ||
266 | size = res_mem->end - res_mem->start + 1; | ||
267 | release_mem_region(res_mem->start, size); | ||
268 | } | ||
269 | res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
270 | if (res_mem) { | ||
271 | size = res_mem->end - res_mem->start + 1; | ||
272 | release_mem_region(res_mem->start, size); | ||
273 | } | ||
274 | iounmap(priv->mccr0_base); | ||
275 | iounmap(priv->mccr1_base); | ||
276 | return 0; | 218 | return 0; |
277 | } | 219 | } |
278 | 220 | ||
279 | static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) | 221 | static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) |
280 | { | 222 | { |
281 | struct mcp *mcp = platform_get_drvdata(dev); | 223 | struct mcp *mcp = platform_get_drvdata(dev); |
282 | struct mcp_sa11x0 *priv = priv(mcp); | ||
283 | u32 mccr0; | ||
284 | 224 | ||
285 | mccr0 = priv->mccr0 & ~MCCR0_MCE; | 225 | priv(mcp)->mccr0 = Ser4MCCR0; |
286 | __raw_writel(mccr0, priv->mccr0_base + MCCR0); | 226 | priv(mcp)->mccr1 = Ser4MCCR1; |
227 | Ser4MCCR0 &= ~MCCR0_MCE; | ||
287 | 228 | ||
288 | return 0; | 229 | return 0; |
289 | } | 230 | } |
@@ -291,10 +232,9 @@ static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state) | |||
291 | static int mcp_sa11x0_resume(struct platform_device *dev) | 232 | static int mcp_sa11x0_resume(struct platform_device *dev) |
292 | { | 233 | { |
293 | struct mcp *mcp = platform_get_drvdata(dev); | 234 | struct mcp *mcp = platform_get_drvdata(dev); |
294 | struct mcp_sa11x0 *priv = priv(mcp); | ||
295 | 235 | ||
296 | __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0); | 236 | Ser4MCCR1 = priv(mcp)->mccr1; |
297 | __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1); | 237 | Ser4MCCR0 = priv(mcp)->mccr0; |
298 | 238 | ||
299 | return 0; | 239 | return 0; |
300 | } | 240 | } |
@@ -311,7 +251,6 @@ static struct platform_driver mcp_sa11x0_driver = { | |||
311 | .resume = mcp_sa11x0_resume, | 251 | .resume = mcp_sa11x0_resume, |
312 | .driver = { | 252 | .driver = { |
313 | .name = "sa11x0-mcp", | 253 | .name = "sa11x0-mcp", |
314 | .owner = THIS_MODULE, | ||
315 | }, | 254 | }, |
316 | }; | 255 | }; |
317 | 256 | ||
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c index 91c4f25e0e55..febc90cdef7e 100644 --- a/drivers/mfd/ucb1x00-core.c +++ b/drivers/mfd/ucb1x00-core.c | |||
@@ -36,15 +36,6 @@ static DEFINE_MUTEX(ucb1x00_mutex); | |||
36 | static LIST_HEAD(ucb1x00_drivers); | 36 | static LIST_HEAD(ucb1x00_drivers); |
37 | static LIST_HEAD(ucb1x00_devices); | 37 | static LIST_HEAD(ucb1x00_devices); |
38 | 38 | ||
39 | static struct mcp_device_id ucb1x00_id[] = { | ||
40 | { "ucb1x00", 0 }, /* auto-detection */ | ||
41 | { "ucb1200", UCB_ID_1200 }, | ||
42 | { "ucb1300", UCB_ID_1300 }, | ||
43 | { "tc35143", UCB_ID_TC35143 }, | ||
44 | { } | ||
45 | }; | ||
46 | MODULE_DEVICE_TABLE(mcp, ucb1x00_id); | ||
47 | |||
48 | /** | 39 | /** |
49 | * ucb1x00_io_set_dir - set IO direction | 40 | * ucb1x00_io_set_dir - set IO direction |
50 | * @ucb: UCB1x00 structure describing chip | 41 | * @ucb: UCB1x00 structure describing chip |
@@ -157,16 +148,22 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset | |||
157 | { | 148 | { |
158 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); | 149 | struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); |
159 | unsigned long flags; | 150 | unsigned long flags; |
151 | unsigned old, mask = 1 << offset; | ||
160 | 152 | ||
161 | spin_lock_irqsave(&ucb->io_lock, flags); | 153 | spin_lock_irqsave(&ucb->io_lock, flags); |
162 | ucb->io_dir |= (1 << offset); | 154 | old = ucb->io_out; |
163 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | ||
164 | |||
165 | if (value) | 155 | if (value) |
166 | ucb->io_out |= 1 << offset; | 156 | ucb->io_out |= mask; |
167 | else | 157 | else |
168 | ucb->io_out &= ~(1 << offset); | 158 | ucb->io_out &= ~mask; |
169 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); | 159 | |
160 | if (old != ucb->io_out) | ||
161 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); | ||
162 | |||
163 | if (!(ucb->io_dir & mask)) { | ||
164 | ucb->io_dir |= mask; | ||
165 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | ||
166 | } | ||
170 | spin_unlock_irqrestore(&ucb->io_lock, flags); | 167 | spin_unlock_irqrestore(&ucb->io_lock, flags); |
171 | 168 | ||
172 | return 0; | 169 | return 0; |
@@ -536,33 +533,17 @@ static struct class ucb1x00_class = { | |||
536 | 533 | ||
537 | static int ucb1x00_probe(struct mcp *mcp) | 534 | static int ucb1x00_probe(struct mcp *mcp) |
538 | { | 535 | { |
539 | const struct mcp_device_id *mid; | ||
540 | struct ucb1x00 *ucb; | 536 | struct ucb1x00 *ucb; |
541 | struct ucb1x00_driver *drv; | 537 | struct ucb1x00_driver *drv; |
542 | struct ucb1x00_plat_data *pdata; | ||
543 | unsigned int id; | 538 | unsigned int id; |
544 | int ret = -ENODEV; | 539 | int ret = -ENODEV; |
545 | int temp; | 540 | int temp; |
546 | 541 | ||
547 | mcp_enable(mcp); | 542 | mcp_enable(mcp); |
548 | id = mcp_reg_read(mcp, UCB_ID); | 543 | id = mcp_reg_read(mcp, UCB_ID); |
549 | mid = mcp_get_device_id(mcp); | ||
550 | 544 | ||
551 | if (mid && mid->driver_data) { | 545 | if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) { |
552 | if (id != mid->driver_data) { | 546 | printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id); |
553 | printk(KERN_WARNING "%s wrong ID %04x found: %04x\n", | ||
554 | mid->name, (unsigned int) mid->driver_data, id); | ||
555 | goto err_disable; | ||
556 | } | ||
557 | } else { | ||
558 | mid = &ucb1x00_id[1]; | ||
559 | while (mid->driver_data) { | ||
560 | if (id == mid->driver_data) | ||
561 | break; | ||
562 | mid++; | ||
563 | } | ||
564 | printk(KERN_WARNING "%s ID not found: %04x\n", | ||
565 | ucb1x00_id[0].name, id); | ||
566 | goto err_disable; | 547 | goto err_disable; |
567 | } | 548 | } |
568 | 549 | ||
@@ -571,28 +552,28 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
571 | if (!ucb) | 552 | if (!ucb) |
572 | goto err_disable; | 553 | goto err_disable; |
573 | 554 | ||
574 | pdata = mcp->attached_device.platform_data; | 555 | |
575 | ucb->dev.class = &ucb1x00_class; | 556 | ucb->dev.class = &ucb1x00_class; |
576 | ucb->dev.parent = &mcp->attached_device; | 557 | ucb->dev.parent = &mcp->attached_device; |
577 | dev_set_name(&ucb->dev, mid->name); | 558 | dev_set_name(&ucb->dev, "ucb1x00"); |
578 | 559 | ||
579 | spin_lock_init(&ucb->lock); | 560 | spin_lock_init(&ucb->lock); |
580 | spin_lock_init(&ucb->io_lock); | 561 | spin_lock_init(&ucb->io_lock); |
581 | sema_init(&ucb->adc_sem, 1); | 562 | sema_init(&ucb->adc_sem, 1); |
582 | 563 | ||
583 | ucb->id = mid; | 564 | ucb->id = id; |
584 | ucb->mcp = mcp; | 565 | ucb->mcp = mcp; |
585 | ucb->irq = ucb1x00_detect_irq(ucb); | 566 | ucb->irq = ucb1x00_detect_irq(ucb); |
586 | if (ucb->irq == NO_IRQ) { | 567 | if (ucb->irq == NO_IRQ) { |
587 | printk(KERN_ERR "%s: IRQ probe failed\n", mid->name); | 568 | printk(KERN_ERR "UCB1x00: IRQ probe failed\n"); |
588 | ret = -ENODEV; | 569 | ret = -ENODEV; |
589 | goto err_free; | 570 | goto err_free; |
590 | } | 571 | } |
591 | 572 | ||
592 | ucb->gpio.base = -1; | 573 | ucb->gpio.base = -1; |
593 | if (pdata && (pdata->gpio_base >= 0)) { | 574 | if (mcp->gpio_base != 0) { |
594 | ucb->gpio.label = dev_name(&ucb->dev); | 575 | ucb->gpio.label = dev_name(&ucb->dev); |
595 | ucb->gpio.base = pdata->gpio_base; | 576 | ucb->gpio.base = mcp->gpio_base; |
596 | ucb->gpio.ngpio = 10; | 577 | ucb->gpio.ngpio = 10; |
597 | ucb->gpio.set = ucb1x00_gpio_set; | 578 | ucb->gpio.set = ucb1x00_gpio_set; |
598 | ucb->gpio.get = ucb1x00_gpio_get; | 579 | ucb->gpio.get = ucb1x00_gpio_get; |
@@ -605,10 +586,10 @@ static int ucb1x00_probe(struct mcp *mcp) | |||
605 | dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); | 586 | dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); |
606 | 587 | ||
607 | ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, | 588 | ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, |
608 | mid->name, ucb); | 589 | "UCB1x00", ucb); |
609 | if (ret) { | 590 | if (ret) { |
610 | printk(KERN_ERR "%s: unable to grab irq%d: %d\n", | 591 | printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n", |
611 | mid->name, ucb->irq, ret); | 592 | ucb->irq, ret); |
612 | goto err_gpio; | 593 | goto err_gpio; |
613 | } | 594 | } |
614 | 595 | ||
@@ -712,6 +693,7 @@ static int ucb1x00_resume(struct mcp *mcp) | |||
712 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); | 693 | struct ucb1x00 *ucb = mcp_get_drvdata(mcp); |
713 | struct ucb1x00_dev *dev; | 694 | struct ucb1x00_dev *dev; |
714 | 695 | ||
696 | ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); | ||
715 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); | 697 | ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); |
716 | mutex_lock(&ucb1x00_mutex); | 698 | mutex_lock(&ucb1x00_mutex); |
717 | list_for_each_entry(dev, &ucb->devs, dev_node) { | 699 | list_for_each_entry(dev, &ucb->devs, dev_node) { |
@@ -730,7 +712,6 @@ static struct mcp_driver ucb1x00_driver = { | |||
730 | .remove = ucb1x00_remove, | 712 | .remove = ucb1x00_remove, |
731 | .suspend = ucb1x00_suspend, | 713 | .suspend = ucb1x00_suspend, |
732 | .resume = ucb1x00_resume, | 714 | .resume = ucb1x00_resume, |
733 | .id_table = ucb1x00_id, | ||
734 | }; | 715 | }; |
735 | 716 | ||
736 | static int __init ucb1x00_init(void) | 717 | static int __init ucb1x00_init(void) |
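
Among the ucb1x00-core changes above, the reworked direction_output helper encodes two small rules: latch the desired output level before flipping the direction bit, so the pin never drives a stale value, and skip register writes whose value has not changed. A hedged sketch of that ordering; the struct, register indices and write callback are illustrative stand-ins for the chip accessors.

        #include <linux/spinlock.h>
        #include <linux/types.h>

        #define EX_REG_DATA     0
        #define EX_REG_DIR      1

        struct example_io {
                spinlock_t      lock;
                u16             dir, out;
                void            (*write)(unsigned reg, u16 val);  /* chip access */
        };

        static void example_direction_output(struct example_io *io, unsigned offset,
                                             int value)
        {
                unsigned long flags;
                u16 old, mask = 1 << offset;

                spin_lock_irqsave(&io->lock, flags);
                old = io->out;
                if (value)
                        io->out |= mask;
                else
                        io->out &= ~mask;
                if (old != io->out)                     /* avoid a redundant write */
                        io->write(EX_REG_DATA, io->out);
                if (!(io->dir & mask)) {                /* data first, then direction */
                        io->dir |= mask;
                        io->write(EX_REG_DIR, io->dir);
                }
                spin_unlock_irqrestore(&io->lock, flags);
        }
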
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 40ec3c118868..63a3cbdfa3f3 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c | |||
@@ -47,7 +47,6 @@ struct ucb1x00_ts { | |||
47 | u16 x_res; | 47 | u16 x_res; |
48 | u16 y_res; | 48 | u16 y_res; |
49 | 49 | ||
50 | unsigned int restart:1; | ||
51 | unsigned int adcsync:1; | 50 | unsigned int adcsync:1; |
52 | }; | 51 | }; |
53 | 52 | ||
@@ -207,15 +206,17 @@ static int ucb1x00_thread(void *_ts) | |||
207 | { | 206 | { |
208 | struct ucb1x00_ts *ts = _ts; | 207 | struct ucb1x00_ts *ts = _ts; |
209 | DECLARE_WAITQUEUE(wait, current); | 208 | DECLARE_WAITQUEUE(wait, current); |
209 | bool frozen, ignore = false; | ||
210 | int valid = 0; | 210 | int valid = 0; |
211 | 211 | ||
212 | set_freezable(); | 212 | set_freezable(); |
213 | add_wait_queue(&ts->irq_wait, &wait); | 213 | add_wait_queue(&ts->irq_wait, &wait); |
214 | while (!kthread_should_stop()) { | 214 | while (!kthread_freezable_should_stop(&frozen)) { |
215 | unsigned int x, y, p; | 215 | unsigned int x, y, p; |
216 | signed long timeout; | 216 | signed long timeout; |
217 | 217 | ||
218 | ts->restart = 0; | 218 | if (frozen) |
219 | ignore = true; | ||
219 | 220 | ||
220 | ucb1x00_adc_enable(ts->ucb); | 221 | ucb1x00_adc_enable(ts->ucb); |
221 | 222 | ||
@@ -258,7 +259,7 @@ static int ucb1x00_thread(void *_ts) | |||
258 | * space. We therefore leave it to user space | 259 | * space. We therefore leave it to user space |
259 | * to do any filtering they please. | 260 | * to do any filtering they please. |
260 | */ | 261 | */ |
261 | if (!ts->restart) { | 262 | if (!ignore) { |
262 | ucb1x00_ts_evt_add(ts, p, x, y); | 263 | ucb1x00_ts_evt_add(ts, p, x, y); |
263 | valid = 1; | 264 | valid = 1; |
264 | } | 265 | } |
@@ -267,8 +268,6 @@ static int ucb1x00_thread(void *_ts) | |||
267 | timeout = HZ / 100; | 268 | timeout = HZ / 100; |
268 | } | 269 | } |
269 | 270 | ||
270 | try_to_freeze(); | ||
271 | |||
272 | schedule_timeout(timeout); | 271 | schedule_timeout(timeout); |
273 | } | 272 | } |
274 | 273 | ||
@@ -340,26 +339,6 @@ static void ucb1x00_ts_close(struct input_dev *idev) | |||
340 | ucb1x00_disable(ts->ucb); | 339 | ucb1x00_disable(ts->ucb); |
341 | } | 340 | } |
342 | 341 | ||
343 | #ifdef CONFIG_PM | ||
344 | static int ucb1x00_ts_resume(struct ucb1x00_dev *dev) | ||
345 | { | ||
346 | struct ucb1x00_ts *ts = dev->priv; | ||
347 | |||
348 | if (ts->rtask != NULL) { | ||
349 | /* | ||
350 | * Restart the TS thread to ensure the | ||
351 | * TS interrupt mode is set up again | ||
352 | * after sleep. | ||
353 | */ | ||
354 | ts->restart = 1; | ||
355 | wake_up(&ts->irq_wait); | ||
356 | } | ||
357 | return 0; | ||
358 | } | ||
359 | #else | ||
360 | #define ucb1x00_ts_resume NULL | ||
361 | #endif | ||
362 | |||
363 | 342 | ||
364 | /* | 343 | /* |
365 | * Initialisation. | 344 | * Initialisation. |
@@ -382,7 +361,7 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) | |||
382 | ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC; | 361 | ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC; |
383 | 362 | ||
384 | idev->name = "Touchscreen panel"; | 363 | idev->name = "Touchscreen panel"; |
385 | idev->id.product = ts->ucb->id->driver_data; | 364 | idev->id.product = ts->ucb->id; |
386 | idev->open = ucb1x00_ts_open; | 365 | idev->open = ucb1x00_ts_open; |
387 | idev->close = ucb1x00_ts_close; | 366 | idev->close = ucb1x00_ts_close; |
388 | 367 | ||
@@ -425,7 +404,6 @@ static void ucb1x00_ts_remove(struct ucb1x00_dev *dev) | |||
425 | static struct ucb1x00_driver ucb1x00_ts_driver = { | 404 | static struct ucb1x00_driver ucb1x00_ts_driver = { |
426 | .add = ucb1x00_ts_add, | 405 | .add = ucb1x00_ts_add, |
427 | .remove = ucb1x00_ts_remove, | 406 | .remove = ucb1x00_ts_remove, |
428 | .resume = ucb1x00_ts_resume, | ||
429 | }; | 407 | }; |
430 | 408 | ||
431 | static int __init ucb1x00_ts_init(void) | 409 | static int __init ucb1x00_ts_init(void) |
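
The touchscreen thread above moves from an open-coded try_to_freeze() plus a separate restart flag to kthread_freezable_should_stop(), which both freezes the thread and reports whether it was frozen, so samples taken before suspend can be discarded. A minimal sketch of that loop shape; the work body is illustrative.

        #include <linux/kthread.h>
        #include <linux/freezer.h>
        #include <linux/sched.h>

        static int example_thread(void *data)
        {
                bool frozen;

                set_freezable();
                while (!kthread_freezable_should_stop(&frozen)) {
                        if (frozen) {
                                /* just thawed: reprogram hardware, drop stale state */
                        }
                        /* sample the device here, then pace the loop */
                        schedule_timeout_interruptible(HZ / 100);
                }
                return 0;
        }
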
diff --git a/fs/inode.c b/fs/inode.c index 4fa4f0916af9..fb10d86ffad7 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -322,9 +322,6 @@ EXPORT_SYMBOL(clear_nlink); | |||
322 | void set_nlink(struct inode *inode, unsigned int nlink) | 322 | void set_nlink(struct inode *inode, unsigned int nlink) |
323 | { | 323 | { |
324 | if (!nlink) { | 324 | if (!nlink) { |
325 | printk_ratelimited(KERN_INFO | ||
326 | "set_nlink() clearing i_nlink on %s inode %li\n", | ||
327 | inode->i_sb->s_type->name, inode->i_ino); | ||
328 | clear_nlink(inode); | 325 | clear_nlink(inode); |
329 | } else { | 326 | } else { |
330 | /* Yes, some filesystems do change nlink from zero to one */ | 327 | /* Yes, some filesystems do change nlink from zero to one */ |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index d76ca6ae2b1b..121f77cfef76 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -77,6 +77,8 @@ static int show_stat(struct seq_file *p, void *v) | |||
77 | steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; | 77 | steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; |
78 | guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; | 78 | guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
79 | guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; | 79 | guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
80 | sum += kstat_cpu_irqs_sum(i); | ||
81 | sum += arch_irq_stat_cpu(i); | ||
80 | 82 | ||
81 | for (j = 0; j < NR_SOFTIRQS; j++) { | 83 | for (j = 0; j < NR_SOFTIRQS; j++) { |
82 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); | 84 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); |
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c index 2bfd987f4853..6b009548d2e0 100644 --- a/fs/qnx4/inode.c +++ b/fs/qnx4/inode.c | |||
@@ -179,47 +179,33 @@ static const char *qnx4_checkroot(struct super_block *sb) | |||
179 | struct qnx4_inode_entry *rootdir; | 179 | struct qnx4_inode_entry *rootdir; |
180 | int rd, rl; | 180 | int rd, rl; |
181 | int i, j; | 181 | int i, j; |
182 | int found = 0; | ||
183 | 182 | ||
184 | if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') { | 183 | if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') |
185 | return "no qnx4 filesystem (no root dir)."; | 184 | return "no qnx4 filesystem (no root dir)."; |
186 | } else { | 185 | QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id)); |
187 | QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id)); | 186 | rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1; |
188 | rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1; | 187 | rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size); |
189 | rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size); | 188 | for (j = 0; j < rl; j++) { |
190 | for (j = 0; j < rl; j++) { | 189 | bh = sb_bread(sb, rd + j); /* root dir, first block */ |
191 | bh = sb_bread(sb, rd + j); /* root dir, first block */ | 190 | if (bh == NULL) |
192 | if (bh == NULL) { | 191 | return "unable to read root entry."; |
193 | return "unable to read root entry."; | 192 | rootdir = (struct qnx4_inode_entry *) bh->b_data; |
194 | } | 193 | for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) { |
195 | for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) { | 194 | QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname)); |
196 | rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE); | 195 | if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0) |
197 | if (rootdir->di_fname != NULL) { | 196 | continue; |
198 | QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname)); | 197 | qnx4_sb(sb)->BitMap = kmemdup(rootdir, |
199 | if (!strcmp(rootdir->di_fname, | 198 | sizeof(struct qnx4_inode_entry), |
200 | QNX4_BMNAME)) { | 199 | GFP_KERNEL); |
201 | found = 1; | ||
202 | qnx4_sb(sb)->BitMap = kmemdup(rootdir, | ||
203 | sizeof(struct qnx4_inode_entry), | ||
204 | GFP_KERNEL); | ||
205 | if (!qnx4_sb(sb)->BitMap) { | ||
206 | brelse (bh); | ||
207 | return "not enough memory for bitmap inode"; | ||
208 | }/* keep bitmap inode known */ | ||
209 | break; | ||
210 | } | ||
211 | } | ||
212 | } | ||
213 | brelse(bh); | 200 | brelse(bh); |
214 | if (found != 0) { | 201 | if (!qnx4_sb(sb)->BitMap) |
215 | break; | 202 | return "not enough memory for bitmap inode"; |
216 | } | 203 | /* keep bitmap inode known */ |
217 | } | 204 | return NULL; |
218 | if (found == 0) { | ||
219 | return "bitmap file not found."; | ||
220 | } | 205 | } |
206 | brelse(bh); | ||
221 | } | 207 | } |
222 | return NULL; | 208 | return "bitmap file not found."; |
223 | } | 209 | } |
224 | 210 | ||
225 | static int qnx4_fill_super(struct super_block *s, void *data, int silent) | 211 | static int qnx4_fill_super(struct super_block *s, void *data, int silent) |
@@ -270,7 +256,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
270 | if (IS_ERR(root)) { | 256 | if (IS_ERR(root)) { |
271 | printk(KERN_ERR "qnx4: get inode failed\n"); | 257 | printk(KERN_ERR "qnx4: get inode failed\n"); |
272 | ret = PTR_ERR(root); | 258 | ret = PTR_ERR(root); |
273 | goto out; | 259 | goto outb; |
274 | } | 260 | } |
275 | 261 | ||
276 | ret = -ENOMEM; | 262 | ret = -ENOMEM; |
@@ -283,6 +269,8 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent) | |||
283 | 269 | ||
284 | outi: | 270 | outi: |
285 | iput(root); | 271 | iput(root); |
272 | outb: | ||
273 | kfree(qs->BitMap); | ||
286 | out: | 274 | out: |
287 | brelse(bh); | 275 | brelse(bh); |
288 | outnobh: | 276 | outnobh: |
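
The qnx4 hunk above also adds an outb: label so that a failure after the bitmap inode has been duplicated frees it again; the general shape is a goto ladder whose labels undo allocations in reverse order. A self-contained sketch with kmalloc() standing in for the real allocations (names and sizes are made up):

        #include <linux/slab.h>

        static int example_fill(void)
        {
                void *bh, *bitmap, *root;
                int ret = -ENOMEM;

                bh = kmalloc(64, GFP_KERNEL);
                if (!bh)
                        goto out_nobh;
                bitmap = kmalloc(64, GFP_KERNEL);
                if (!bitmap)
                        goto out_bh;
                root = kmalloc(64, GFP_KERNEL);
                if (!root)
                        goto out_bitmap;        /* later failures must free the bitmap too */
                /* success: real code would stash bh, bitmap and root in the sb info */
                return 0;

        out_bitmap:
                kfree(bitmap);
        out_bh:
                kfree(bh);
        out_nobh:
                return ret;
        }
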
diff --git a/fs/super.c b/fs/super.c index de41e1e46f09..6015c02296b7 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -1186,6 +1186,8 @@ int freeze_super(struct super_block *sb) | |||
1186 | printk(KERN_ERR | 1186 | printk(KERN_ERR |
1187 | "VFS:Filesystem freeze failed\n"); | 1187 | "VFS:Filesystem freeze failed\n"); |
1188 | sb->s_frozen = SB_UNFROZEN; | 1188 | sb->s_frozen = SB_UNFROZEN; |
1189 | smp_wmb(); | ||
1190 | wake_up(&sb->s_wait_unfrozen); | ||
1189 | deactivate_locked_super(sb); | 1191 | deactivate_locked_super(sb); |
1190 | return ret; | 1192 | return ret; |
1191 | } | 1193 | } |
diff --git a/include/linux/key.h b/include/linux/key.h index bfc014c57351..5253471cd2ea 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -271,7 +271,7 @@ extern int keyring_add_key(struct key *keyring, | |||
271 | 271 | ||
272 | extern struct key *key_lookup(key_serial_t id); | 272 | extern struct key *key_lookup(key_serial_t id); |
273 | 273 | ||
274 | static inline key_serial_t key_serial(struct key *key) | 274 | static inline key_serial_t key_serial(const struct key *key) |
275 | { | 275 | { |
276 | return key ? key->serial : 0; | 276 | return key ? key->serial : 0; |
277 | } | 277 | } |
diff --git a/include/linux/mfd/mcp.h b/include/linux/mfd/mcp.h index 1515e64e3663..f88c1cc0cb0f 100644 --- a/include/linux/mfd/mcp.h +++ b/include/linux/mfd/mcp.h | |||
@@ -10,7 +10,6 @@ | |||
10 | #ifndef MCP_H | 10 | #ifndef MCP_H |
11 | #define MCP_H | 11 | #define MCP_H |
12 | 12 | ||
13 | #include <linux/mod_devicetable.h> | ||
14 | #include <mach/dma.h> | 13 | #include <mach/dma.h> |
15 | 14 | ||
16 | struct mcp_ops; | 15 | struct mcp_ops; |
@@ -27,7 +26,7 @@ struct mcp { | |||
27 | dma_device_t dma_telco_rd; | 26 | dma_device_t dma_telco_rd; |
28 | dma_device_t dma_telco_wr; | 27 | dma_device_t dma_telco_wr; |
29 | struct device attached_device; | 28 | struct device attached_device; |
30 | const char *codec; | 29 | int gpio_base; |
31 | }; | 30 | }; |
32 | 31 | ||
33 | struct mcp_ops { | 32 | struct mcp_ops { |
@@ -45,11 +44,10 @@ void mcp_reg_write(struct mcp *, unsigned int, unsigned int); | |||
45 | unsigned int mcp_reg_read(struct mcp *, unsigned int); | 44 | unsigned int mcp_reg_read(struct mcp *, unsigned int); |
46 | void mcp_enable(struct mcp *); | 45 | void mcp_enable(struct mcp *); |
47 | void mcp_disable(struct mcp *); | 46 | void mcp_disable(struct mcp *); |
48 | const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp); | ||
49 | #define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate) | 47 | #define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate) |
50 | 48 | ||
51 | struct mcp *mcp_host_alloc(struct device *, size_t); | 49 | struct mcp *mcp_host_alloc(struct device *, size_t); |
52 | int mcp_host_register(struct mcp *, void *); | 50 | int mcp_host_register(struct mcp *); |
53 | void mcp_host_unregister(struct mcp *); | 51 | void mcp_host_unregister(struct mcp *); |
54 | 52 | ||
55 | struct mcp_driver { | 53 | struct mcp_driver { |
@@ -58,7 +56,6 @@ struct mcp_driver { | |||
58 | void (*remove)(struct mcp *); | 56 | void (*remove)(struct mcp *); |
59 | int (*suspend)(struct mcp *, pm_message_t); | 57 | int (*suspend)(struct mcp *, pm_message_t); |
60 | int (*resume)(struct mcp *); | 58 | int (*resume)(struct mcp *); |
61 | const struct mcp_device_id *id_table; | ||
62 | }; | 59 | }; |
63 | 60 | ||
64 | int mcp_driver_register(struct mcp_driver *); | 61 | int mcp_driver_register(struct mcp_driver *); |
@@ -67,6 +64,9 @@ void mcp_driver_unregister(struct mcp_driver *); | |||
67 | #define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device) | 64 | #define mcp_get_drvdata(mcp) dev_get_drvdata(&(mcp)->attached_device) |
68 | #define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d) | 65 | #define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d) |
69 | 66 | ||
70 | #define mcp_priv(mcp) ((void *)((mcp)+1)) | 67 | static inline void *mcp_priv(struct mcp *mcp) |
68 | { | ||
69 | return mcp + 1; | ||
70 | } | ||
71 | 71 | ||
72 | #endif | 72 | #endif |
diff --git a/include/linux/mfd/ucb1x00.h b/include/linux/mfd/ucb1x00.h index bc19e5fb7ea8..4321f044d1e4 100644 --- a/include/linux/mfd/ucb1x00.h +++ b/include/linux/mfd/ucb1x00.h | |||
@@ -104,9 +104,6 @@ | |||
104 | #define UCB_MODE_DYN_VFLAG_ENA (1 << 12) | 104 | #define UCB_MODE_DYN_VFLAG_ENA (1 << 12) |
105 | #define UCB_MODE_AUD_OFF_CAN (1 << 13) | 105 | #define UCB_MODE_AUD_OFF_CAN (1 << 13) |
106 | 106 | ||
107 | struct ucb1x00_plat_data { | ||
108 | int gpio_base; | ||
109 | }; | ||
110 | 107 | ||
111 | struct ucb1x00_irq { | 108 | struct ucb1x00_irq { |
112 | void *devid; | 109 | void *devid; |
@@ -119,7 +116,7 @@ struct ucb1x00 { | |||
119 | unsigned int irq; | 116 | unsigned int irq; |
120 | struct semaphore adc_sem; | 117 | struct semaphore adc_sem; |
121 | spinlock_t io_lock; | 118 | spinlock_t io_lock; |
122 | const struct mcp_device_id *id; | 119 | u16 id; |
123 | u16 io_dir; | 120 | u16 io_dir; |
124 | u16 io_out; | 121 | u16 io_out; |
125 | u16 adc_cr; | 122 | u16 adc_cr; |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index b29e7f6f8fa5..83ac0713ed0a 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -436,17 +436,6 @@ struct spi_device_id { | |||
436 | __attribute__((aligned(sizeof(kernel_ulong_t)))); | 436 | __attribute__((aligned(sizeof(kernel_ulong_t)))); |
437 | }; | 437 | }; |
438 | 438 | ||
439 | /* mcp */ | ||
440 | |||
441 | #define MCP_NAME_SIZE 20 | ||
442 | #define MCP_MODULE_PREFIX "mcp:" | ||
443 | |||
444 | struct mcp_device_id { | ||
445 | char name[MCP_NAME_SIZE]; | ||
446 | kernel_ulong_t driver_data /* Data private to the driver */ | ||
447 | __attribute__((aligned(sizeof(kernel_ulong_t)))); | ||
448 | }; | ||
449 | |||
450 | /* dmi */ | 439 | /* dmi */ |
451 | enum dmi_field { | 440 | enum dmi_field { |
452 | DMI_NONE, | 441 | DMI_NONE, |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index db110b8ae030..f1539decd99d 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod) | |||
634 | int ret = 0; | 634 | int ret = 0; |
635 | 635 | ||
636 | /* | 636 | /* |
637 | * We skip modules that tain the kernel, especially those with different | 637 | * We skip modules that taint the kernel, especially those with different |
638 | * module header (for forced load), to make sure we don't cause a crash. | 638 | * module headers (for forced load), to make sure we don't cause a crash. |
639 | * Staging and out-of-tree GPL modules are fine. | ||
639 | */ | 640 | */ |
640 | if (mod->taints) | 641 | if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) |
641 | return 0; | 642 | return 0; |
642 | mutex_lock(&tracepoints_mutex); | 643 | mutex_lock(&tracepoints_mutex); |
643 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); | 644 | tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); |
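
The tracepoint change above narrows the earlier blanket check on mod->taints to a mask: a module is skipped only if it carries some taint other than TAINT_OOT_MODULE or TAINT_CRAP, since out-of-tree and staging modules still have trustworthy headers. A small sketch of that filter; the helper name is made up.

        #include <linux/kernel.h>       /* TAINT_OOT_MODULE, TAINT_CRAP */

        /* Illustrative helper: true if every set taint bit is one we tolerate. */
        static bool example_taint_acceptable(unsigned long taints)
        {
                unsigned long allowed = (1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP);

                return !(taints & ~allowed);
        }
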
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index fe84bb978e3b..716802b774ea 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c | |||
@@ -255,6 +255,8 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) | |||
255 | if (!n) | 255 | if (!n) |
256 | n++; /* avoid zero length allocation */ | 256 | n++; /* avoid zero length allocation */ |
257 | p = buffer = kmalloc(n, GFP_KERNEL); | 257 | p = buffer = kmalloc(n, GFP_KERNEL); |
258 | if (!p) | ||
259 | return NULL; | ||
258 | 260 | ||
259 | for (i = a->nlimbs - 1; i >= 0; i--) { | 261 | for (i = a->nlimbs - 1; i >= 0; i--) { |
260 | alimb = a->d[i]; | 262 | alimb = a->d[i]; |
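
The mpicoder fix above adds the missing allocation-failure check: kmalloc() can return NULL, and the result was previously used unconditionally. The defensive shape, as a self-contained sketch with an illustrative helper name:

        #include <linux/slab.h>

        static void *example_get_buffer(size_t n)
        {
                void *p;

                if (!n)
                        n = 1;                  /* avoid a zero-length allocation */
                p = kmalloc(n, GFP_KERNEL);
                if (!p)
                        return NULL;            /* let the caller handle the failure */
                return p;
        }
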
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index c0e14b3f2306..e8c969577768 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
@@ -823,16 +823,6 @@ static int do_spi_entry(const char *filename, struct spi_device_id *id, | |||
823 | } | 823 | } |
824 | ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry); | 824 | ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry); |
825 | 825 | ||
826 | /* Looks like: mcp:S */ | ||
827 | static int do_mcp_entry(const char *filename, struct mcp_device_id *id, | ||
828 | char *alias) | ||
829 | { | ||
830 | sprintf(alias, MCP_MODULE_PREFIX "%s", id->name); | ||
831 | |||
832 | return 1; | ||
833 | } | ||
834 | ADD_TO_DEVTABLE("mcp", struct mcp_device_id, do_mcp_entry); | ||
835 | |||
836 | static const struct dmifield { | 826 | static const struct dmifield { |
837 | const char *prefix; | 827 | const char *prefix; |
838 | int field; | 828 | int field; |
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index d661afbe474c..d45061d02fee 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c | |||
@@ -99,6 +99,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule, | |||
99 | struct inode *inode, enum ima_hooks func, int mask) | 99 | struct inode *inode, enum ima_hooks func, int mask) |
100 | { | 100 | { |
101 | struct task_struct *tsk = current; | 101 | struct task_struct *tsk = current; |
102 | const struct cred *cred = current_cred(); | ||
102 | int i; | 103 | int i; |
103 | 104 | ||
104 | if ((rule->flags & IMA_FUNC) && rule->func != func) | 105 | if ((rule->flags & IMA_FUNC) && rule->func != func) |
@@ -108,7 +109,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule, | |||
108 | if ((rule->flags & IMA_FSMAGIC) | 109 | if ((rule->flags & IMA_FSMAGIC) |
109 | && rule->fsmagic != inode->i_sb->s_magic) | 110 | && rule->fsmagic != inode->i_sb->s_magic) |
110 | return false; | 111 | return false; |
111 | if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid) | 112 | if ((rule->flags & IMA_UID) && rule->uid != cred->uid) |
112 | return false; | 113 | return false; |
113 | for (i = 0; i < MAX_LSM_RULES; i++) { | 114 | for (i = 0; i < MAX_LSM_RULES; i++) { |
114 | int rc = 0; | 115 | int rc = 0; |
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 69ff52c08e97..2aee3c5a3b99 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c | |||
@@ -59,7 +59,7 @@ int user_instantiate(struct key *key, const void *data, size_t datalen) | |||
59 | /* attach the data */ | 59 | /* attach the data */ |
60 | upayload->datalen = datalen; | 60 | upayload->datalen = datalen; |
61 | memcpy(upayload->data, data, datalen); | 61 | memcpy(upayload->data, data, datalen); |
62 | rcu_assign_pointer(key->payload.data, upayload); | 62 | rcu_assign_keypointer(key, upayload); |
63 | ret = 0; | 63 | ret = 0; |
64 | 64 | ||
65 | error: | 65 | error: |
@@ -98,7 +98,7 @@ int user_update(struct key *key, const void *data, size_t datalen) | |||
98 | if (ret == 0) { | 98 | if (ret == 0) { |
99 | /* attach the new data, displacing the old */ | 99 | /* attach the new data, displacing the old */ |
100 | zap = key->payload.data; | 100 | zap = key->payload.data; |
101 | rcu_assign_pointer(key->payload.data, upayload); | 101 | rcu_assign_keypointer(key, upayload); |
102 | key->expiry = 0; | 102 | key->expiry = 0; |
103 | } | 103 | } |
104 | 104 | ||
@@ -133,7 +133,7 @@ void user_revoke(struct key *key) | |||
133 | key_payload_reserve(key, 0); | 133 | key_payload_reserve(key, 0); |
134 | 134 | ||
135 | if (upayload) { | 135 | if (upayload) { |
136 | rcu_assign_pointer(key->payload.data, NULL); | 136 | rcu_assign_keypointer(key, NULL); |
137 | kfree_rcu(upayload, rcu); | 137 | kfree_rcu(upayload, rcu); |
138 | } | 138 | } |
139 | } | 139 | } |
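
The user_defined key hunks above switch from open-coded rcu_assign_pointer() on key->payload.data to the rcu_assign_keypointer() wrapper; underneath is the usual RCU publish pattern: fully initialise the new object, then publish the pointer with a primitive that orders the initialising stores before the pointer update. A generic sketch of that pattern, independent of the key API; all names below are illustrative.

        #include <linux/rcupdate.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        struct example_payload {
                size_t len;
                char   data[64];
        };

        static struct example_payload __rcu *example_ptr;

        static int example_publish(const char *src, size_t len)
        {
                struct example_payload *p;

                if (len > sizeof(p->data))
                        return -EINVAL;
                p = kmalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
                        return -ENOMEM;
                p->len = len;
                memcpy(p->data, src, len);              /* initialise fully ... */
                rcu_assign_pointer(example_ptr, p);     /* ... then publish */
                /* an old payload, if any, would be freed via kfree_rcu() as above */
                return 0;
        }
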