Diffstat (limited to 'arch/sh')
45 files changed, 1057 insertions, 760 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 0dc7e3cbeffa..266d422991e8 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -37,6 +37,7 @@ config SUPERH32
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_ARCH_KGDB
 	select ARCH_HIBERNATION_POSSIBLE if MMU
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index c08d33fe2104..ce01d6a953b8 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -18,6 +18,8 @@
 #include <linux/input.h>
 #include <linux/i2c.h>
 #include <linux/usb/r8a66597.h>
+#include <media/soc_camera.h>
+#include <media/sh_mobile_ceu.h>
 #include <video/sh_mobile_lcdc.h>
 #include <asm/clock.h>
 #include <asm/machvec.h>
@@ -212,11 +214,131 @@ static struct platform_device kfr2r09_usb0_gadget_device = {
 	.resource	= kfr2r09_usb0_gadget_resources,
 };
 
+static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
+	.flags = SH_CEU_FLAG_USE_8BIT_BUS,
+};
+
+static struct resource kfr2r09_ceu_resources[] = {
+	[0] = {
+		.name	= "CEU",
+		.start	= 0xfe910000,
+		.end	= 0xfe91009f,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 52,
+		.end	= 52,
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		/* place holder for contiguous memory */
+	},
+};
+
+static struct platform_device kfr2r09_ceu_device = {
+	.name		= "sh_mobile_ceu",
+	.id		= 0, /* "ceu0" clock */
+	.num_resources	= ARRAY_SIZE(kfr2r09_ceu_resources),
+	.resource	= kfr2r09_ceu_resources,
+	.dev	= {
+		.platform_data	= &sh_mobile_ceu_info,
+	},
+	.archdata = {
+		.hwblk_id = HWBLK_CEU0,
+	},
+};
+
+static struct i2c_board_info kfr2r09_i2c_camera = {
+	I2C_BOARD_INFO("rj54n1cb0c", 0x50),
+};
+
+static struct clk *camera_clk;
+
+#define DRVCRB 0xA405018C
+static int camera_power(struct device *dev, int mode)
+{
+	int ret;
+
+	if (mode) {
+		long rate;
+
+		camera_clk = clk_get(NULL, "video_clk");
+		if (IS_ERR(camera_clk))
+			return PTR_ERR(camera_clk);
+
+		/* set VIO_CKO clock to 25MHz */
+		rate = clk_round_rate(camera_clk, 25000000);
+		ret = clk_set_rate(camera_clk, rate);
+		if (ret < 0)
+			goto eclkrate;
+
+		/* set DRVCRB
+		 *
+		 * use 1.8 V for VccQ_VIO
+		 * use 2.85V for VccQ_SR
+		 */
+		ctrl_outw((ctrl_inw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);
+
+		/* reset clear */
+		ret = gpio_request(GPIO_PTB4, NULL);
+		if (ret < 0)
+			goto eptb4;
+		ret = gpio_request(GPIO_PTB7, NULL);
+		if (ret < 0)
+			goto eptb7;
+
+		ret = gpio_direction_output(GPIO_PTB4, 1);
+		if (!ret)
+			ret = gpio_direction_output(GPIO_PTB7, 1);
+		if (ret < 0)
+			goto egpioout;
+		msleep(1);
+
+		ret = clk_enable(camera_clk);	/* start VIO_CKO */
+		if (ret < 0)
+			goto eclkon;
+
+		return 0;
+	}
+
+	ret = 0;
+
+	clk_disable(camera_clk);
+eclkon:
+	gpio_set_value(GPIO_PTB7, 0);
+egpioout:
+	gpio_set_value(GPIO_PTB4, 0);
+	gpio_free(GPIO_PTB7);
+eptb7:
+	gpio_free(GPIO_PTB4);
+eptb4:
+eclkrate:
+	clk_put(camera_clk);
+	return ret;
+}
+
+static struct soc_camera_link rj54n1_link = {
+	.power		= camera_power,
+	.board_info	= &kfr2r09_i2c_camera,
+	.i2c_adapter_id	= 1,
+	.module_name	= "rj54n1cb0c",
+};
+
+static struct platform_device kfr2r09_camera = {
+	.name	= "soc-camera-pdrv",
+	.id	= 0,
+	.dev	= {
+		.platform_data = &rj54n1_link,
+	},
+};
+
 static struct platform_device *kfr2r09_devices[] __initdata = {
 	&kfr2r09_nor_flash_device,
 	&kfr2r09_nand_flash_device,
 	&kfr2r09_sh_keysc_device,
 	&kfr2r09_sh_lcdc_device,
+	&kfr2r09_ceu_device,
+	&kfr2r09_camera,
 };
 
 #define BSC_CS0BCR 0xfec10004
@@ -361,6 +483,23 @@ static int __init kfr2r09_devices_setup(void)
 	if (kfr2r09_usb0_gadget_setup() == 0)
 		platform_device_register(&kfr2r09_usb0_gadget_device);
 
+	/* CEU */
+	gpio_request(GPIO_FN_VIO_CKO, NULL);
+	gpio_request(GPIO_FN_VIO0_CLK, NULL);
+	gpio_request(GPIO_FN_VIO0_VD, NULL);
+	gpio_request(GPIO_FN_VIO0_HD, NULL);
+	gpio_request(GPIO_FN_VIO0_FLD, NULL);
+	gpio_request(GPIO_FN_VIO0_D7, NULL);
+	gpio_request(GPIO_FN_VIO0_D6, NULL);
+	gpio_request(GPIO_FN_VIO0_D5, NULL);
+	gpio_request(GPIO_FN_VIO0_D4, NULL);
+	gpio_request(GPIO_FN_VIO0_D3, NULL);
+	gpio_request(GPIO_FN_VIO0_D2, NULL);
+	gpio_request(GPIO_FN_VIO0_D1, NULL);
+	gpio_request(GPIO_FN_VIO0_D0, NULL);
+
+	platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);
+
 	return platform_add_devices(kfr2r09_devices,
 				    ARRAY_SIZE(kfr2r09_devices));
 }
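Note: the error path in camera_power() above is the kernel's usual reverse-order goto unwind: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far. A minimal standalone sketch of the idiom (helper names are hypothetical, userspace C for illustration):

	#include <stdio.h>

	static int get_a(void) { return 0; }	/* hypothetical acquire helper */
	static int get_b(void) { return -1; }	/* fails, to exercise the unwind */
	static void put_a(void) { puts("put_a"); }

	static int acquire_both(void)
	{
		int ret;

		ret = get_a();
		if (ret < 0)
			goto err_a;

		ret = get_b();
		if (ret < 0)
			goto err_b;	/* undo in reverse order of acquisition */

		return 0;

	err_b:
		put_a();
	err_a:
		return ret;
	}

	int main(void)
	{
		return acquire_both() ? 1 : 0;
	}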
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index fd56a71ca9d9..b51b1fc4baae 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -131,7 +131,7 @@ void decompress_kernel(void)
 #ifdef CONFIG_SUPERH64
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
-	output_addr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
+	output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
 #ifdef CONFIG_29BIT
 	output_addr |= P2SEG;
 #endif
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 80d40813e057..99d6b3ecbe22 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -28,9 +28,6 @@
 /* Returns the privileged segment base of a given address */
 #define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
 
-/* Returns the physical address of a PnSEG (n=1,2) address */
-#define PHYSADDR(a)	(((unsigned long)(a)) & 0x1fffffff)
-
 #if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
 /*
  * Map an address to a certain privileged segment
@@ -60,5 +57,11 @@
 #define P3_ADDR_MAX		P4SEG
 #endif
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_PMB
+extern int __in_29bit_mode(void);
+#endif /* CONFIG_PMB */
+#endif /* __ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 46260fcbdf4b..02a19a1c033a 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -14,11 +14,15 @@
 
 #include <asm/processor.h>
 
+extern void select_idle_routine(void);
+
 static void __init check_bugs(void)
 {
 	extern unsigned long loops_per_jiffy;
 	char *p = &init_utsname()->machine[2]; /* "sh" */
 
+	select_idle_routine();
+
 	current_cpu_data.loops_per_jiffy = loops_per_jiffy;
 
 	switch (current_cpu_data.family) {
diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h
index ced6795891a6..bdccbbfdc0bd 100644
--- a/arch/sh/include/asm/dwarf.h
+++ b/arch/sh/include/asm/dwarf.h
@@ -194,6 +194,12 @@
 #define DWARF_ARCH_RA_REG	17
 
 #ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+#include <linux/bug.h>
+#include <linux/list.h>
+#include <linux/module.h>
+
 /*
  * Read either the frame pointer (r14) or the stack pointer (r15).
  * NOTE: this MUST be inlined.
@@ -241,6 +247,12 @@ struct dwarf_cie {
 
 	unsigned long flags;
 #define DWARF_CIE_Z_AUGMENTATION	(1 << 0)
+
+	/*
+	 * 'mod' will be non-NULL if this CIE came from a module's
+	 * .eh_frame section.
+	 */
+	struct module *mod;
 };
 
 /**
@@ -255,6 +267,12 @@ struct dwarf_fde {
 	unsigned char *instructions;
 	unsigned char *end;
 	struct list_head link;
+
+	/*
+	 * 'mod' will be non-NULL if this FDE came from a module's
+	 * .eh_frame section.
+	 */
+	struct module *mod;
 };
 
 /**
@@ -364,6 +382,12 @@ static inline unsigned int DW_CFA_operand(unsigned long insn)
 
 extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
 					      struct dwarf_frame *);
+extern void dwarf_free_frame(struct dwarf_frame *);
+
+extern int module_dwarf_finalize(const Elf_Ehdr *, const Elf_Shdr *,
+				 struct module *);
+extern void module_dwarf_cleanup(struct module *);
+
 #endif /* !__ASSEMBLY__ */
 
 #define CFI_STARTPROC	.cfi_startproc
@@ -391,6 +415,10 @@ extern struct dwarf_frame *dwarf_unwind_stack(unsigned long,
 static inline void dwarf_unwinder_init(void)
 {
 }
+
+#define module_dwarf_finalize(hdr, sechdrs, me)	(0)
+#define module_dwarf_cleanup(mod)			do { } while (0)
+
 #endif
 
 #endif /* CONFIG_DWARF_UNWINDER */
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 721fcc4d5e98..76c5a3099cb8 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -14,9 +14,9 @@
 #define _ASM_FIXMAP_H
 
 #include <linux/kernel.h>
+#include <linux/threads.h>
 #include <asm/page.h>
 #ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #endif
 
@@ -46,9 +46,9 @@
  * fix-mapped?
  */
 enum fixed_addresses {
-#define FIX_N_COLOURS 16
+#define FIX_N_COLOURS 8
 	FIX_CMAP_BEGIN,
-	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS),
 	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
diff --git a/arch/sh/include/asm/ftrace.h b/arch/sh/include/asm/ftrace.h
index 12f3a31f20af..13e9966464c2 100644
--- a/arch/sh/include/asm/ftrace.h
+++ b/arch/sh/include/asm/ftrace.h
@@ -35,4 +35,21 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifndef __ASSEMBLY__
+
+/* arch/sh/kernel/return_address.c */
+extern void *return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 ((unsigned long)return_address(1))
+#define CALLER_ADDR2 ((unsigned long)return_address(2))
+#define CALLER_ADDR3 ((unsigned long)return_address(3))
+#define CALLER_ADDR4 ((unsigned long)return_address(4))
+#define CALLER_ADDR5 ((unsigned long)return_address(5))
+#define CALLER_ADDR6 ((unsigned long)return_address(6))
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __ASM_SH_FTRACE_H */
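Note: CALLER_ADDR0 stays a compiler builtin, while the deeper levels go through the new unwinder-backed return_address() (added to arch/sh/kernel in this series), since __builtin_return_address(n > 0) is unreliable without frame pointers. A hedged sketch of how a caller might use these macros; the function name is hypothetical, %pS is the standard symbol format specifier:

	/* hypothetical example: log who called us and their caller */
	static void trace_call_site(void)
	{
		printk(KERN_DEBUG "called from %pS, which was called from %pS\n",
		       (void *)CALLER_ADDR0, (void *)CALLER_ADDR1);
	}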
diff --git a/arch/sh/include/asm/hardirq.h b/arch/sh/include/asm/hardirq.h
index a5be4afa790b..48b191313a99 100644
--- a/arch/sh/include/asm/hardirq.h
+++ b/arch/sh/include/asm/hardirq.h
@@ -1,9 +1,16 @@
 #ifndef __ASM_SH_HARDIRQ_H
 #define __ASM_SH_HARDIRQ_H
 
-extern void ack_bad_irq(unsigned int irq);
-#define ack_bad_irq ack_bad_irq
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+	unsigned int __softirq_pending;
+	unsigned int __nmi_count;		/* arch dependent */
+} ____cacheline_aligned irq_cpustat_t;
 
-#include <asm-generic/hardirq.h>
+#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+
+extern void ack_bad_irq(unsigned int irq);
 
 #endif /* __ASM_SH_HARDIRQ_H */
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 5be45ea4dfec..0cf2a5708e26 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -246,7 +246,7 @@ void __iounmap(void __iomem *addr);
 static inline void __iomem *
 __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 {
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
 	unsigned long last_addr = offset + size - 1;
 #endif
 	void __iomem *ret;
@@ -255,7 +255,7 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
 	if (ret)
 		return ret;
 
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
 	/*
 	 * For P1 and P2 space this is trivial, as everything is already
 	 * mapped. Uncached access for P1 addresses are done through P2.
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index f5963037c9d6..c7426ad9926e 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -7,12 +7,16 @@
 #define PMB_PASCR		0xff000070
 #define PMB_IRMCR		0xff000078
 
+#define PASCR_SE		0x80000000
+
 #define PMB_ADDR		0xf6100000
 #define PMB_DATA		0xf7100000
 #define PMB_ENTRY_MAX		16
 #define PMB_E_MASK		0x0000000f
 #define PMB_E_SHIFT		8
 
+#define PMB_PFN_MASK		0xff000000
+
 #define PMB_SZ_16M		0x00000000
 #define PMB_SZ_64M		0x00000010
 #define PMB_SZ_128M		0x00000080
@@ -62,17 +66,10 @@ struct pmb_entry {
 };
 
 /* arch/sh/mm/pmb.c */
-int __set_pmb_entry(unsigned long vpn, unsigned long ppn,
-		    unsigned long flags, int *entry);
-int set_pmb_entry(struct pmb_entry *pmbe);
-void clear_pmb_entry(struct pmb_entry *pmbe);
-struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
-			    unsigned long flags);
-void pmb_free(struct pmb_entry *pmbe);
 long pmb_remap(unsigned long virt, unsigned long phys,
 	       unsigned long size, unsigned long flags);
 void pmb_unmap(unsigned long addr);
+int pmb_init(void);
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MMU_H */
-
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 4f3efa7d5a64..ba3046e4f06f 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -75,13 +75,31 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 
-#ifdef CONFIG_32BIT
-#define PHYS_ADDR_MASK		0xffffffff
+#define PHYS_ADDR_MASK29	0x1fffffff
+#define PHYS_ADDR_MASK32	0xffffffff
+
+#ifdef CONFIG_PMB
+static inline unsigned long phys_addr_mask(void)
+{
+	/* Is the MMU in 29bit mode? */
+	if (__in_29bit_mode())
+		return PHYS_ADDR_MASK29;
+
+	return PHYS_ADDR_MASK32;
+}
+#elif defined(CONFIG_32BIT)
+static inline unsigned long phys_addr_mask(void)
+{
+	return PHYS_ADDR_MASK32;
+}
 #else
-#define PHYS_ADDR_MASK		0x1fffffff
+static inline unsigned long phys_addr_mask(void)
+{
+	return PHYS_ADDR_MASK29;
+}
 #endif
 
-#define PTE_PHYS_MASK		(PHYS_ADDR_MASK & PAGE_MASK)
+#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
 #define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
 
 #ifdef CONFIG_SUPERH32
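Note: with PTE_PHYS_MASK now expanding to a phys_addr_mask() call, the 29-bit/32-bit decision moves from compile time to run time on CONFIG_PMB kernels (and stays constant-foldable elsewhere). A small sketch of what a consumer sees; pte_val() is the standard accessor, pte_phys() here is a hypothetical name:

	static inline unsigned long pte_phys(pte_t pte)
	{
		/* mask resolved at run time on CONFIG_PMB, constant otherwise */
		return pte_val(pte) & PTE_PHYS_MASK;
	}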
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index c0d359ce337b..b35435516203 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -108,7 +108,7 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 #define _PAGE_CLEAR_FLAGS	(_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
 #endif
 
-#define _PAGE_FLAGS_HARDWARE_MASK	(PHYS_ADDR_MASK & ~(_PAGE_CLEAR_FLAGS))
+#define _PAGE_FLAGS_HARDWARE_MASK	(phys_addr_mask() & ~(_PAGE_CLEAR_FLAGS))
 
 /* Hardware flags, page size encoding */
 #if !defined(CONFIG_MMU)
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
index 327cc2e4c97b..e38d1d4c7f6f 100644
--- a/arch/sh/include/asm/scatterlist.h
+++ b/arch/sh/include/asm/scatterlist.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SH_SCATTERLIST_H
 #define __ASM_SH_SCATTERLIST_H
 
-#define ISA_DMA_THRESHOLD	PHYS_ADDR_MASK
+#define ISA_DMA_THRESHOLD	phys_addr_mask()
 
 #include <asm-generic/scatterlist.h>
 
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index bdeb9d46d17d..23eeed89467a 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -19,6 +19,7 @@ struct thread_info {
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
 	unsigned long		flags;		/* low level flags */
+	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;
 	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
 	mm_segment_t		addr_limit;	/* thread address space */
@@ -111,7 +112,6 @@ extern void free_thread_info(struct thread_info *ti);
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_RESTORE_SIGMASK	3	/* restore signal mask in do_signal() */
 #define TIF_SINGLESTEP		4	/* singlestepping active */
 #define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
 #define TIF_SECCOMP		6	/* secure computing */
@@ -125,7 +125,6 @@ extern void free_thread_info(struct thread_info *ti);
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
@@ -149,13 +148,32 @@ extern void free_thread_info(struct thread_info *ti);
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING      | \
 				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT   | \
-				 _TIF_SINGLESTEP    | _TIF_RESTORE_SIGMASK | \
-				 _TIF_NOTIFY_RESUME | _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME   | \
+				 _TIF_SYSCALL_TRACEPOINT)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
 				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
 
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
+
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK	1
+static inline void set_restore_sigmask(void)
+{
+	struct thread_info *ti = current_thread_info();
+	ti->status |= TS_RESTORE_SIGMASK;
+	set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags);
+}
+#endif	/* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASM_SH_THREAD_INFO_H */
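Note: TIF_RESTORE_SIGMASK becomes TS_RESTORE_SIGMASK in the new thread-synchronous status word, which only the owning task ever touches, so it needs no atomic bit operations; set_restore_sigmask() still raises TIF_SIGPENDING so the signal path runs. The consumer side in do_signal() would then follow the pattern other architectures already use; a hedged sketch (the wrapper function name is hypothetical):

	/* sketch of the do_signal() side, modeled on other architectures */
	static void restore_saved_sigmask_sketch(void)
	{
		struct thread_info *ti = current_thread_info();

		if (ti->status & TS_RESTORE_SIGMASK) {
			ti->status &= ~TS_RESTORE_SIGMASK;
			sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
		}
	}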
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 65e7bd2f2240..37cdadd975ac 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -40,6 +40,14 @@
 
 #endif
 
+#define mc_capable()	(1)
+
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_SH_TOPOLOGY_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index a2d0a40f3848..f8791203cfe3 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -9,8 +9,11 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
 endif
 
+CFLAGS_REMOVE_return_address.o = -pg
+
 obj-y	:= debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o	\
 	   machvec.o nmi_debug.o process_$(BITS).o ptrace_$(BITS).o	\
+	   return_address.o						\
 	   setup.o signal_$(BITS).o sys_sh.o sys_sh$(BITS).o		\
 	   syscalls_$(BITS).o time.o topology.o traps.o			\
 	   traps_$(BITS).o unwinder.o
diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
index e848443deeb9..485330cf8549 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c
@@ -268,7 +268,11 @@ enum {
 	UNUSED = 0,
 
 	/* interrupt sources */
-	IRL, IRQ0, IRQ1, IRQ2, IRQ3,
+	IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+	IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+	IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+	IRL_HHLL, IRL_HHLH, IRL_HHHL,
+	IRQ0, IRQ1, IRQ2, IRQ3,
 	HUDII,
 	TMU0, TMU1, TMU2, TMU3, TMU4, TMU5,
 	PCII0, PCII1, PCII2, PCII3, PCII4,
@@ -291,7 +295,7 @@ enum {
 	INTICI4, INTICI5, INTICI6, INTICI7,
 
 	/* interrupt groups */
-	PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
+	IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3,
 	DMAC0, DMAC1,
 };
 
@@ -344,6 +348,10 @@ static struct intc_vect vectors[] __initdata = {
 };
 
 static struct intc_group groups[] __initdata = {
+	INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH,
+		   IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH,
+		   IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH,
+		   IRL_HHLL, IRL_HHLH, IRL_HHHL),
 	INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9),
 	INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI),
 	INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI),
@@ -419,14 +427,14 @@ static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups,
 
 /* External interrupt pins in IRL mode */
 static struct intc_vect vectors_irl[] __initdata = {
-	INTC_VECT(IRL, 0x200), INTC_VECT(IRL, 0x220),
-	INTC_VECT(IRL, 0x240), INTC_VECT(IRL, 0x260),
-	INTC_VECT(IRL, 0x280), INTC_VECT(IRL, 0x2a0),
-	INTC_VECT(IRL, 0x2c0), INTC_VECT(IRL, 0x2e0),
-	INTC_VECT(IRL, 0x300), INTC_VECT(IRL, 0x320),
-	INTC_VECT(IRL, 0x340), INTC_VECT(IRL, 0x360),
-	INTC_VECT(IRL, 0x380), INTC_VECT(IRL, 0x3a0),
-	INTC_VECT(IRL, 0x3c0),
+	INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220),
+	INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260),
+	INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0),
+	INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0),
+	INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320),
+	INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360),
+	INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0),
+	INTC_VECT(IRL_HHHL, 0x3c0),
 };
 
 static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups,
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 185ec3976a25..5863e0c4d02f 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -14,6 +14,13 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 
+#define STBCR_REG(phys_id)	(0xfe400004 | (phys_id << 12))
+#define RESET_REG(phys_id)	(0xfe400008 | (phys_id << 12))
+
+#define STBCR_MSTP		0x00000001
+#define STBCR_RESET		0x00000002
+#define STBCR_LTSLP		0x80000000
+
 static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
 {
 	unsigned int message = (unsigned int)(long)arg;
@@ -21,9 +28,9 @@ static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
 	unsigned int offs = 4 * cpu;
 	unsigned int x;
 
-	x = ctrl_inl(0xfe410070 + offs);	/* C0INITICI..CnINTICI */
+	x = __raw_readl(0xfe410070 + offs);	/* C0INITICI..CnINTICI */
 	x &= (1 << (message << 2));
-	ctrl_outl(x, 0xfe410080 + offs);	/* C0INTICICLR..CnINTICICLR */
+	__raw_writel(x, 0xfe410080 + offs);	/* C0INTICICLR..CnINTICICLR */
 
 	smp_message_recv(message);
 
@@ -37,6 +44,9 @@ void __init plat_smp_setup(void)
 
 	init_cpu_possible(cpumask_of(cpu));
 
+	/* Enable light sleep for the boot CPU */
+	__raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu));
+
 	__cpu_number_map[0] = 0;
 	__cpu_logical_map[0] = 0;
 
@@ -66,32 +76,23 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 				    "IPI", (void *)(long)i);
 }
 
-#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12))
-#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12))
-
-#define STBCR_MSTP	0x00000001
-#define STBCR_RESET	0x00000002
-#define STBCR_LTSLP	0x80000000
-
-#define STBCR_AP_VAL	(STBCR_RESET | STBCR_LTSLP)
-
 void plat_start_cpu(unsigned int cpu, unsigned long entry_point)
 {
-	ctrl_outl(entry_point, RESET_REG(cpu));
+	__raw_writel(entry_point, RESET_REG(cpu));
 
-	if (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
-		ctrl_outl(STBCR_MSTP, STBCR_REG(cpu));
+	if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
+		__raw_writel(STBCR_MSTP, STBCR_REG(cpu));
 
-	while (!(ctrl_inl(STBCR_REG(cpu)) & STBCR_MSTP))
+	while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP))
 		cpu_relax();
 
 	/* Start up secondary processor by sending a reset */
-	ctrl_outl(STBCR_AP_VAL, STBCR_REG(cpu));
+	__raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu));
 }
 
 int plat_smp_processor_id(void)
 {
-	return ctrl_inl(0xff000048); /* CPIDR */
+	return __raw_readl(0xff000048); /* CPIDR */
 }
 
 void plat_send_ipi(unsigned int cpu, unsigned int message)
@@ -100,5 +101,5 @@ void plat_send_ipi(unsigned int cpu, unsigned int message)
 
 	BUG_ON(cpu >= 4);
 
-	ctrl_outl(1 << (message << 2), addr);	/* C0INTICI..CnINTICI */
+	__raw_writel(1 << (message << 2), addr);	/* C0INTICI..CnINTICI */
 }
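Note: the ctrl_inl()/ctrl_outl() calls are renamed to the generic __raw_readl()/__raw_writel() accessors; behavior is unchanged (raw, unordered 32-bit MMIO, passing plain addresses as this file already does). Hoisting the STBCR macros to the top of the file is what lets plat_smp_setup() do the light-sleep read-modify-write shown above. The same pattern wrapped in a helper, as a sketch (the helper name is hypothetical):

	static inline void stbcr_set(unsigned int phys_id, unsigned long bits)
	{
		unsigned long reg = STBCR_REG(phys_id);

		/* read-modify-write: OR in the requested STBCR bits */
		__raw_writel(__raw_readl(reg) | bits, reg);
	}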
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index b0aacf675258..8f13f73cb2cb 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -933,7 +933,7 @@ ret_with_reschedule:
 
 	pta	restore_all, tr1
 
-	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
+	movi	_TIF_SIGPENDING, r8
 	and	r8, r7, r8
 	pta	work_notifysig, tr0
 	bne	r8, ZERO, tr0
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 03b3616c80a5..718286be6648 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/mm.h>
+#include <linux/elf.h>
 #include <asm/dwarf.h>
 #include <asm/unwinder.h>
 #include <asm/sections.h>
@@ -529,7 +530,18 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start,
 }
 
 /**
- * dwarf_unwind_stack - recursively unwind the stack
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+	dwarf_frame_free_regs(frame);
+	mempool_free(frame, dwarf_frame_pool);
+}
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
  * @pc: address of the function to unwind
  * @prev: struct dwarf_frame of the previous stackframe on the callstack
  *
@@ -547,9 +559,9 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	unsigned long addr;
 
 	/*
-	 * If this is the first invocation of this recursive function we
-	 * need get the contents of a physical register to get the CFA
-	 * in order to begin the virtual unwinding of the stack.
+	 * If we're starting at the top of the stack we need get the
+	 * contents of a physical register to get the CFA in order to
+	 * begin the virtual unwinding of the stack.
 	 *
 	 * NOTE: the return address is guaranteed to be setup by the
 	 * time this function makes its first function call.
@@ -571,9 +583,8 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	fde = dwarf_lookup_fde(pc);
 	if (!fde) {
 		/*
-		 * This is our normal exit path - the one that stops the
-		 * recursion. There's two reasons why we might exit
-		 * here,
+		 * This is our normal exit path. There are two reasons
+		 * why we might exit here,
 		 *
 		 * a) pc has no asscociated DWARF frame info and so
 		 *    we don't know how to unwind this frame. This is
@@ -615,10 +626,10 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 
 	} else {
 		/*
-		 * Again, this is the first invocation of this
-		 * recurisve function. We need to physically
-		 * read the contents of a register in order to
-		 * get the Canonical Frame Address for this
+		 * Again, we're starting from the top of the
+		 * stack. We need to physically read
+		 * the contents of a register in order to get
+		 * the Canonical Frame Address for this
 		 * function.
 		 */
 		frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
@@ -648,13 +659,12 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
 	return frame;
 
 bail:
-	dwarf_frame_free_regs(frame);
-	mempool_free(frame, dwarf_frame_pool);
+	dwarf_free_frame(frame);
 	return NULL;
 }
 
 static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_cie *cie;
 	unsigned long flags;
@@ -750,6 +760,8 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 	cie->initial_instructions = p;
 	cie->instructions_end = end;
 
+	cie->mod = mod;
+
 	/* Add to list */
 	spin_lock_irqsave(&dwarf_cie_lock, flags);
 	list_add_tail(&cie->link, &dwarf_cie_list);
@@ -760,7 +772,7 @@ static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
 
 static int dwarf_parse_fde(void *entry, u32 entry_type,
 			   void *start, unsigned long len,
-			   unsigned char *end)
+			   unsigned char *end, struct module *mod)
 {
 	struct dwarf_fde *fde;
 	struct dwarf_cie *cie;
@@ -809,6 +821,8 @@ static int dwarf_parse_fde(void *entry, u32 entry_type,
 	fde->instructions = p;
 	fde->end = end;
 
+	fde->mod = mod;
+
 	/* Add to list. */
 	spin_lock_irqsave(&dwarf_fde_lock, flags);
 	list_add_tail(&fde->link, &dwarf_fde_list);
@@ -832,10 +846,8 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 	while (1) {
 		frame = dwarf_unwind_stack(return_addr, _frame);
 
-		if (_frame) {
-			dwarf_frame_free_regs(_frame);
-			mempool_free(_frame, dwarf_frame_pool);
-		}
+		if (_frame)
+			dwarf_free_frame(_frame);
 
 		_frame = frame;
 
@@ -845,6 +857,9 @@ static void dwarf_unwinder_dump(struct task_struct *task,
 		return_addr = frame->return_addr;
 		ops->address(data, return_addr, 1);
 	}
+
+	if (frame)
+		dwarf_free_frame(frame);
 }
 
 static struct unwinder dwarf_unwinder = {
@@ -874,15 +889,15 @@ static void dwarf_unwinder_cleanup(void)
 }
 
 /**
- * dwarf_unwinder_init - initialise the dwarf unwinder
+ * dwarf_parse_section - parse DWARF section
+ * @eh_frame_start: start address of the .eh_frame section
+ * @eh_frame_end: end address of the .eh_frame section
+ * @mod: the kernel module containing the .eh_frame section
  *
- * Build the data structures describing the .dwarf_frame section to
- * make it easier to lookup CIE and FDE entries. Because the
- * .eh_frame section is packed as tightly as possible it is not
- * easy to lookup the FDE for a given PC, so we build a list of FDE
- * and CIE entries that make it easier.
+ * Parse the information in a .eh_frame section.
  */
-static int __init dwarf_unwinder_init(void)
+static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
+			       struct module *mod)
 {
 	u32 entry_type;
 	void *p, *entry;
@@ -890,32 +905,12 @@ static int __init dwarf_unwinder_init(void)
 	unsigned long len;
 	unsigned int c_entries, f_entries;
 	unsigned char *end;
-	INIT_LIST_HEAD(&dwarf_cie_list);
-	INIT_LIST_HEAD(&dwarf_fde_list);
 
 	c_entries = 0;
 	f_entries = 0;
-	entry = &__start_eh_frame;
+	entry = eh_frame_start;
 
-	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
-			sizeof(struct dwarf_frame), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
-			sizeof(struct dwarf_reg), 0,
-			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
-
-	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
-					  mempool_alloc_slab,
-					  mempool_free_slab,
-					  dwarf_frame_cachep);
-
-	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
-					mempool_alloc_slab,
-					mempool_free_slab,
-					dwarf_reg_cachep);
-
-	while ((char *)entry < __stop_eh_frame) {
+	while ((char *)entry < eh_frame_end) {
 		p = entry;
 
 		count = dwarf_entry_len(p, &len);
@@ -927,6 +922,7 @@ static int __init dwarf_unwinder_init(void)
 			 * entry and move to the next one because 'len'
 			 * tells us where our next entry is.
 			 */
+			err = -EINVAL;
 			goto out;
 		} else
 			p += count;
@@ -938,13 +934,14 @@ static int __init dwarf_unwinder_init(void)
 		p += 4;
 
 		if (entry_type == DW_EH_FRAME_CIE) {
-			err = dwarf_parse_cie(entry, p, len, end);
+			err = dwarf_parse_cie(entry, p, len, end, mod);
 			if (err < 0)
 				goto out;
 			else
 				c_entries++;
 		} else {
-			err = dwarf_parse_fde(entry, entry_type, p, len, end);
+			err = dwarf_parse_fde(entry, entry_type, p, len,
+					      end, mod);
 			if (err < 0)
 				goto out;
 			else
@@ -957,6 +954,129 @@ static int __init dwarf_unwinder_init(void)
 	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
 	       c_entries, f_entries);
 
+	return 0;
+
+out:
+	return err;
+}
+
+#ifdef CONFIG_MODULES
+int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			  struct module *me)
+{
+	unsigned int i, err;
+	unsigned long start, end;
+	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	start = end = 0;
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		/* Alloc bit cleared means "ignore it." */
+		if ((sechdrs[i].sh_flags & SHF_ALLOC)
+		    && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
+			start = sechdrs[i].sh_addr;
+			end = start + sechdrs[i].sh_size;
+			break;
+		}
+	}
+
+	/* Did we find the .eh_frame section? */
+	if (i != hdr->e_shnum) {
+		err = dwarf_parse_section((char *)start, (char *)end, me);
+		if (err) {
+			printk(KERN_WARNING "%s: failed to parse DWARF info\n",
+			       me->name);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
+ * @mod: the module that is being unloaded
+ *
+ * Remove any FDEs and CIEs from the global lists that came from
+ * @mod's .eh_frame section because @mod is being unloaded.
+ */
+void module_dwarf_cleanup(struct module *mod)
+{
+	struct dwarf_fde *fde;
+	struct dwarf_cie *cie;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dwarf_cie_lock, flags);
+
+again_cie:
+	list_for_each_entry(cie, &dwarf_cie_list, link) {
+		if (cie->mod == mod)
+			break;
+	}
+
+	if (&cie->link != &dwarf_cie_list) {
+		list_del(&cie->link);
+		kfree(cie);
+		goto again_cie;
+	}
+
+	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
+
+	spin_lock_irqsave(&dwarf_fde_lock, flags);
+
+again_fde:
+	list_for_each_entry(fde, &dwarf_fde_list, link) {
+		if (fde->mod == mod)
+			break;
+	}
+
+	if (&fde->link != &dwarf_fde_list) {
+		list_del(&fde->link);
+		kfree(fde);
+		goto again_fde;
+	}
+
+	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
+}
+#endif /* CONFIG_MODULES */
+
+/**
+ * dwarf_unwinder_init - initialise the dwarf unwinder
+ *
+ * Build the data structures describing the .dwarf_frame section to
+ * make it easier to lookup CIE and FDE entries. Because the
+ * .eh_frame section is packed as tightly as possible it is not
+ * easy to lookup the FDE for a given PC, so we build a list of FDE
+ * and CIE entries that make it easier.
+ */
+static int __init dwarf_unwinder_init(void)
+{
+	int err;
+	INIT_LIST_HEAD(&dwarf_cie_list);
+	INIT_LIST_HEAD(&dwarf_fde_list);
+
+	dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
+			sizeof(struct dwarf_frame), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
+			sizeof(struct dwarf_reg), 0,
+			SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+
+	dwarf_frame_pool = mempool_create(DWARF_FRAME_MIN_REQ,
+					  mempool_alloc_slab,
+					  mempool_free_slab,
+					  dwarf_frame_cachep);
+
+	dwarf_reg_pool = mempool_create(DWARF_REG_MIN_REQ,
+					mempool_alloc_slab,
+					mempool_free_slab,
+					dwarf_reg_cachep);
+
+	err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
+	if (err)
+		goto out;
+
 	err = unwinder_register(&dwarf_unwinder);
 	if (err)
 		goto out;
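Note: module_dwarf_cleanup() restarts its list walk with goto after every deletion because list_for_each_entry() cannot tolerate its cursor being freed mid-walk. An equivalent, arguably more idiomatic form uses list_for_each_entry_safe(), which keeps a lookahead cursor; a sketch for the CIE side (the wrapper function name is hypothetical):

	static void dwarf_module_cleanup_cie(struct module *mod)
	{
		struct dwarf_cie *cie, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&dwarf_cie_lock, flags);
		list_for_each_entry_safe(cie, tmp, &dwarf_cie_list, link) {
			if (cie->mod == mod) {
				list_del(&cie->link);
				kfree(cie);
			}
		}
		spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	}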
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index 3eb84931d2aa..f0abd58c3a69 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -133,7 +133,7 @@ work_pending:
 	! r8: current_thread_info
 	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
 	bf/s	work_resched
-	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
+	 tst	#_TIF_SIGPENDING, r0
 work_notifysig:
 	bt/s	__restore_all
 	 mov	r15, r4
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 2c48e267256e..b6f41c109beb 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -62,6 +62,150 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
 	return ftrace_replaced_code;
 }
 
+/*
+ * Modifying code must take extra care. On an SMP machine, if
+ * the code being modified is also being executed on another CPU
+ * that CPU will have undefined results and possibly take a GPF.
+ * We use kstop_machine to stop other CPUS from exectuing code.
+ * But this does not stop NMIs from happening. We still need
+ * to protect against that. We separate out the modification of
+ * the code to take care of this.
+ *
+ * Two buffers are added: An IP buffer and a "code" buffer.
+ *
+ * 1) Put the instruction pointer into the IP buffer
+ *    and the new code into the "code" buffer.
+ * 2) Wait for any running NMIs to finish and set a flag that says
+ *    we are modifying code, it is done in an atomic operation.
+ * 3) Write the code
+ * 4) clear the flag.
+ * 5) Wait for any running NMIs to finish.
+ *
+ * If an NMI is executed, the first thing it does is to call
+ * "ftrace_nmi_enter". This will check if the flag is set to write
+ * and if it is, it will write what is in the IP and "code" buffers.
+ *
+ * The trick is, it does not matter if everyone is writing the same
+ * content to the code location. Also, if a CPU is executing code
+ * it is OK to write to that code location if the contents being written
+ * are the same as what exists.
+ */
+#define MOD_CODE_WRITE_FLAG (1 << 31)	/* set when NMI should do the write */
+static atomic_t nmi_running = ATOMIC_INIT(0);
+static int mod_code_status;		/* holds return value of text write */
+static void *mod_code_ip;		/* holds the IP to write to */
+static void *mod_code_newcode;		/* holds the text to write to the IP */
+
+static unsigned nmi_wait_count;
+static atomic_t nmi_update_count = ATOMIC_INIT(0);
+
+int ftrace_arch_read_dyn_info(char *buf, int size)
+{
+	int r;
+
+	r = snprintf(buf, size, "%u %u",
+		     nmi_wait_count,
+		     atomic_read(&nmi_update_count));
+	return r;
+}
+
+static void clear_mod_flag(void)
+{
+	int old = atomic_read(&nmi_running);
+
+	for (;;) {
+		int new = old & ~MOD_CODE_WRITE_FLAG;
+
+		if (old == new)
+			break;
+
+		old = atomic_cmpxchg(&nmi_running, old, new);
+	}
+}
+
+static void ftrace_mod_code(void)
+{
+	/*
+	 * Yes, more than one CPU process can be writing to mod_code_status.
+	 * (and the code itself)
+	 * But if one were to fail, then they all should, and if one were
+	 * to succeed, then they all should.
+	 */
+	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+					     MCOUNT_INSN_SIZE);
+
+	/* if we fail, then kill any new writers */
+	if (mod_code_status)
+		clear_mod_flag();
+}
+
+void ftrace_nmi_enter(void)
+{
+	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
+		smp_rmb();
+		ftrace_mod_code();
+		atomic_inc(&nmi_update_count);
+	}
+	/* Must have previous changes seen before executions */
+	smp_mb();
+}
+
+void ftrace_nmi_exit(void)
+{
+	/* Finish all executions before clearing nmi_running */
+	smp_mb();
+	atomic_dec(&nmi_running);
+}
+
+static void wait_for_nmi_and_set_mod_flag(void)
+{
+	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
+
+	nmi_wait_count++;
+}
+
+static void wait_for_nmi(void)
+{
+	if (!atomic_read(&nmi_running))
+		return;
+
+	do {
+		cpu_relax();
+	} while (atomic_read(&nmi_running));
+
+	nmi_wait_count++;
+}
+
+static int
+do_ftrace_mod_code(unsigned long ip, void *new_code)
+{
+	mod_code_ip = (void *)ip;
+	mod_code_newcode = new_code;
+
+	/* The buffers need to be visible before we let NMIs write them */
+	smp_mb();
+
+	wait_for_nmi_and_set_mod_flag();
+
+	/* Make sure all running NMIs have finished before we write the code */
+	smp_mb();
+
+	ftrace_mod_code();
+
+	/* Make sure the write happens before clearing the bit */
+	smp_mb();
+
+	clear_mod_flag();
+	wait_for_nmi();
+
+	return mod_code_status;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 		       unsigned char *new_code)
 {
@@ -86,7 +230,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, | |||
86 | return -EINVAL; | 230 | return -EINVAL; |
87 | 231 | ||
88 | /* replace the text with the new text */ | 232 | /* replace the text with the new text */ |
89 | if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE)) | 233 | if (do_ftrace_mod_code(ip, new_code)) |
90 | return -EPERM; | 234 | return -EPERM; |
91 | 235 | ||
92 | flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); | 236 | flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); |
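A stand-alone model may make the NMI protocol above easier to follow. The following is a sketch only: it uses C11 atomics in user space rather than the kernel's atomic_t, and the *_model names are hypothetical, mirroring what wait_for_nmi_and_set_mod_flag(), ftrace_nmi_enter() and ftrace_nmi_exit() do with nmi_running.

#include <stdatomic.h>
#include <stdio.h>

#define MOD_CODE_WRITE_FLAG (1u << 31)  /* high bit: a writer is active */
static atomic_uint nmi_running_model;   /* low bits: NMIs in flight */

/* Writer side: wait for in-flight NMIs to drain, then claim the flag.
 * The cmpxchg only succeeds when the whole word is 0. */
static void set_mod_flag_model(void)
{
	unsigned int expected = 0;

	while (!atomic_compare_exchange_weak(&nmi_running_model, &expected,
					     MOD_CODE_WRITE_FLAG))
		expected = 0;	/* failed cmpxchg rewrote expected; retry */
}

/* NMI side: count ourselves in; if a writer holds the flag, the NMI
 * performs the pending text write itself, so it never executes stale
 * bytes. */
static void nmi_enter_model(void)
{
	if (atomic_fetch_add(&nmi_running_model, 1) & MOD_CODE_WRITE_FLAG)
		puts("NMI does the pending write itself");
}

static void nmi_exit_model(void)
{
	atomic_fetch_sub(&nmi_running_model, 1);
}

int main(void)
{
	nmi_enter_model();	/* no writer yet: just counts */
	nmi_exit_model();
	set_mod_flag_model();	/* succeeds: counter is 0 */
	nmi_enter_model();	/* sees the flag set, does the write */
	nmi_exit_model();
	return 0;
}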
diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S index a78be74b8d3e..1151ecdffa71 100644 --- a/arch/sh/kernel/head_32.S +++ b/arch/sh/kernel/head_32.S | |||
@@ -33,7 +33,7 @@ ENTRY(empty_zero_page) | |||
33 | .long 1 /* LOADER_TYPE */ | 33 | .long 1 /* LOADER_TYPE */ |
34 | .long 0x00000000 /* INITRD_START */ | 34 | .long 0x00000000 /* INITRD_START */ |
35 | .long 0x00000000 /* INITRD_SIZE */ | 35 | .long 0x00000000 /* INITRD_SIZE */ |
36 | #ifdef CONFIG_32BIT | 36 | #if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED) |
37 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ | 37 | .long 0x53453f00 + 32 /* "SE?" = 32 bit */ |
38 | #else | 38 | #else |
39 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ | 39 | .long 0x53453f00 + 29 /* "SE?" = 29 bit */ |
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index 27ff2dc093c7..aaff0037fcd7 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
22 | 22 | ||
23 | static int hlt_counter; | 23 | static int hlt_counter; |
24 | void (*pm_idle)(void); | 24 | void (*pm_idle)(void) = NULL; |
25 | void (*pm_power_off)(void); | 25 | void (*pm_power_off)(void); |
26 | EXPORT_SYMBOL(pm_power_off); | 26 | EXPORT_SYMBOL(pm_power_off); |
27 | 27 | ||
@@ -39,48 +39,92 @@ static int __init hlt_setup(char *__unused) | |||
39 | } | 39 | } |
40 | __setup("hlt", hlt_setup); | 40 | __setup("hlt", hlt_setup); |
41 | 41 | ||
42 | static inline int hlt_works(void) | ||
43 | { | ||
44 | return !hlt_counter; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * On SMP it's slightly faster (but much more power-consuming!) | ||
49 | * to poll the ->work.need_resched flag instead of waiting for the | ||
50 | * cross-CPU IPI to arrive. Use this option with caution. | ||
51 | */ | ||
52 | static void poll_idle(void) | ||
53 | { | ||
54 | local_irq_enable(); | ||
55 | while (!need_resched()) | ||
56 | cpu_relax(); | ||
57 | } | ||
58 | |||
42 | void default_idle(void) | 59 | void default_idle(void) |
43 | { | 60 | { |
44 | if (!hlt_counter) { | 61 | if (hlt_works()) { |
45 | clear_thread_flag(TIF_POLLING_NRFLAG); | 62 | clear_thread_flag(TIF_POLLING_NRFLAG); |
46 | smp_mb__after_clear_bit(); | 63 | smp_mb__after_clear_bit(); |
47 | set_bl_bit(); | ||
48 | stop_critical_timings(); | ||
49 | 64 | ||
50 | while (!need_resched()) | 65 | if (!need_resched()) { |
66 | local_irq_enable(); | ||
51 | cpu_sleep(); | 67 | cpu_sleep(); |
68 | } else | ||
69 | local_irq_enable(); | ||
52 | 70 | ||
53 | start_critical_timings(); | ||
54 | clear_bl_bit(); | ||
55 | set_thread_flag(TIF_POLLING_NRFLAG); | 71 | set_thread_flag(TIF_POLLING_NRFLAG); |
56 | } else | 72 | } else |
57 | while (!need_resched()) | 73 | poll_idle(); |
58 | cpu_relax(); | ||
59 | } | 74 | } |
60 | 75 | ||
76 | /* | ||
77 | * The idle thread. There's no useful work to be done, so just try to conserve | ||
78 | * power and have a low exit latency (i.e. sit in a loop waiting for somebody | ||
79 | * to say that they'd like to reschedule). | ||
80 | */ | ||
61 | void cpu_idle(void) | 81 | void cpu_idle(void) |
62 | { | 82 | { |
83 | unsigned int cpu = smp_processor_id(); | ||
84 | |||
63 | set_thread_flag(TIF_POLLING_NRFLAG); | 85 | set_thread_flag(TIF_POLLING_NRFLAG); |
64 | 86 | ||
65 | /* endless idle loop with no priority at all */ | 87 | /* endless idle loop with no priority at all */ |
66 | while (1) { | 88 | while (1) { |
67 | void (*idle)(void) = pm_idle; | 89 | tick_nohz_stop_sched_tick(1); |
68 | 90 | ||
69 | if (!idle) | 91 | while (!need_resched() && cpu_online(cpu)) { |
70 | idle = default_idle; | 92 | check_pgt_cache(); |
93 | rmb(); | ||
71 | 94 | ||
72 | tick_nohz_stop_sched_tick(1); | 95 | local_irq_disable(); |
73 | while (!need_resched()) | 96 | /* Don't trace irqs off for idle */ |
74 | idle(); | 97 | stop_critical_timings(); |
75 | tick_nohz_restart_sched_tick(); | 98 | pm_idle(); |
99 | /* | ||
100 | * Sanity check to ensure that pm_idle() returns | ||
101 | * with IRQs enabled | ||
102 | */ | ||
103 | WARN_ON(irqs_disabled()); | ||
104 | start_critical_timings(); | ||
105 | } | ||
76 | 106 | ||
107 | tick_nohz_restart_sched_tick(); | ||
77 | preempt_enable_no_resched(); | 108 | preempt_enable_no_resched(); |
78 | schedule(); | 109 | schedule(); |
79 | preempt_disable(); | 110 | preempt_disable(); |
80 | check_pgt_cache(); | ||
81 | } | 111 | } |
82 | } | 112 | } |
83 | 113 | ||
114 | void __cpuinit select_idle_routine(void) | ||
115 | { | ||
116 | /* | ||
117 | * If a platform has set its own idle routine, leave it alone. | ||
118 | */ | ||
119 | if (pm_idle) | ||
120 | return; | ||
121 | |||
122 | if (hlt_works()) | ||
123 | pm_idle = default_idle; | ||
124 | else | ||
125 | pm_idle = poll_idle; | ||
126 | } | ||
127 | |||
84 | static void do_nothing(void *unused) | 128 | static void do_nothing(void *unused) |
85 | { | 129 | { |
86 | } | 130 | } |
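Note that select_idle_routine() above only fills in pm_idle when it is still NULL, so a board can install its own routine earlier and the kernel will leave it alone. A minimal sketch of such platform code follows; the board name and initcall level are hypothetical, and the only contract assumed from the patch is that the routine returns with IRQs enabled, per the WARN_ON in cpu_idle().

/* Hypothetical platform code: claim pm_idle before
 * select_idle_routine() runs. Sketch only. */
extern void (*pm_idle)(void);

static void myboard_idle(void)
{
	/* board-specific low-power entry would go here */
	local_irq_enable();	/* keep the contract: return with IRQs on */
}

static int __init myboard_idle_setup(void)
{
	pm_idle = myboard_idle;
	return 0;
}
early_initcall(myboard_idle_setup);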
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 7cb933ba4957..11c289ecc090 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -36,7 +36,15 @@ void ack_bad_irq(unsigned int irq) | |||
36 | */ | 36 | */ |
37 | static int show_other_interrupts(struct seq_file *p, int prec) | 37 | static int show_other_interrupts(struct seq_file *p, int prec) |
38 | { | 38 | { |
39 | int j; | ||
40 | |||
41 | seq_printf(p, "%*s: ", prec, "NMI"); | ||
42 | for_each_online_cpu(j) | ||
43 | seq_printf(p, "%10u ", irq_stat[j].__nmi_count); | ||
44 | seq_printf(p, " Non-maskable interrupts\n"); | ||
45 | |||
39 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | 46 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); |
47 | |||
40 | return 0; | 48 | return 0; |
41 | } | 49 | } |
42 | 50 | ||
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c index 7ea2704ea033..de7cf5477d3f 100644 --- a/arch/sh/kernel/machine_kexec.c +++ b/arch/sh/kernel/machine_kexec.c | |||
@@ -49,7 +49,7 @@ int machine_kexec_prepare(struct kimage *image) | |||
49 | /* older versions of kexec-tools are passing | 49 | /* older versions of kexec-tools are passing |
50 | * the zImage entry point as a virtual address. | 50 | * the zImage entry point as a virtual address. |
51 | */ | 51 | */ |
52 | if (image->start != PHYSADDR(image->start)) | 52 | if (image->start != __pa(image->start)) |
53 | return -EINVAL; /* upgrade your kexec-tools */ | 53 | return -EINVAL; /* upgrade your kexec-tools */ |
54 | 54 | ||
55 | return 0; | 55 | return 0; |
diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c index c2efdcde266f..43adddfe4c04 100644 --- a/arch/sh/kernel/module.c +++ b/arch/sh/kernel/module.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/string.h> | 32 | #include <linux/string.h> |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
34 | #include <asm/unaligned.h> | 34 | #include <asm/unaligned.h> |
35 | #include <asm/dwarf.h> | ||
35 | 36 | ||
36 | void *module_alloc(unsigned long size) | 37 | void *module_alloc(unsigned long size) |
37 | { | 38 | { |
@@ -145,10 +146,16 @@ int module_finalize(const Elf_Ehdr *hdr, | |||
145 | const Elf_Shdr *sechdrs, | 146 | const Elf_Shdr *sechdrs, |
146 | struct module *me) | 147 | struct module *me) |
147 | { | 148 | { |
148 | return module_bug_finalize(hdr, sechdrs, me); | 149 | int ret = 0; |
150 | |||
151 | ret |= module_dwarf_finalize(hdr, sechdrs, me); | ||
152 | ret |= module_bug_finalize(hdr, sechdrs, me); | ||
153 | |||
154 | return ret; | ||
149 | } | 155 | } |
150 | 156 | ||
151 | void module_arch_cleanup(struct module *mod) | 157 | void module_arch_cleanup(struct module *mod) |
152 | { | 158 | { |
153 | module_bug_cleanup(mod); | 159 | module_bug_cleanup(mod); |
160 | module_dwarf_cleanup(mod); | ||
154 | } | 161 | } |
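One editorial note on the module_finalize() hunk above: OR-ing the two return values gives a reliable zero/nonzero failure signal, but if both calls fail with different errnos the combined value is no longer a meaningful error code. For comparison only (the patch deliberately keeps the |= form), a sketch of a shape that preserves the first failure:

/* Comparison sketch: a hypothetical wrapper with the same signature
 * as module_finalize() above, returning the first failure intact. */
static int module_finalize_alt(const Elf_Ehdr *hdr,
			       const Elf_Shdr *sechdrs,
			       struct module *me)
{
	int ret;

	ret = module_dwarf_finalize(hdr, sechdrs, me);
	if (ret)
		return ret;

	return module_bug_finalize(hdr, sechdrs, me);
}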
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c new file mode 100644 index 000000000000..df3ab5811074 --- /dev/null +++ b/arch/sh/kernel/return_address.c | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * arch/sh/kernel/return_address.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Matt Fleming | ||
5 | * Copyright (C) 2009 Paul Mundt | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <asm/dwarf.h> | ||
13 | |||
14 | #ifdef CONFIG_DWARF_UNWINDER | ||
15 | |||
16 | void *return_address(unsigned int depth) | ||
17 | { | ||
18 | struct dwarf_frame *frame; | ||
19 | unsigned long ra; | ||
20 | int i; | ||
21 | |||
22 | for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { | ||
23 | struct dwarf_frame *tmp; | ||
24 | |||
25 | tmp = dwarf_unwind_stack(ra, frame); | ||
26 | |||
27 | if (frame) | ||
28 | dwarf_free_frame(frame); | ||
29 | |||
30 | frame = tmp; | ||
31 | |||
32 | if (!frame || !frame->return_addr) | ||
33 | break; | ||
34 | |||
35 | ra = frame->return_addr; | ||
36 | } | ||
37 | |||
38 | /* Warn if we failed to unwind the stack to the specified depth. */ | ||
39 | WARN_ON(i != depth + 1); | ||
40 | |||
41 | if (frame) | ||
42 | dwarf_free_frame(frame); | ||
43 | |||
44 | return (void *)ra; | ||
45 | } | ||
46 | |||
47 | #else | ||
48 | |||
49 | void *return_address(unsigned int depth) | ||
50 | { | ||
51 | return NULL; | ||
52 | } | ||
53 | |||
54 | #endif | ||
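return_address(depth) above walks depth frames through the DWARF unwinder, freeing each intermediate frame as it goes, and returns the return address that many frames up (or NULL, via the stub or a failed unwind at the first frame). A usage sketch follows; the caller is hypothetical, and printk's %pS is the usual way to render such an address.

/* Hypothetical caller: report the caller two frames up.
 * Sketch only; assumes the return_address() defined above. */
void debug_report_caller(void)
{
	void *ra = return_address(2);

	if (ra)
		printk(KERN_DEBUG "reached via %pS\n", ra);
}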
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index 99b4fb553bf1..5a947a2567e4 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c | |||
@@ -453,6 +453,10 @@ void __init setup_arch(char **cmdline_p) | |||
453 | 453 | ||
454 | paging_init(); | 454 | paging_init(); |
455 | 455 | ||
456 | #ifdef CONFIG_PMB_ENABLE | ||
457 | pmb_init(); | ||
458 | #endif | ||
459 | |||
456 | #ifdef CONFIG_SMP | 460 | #ifdef CONFIG_SMP |
457 | plat_smp_setup(); | 461 | plat_smp_setup(); |
458 | #endif | 462 | #endif |
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c index 3db37425210d..12815ce01ecd 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c | |||
@@ -67,7 +67,8 @@ sys_sigsuspend(old_sigset_t mask, | |||
67 | 67 | ||
68 | current->state = TASK_INTERRUPTIBLE; | 68 | current->state = TASK_INTERRUPTIBLE; |
69 | schedule(); | 69 | schedule(); |
70 | set_thread_flag(TIF_RESTORE_SIGMASK); | 70 | set_restore_sigmask(); |
71 | |||
71 | return -ERESTARTNOHAND; | 72 | return -ERESTARTNOHAND; |
72 | } | 73 | } |
73 | 74 | ||
@@ -590,7 +591,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) | |||
590 | if (try_to_freeze()) | 591 | if (try_to_freeze()) |
591 | goto no_signal; | 592 | goto no_signal; |
592 | 593 | ||
593 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 594 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) |
594 | oldset = ¤t->saved_sigmask; | 595 | oldset = ¤t->saved_sigmask; |
595 | else | 596 | else |
596 | oldset = ¤t->blocked; | 597 | oldset = ¤t->blocked; |
@@ -602,12 +603,13 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0) | |||
602 | /* Whee! Actually deliver the signal. */ | 603 | /* Whee! Actually deliver the signal. */ |
603 | if (handle_signal(signr, &ka, &info, oldset, | 604 | if (handle_signal(signr, &ka, &info, oldset, |
604 | regs, save_r0) == 0) { | 605 | regs, save_r0) == 0) { |
605 | /* a signal was successfully delivered; the saved | 606 | /* |
607 | * A signal was successfully delivered; the saved | ||
606 | * sigmask will have been stored in the signal frame, | 608 | * sigmask will have been stored in the signal frame, |
607 | * and will be restored by sigreturn, so we can simply | 609 | * and will be restored by sigreturn, so we can simply |
608 | * clear the TIF_RESTORE_SIGMASK flag */ | 610 | * clear the TS_RESTORE_SIGMASK flag |
609 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 611 | */ |
610 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 612 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
611 | 613 | ||
612 | tracehook_signal_handler(signr, &info, &ka, regs, | 614 | tracehook_signal_handler(signr, &info, &ka, regs, |
613 | test_thread_flag(TIF_SINGLESTEP)); | 615 | test_thread_flag(TIF_SINGLESTEP)); |
@@ -631,10 +633,12 @@ no_signal: | |||
631 | } | 633 | } |
632 | } | 634 | } |
633 | 635 | ||
634 | /* if there's no signal to deliver, we just put the saved sigmask | 636 | /* |
635 | * back */ | 637 | * If there's no signal to deliver, we just put the saved sigmask |
636 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 638 | * back. |
637 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 639 | */ |
640 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | ||
641 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
638 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 642 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
639 | } | 643 | } |
640 | } | 644 | } |
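The conversion above swaps the atomic TIF_RESTORE_SIGMASK thread flag for a TS_RESTORE_SIGMASK bit in thread_info->status. Only the task itself ever touches its own status word, so plain read-modify-write is safe and cheaper than the atomic bitops a TIF_ flag requires. Spelled out as helpers, the three operations the hunks open-code look like this (the my_* names are illustrative; set_restore_sigmask() is the generic kernel helper the patch actually calls):

/* Sketch of the three operations used by the hunks above. */
static inline void my_set_restore_sigmask(void)
{
	current_thread_info()->status |= TS_RESTORE_SIGMASK;
}

static inline int my_test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}

static inline void my_clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}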
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 74793c80a57a..feb3dddd3192 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c | |||
@@ -101,7 +101,7 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
101 | if (try_to_freeze()) | 101 | if (try_to_freeze()) |
102 | goto no_signal; | 102 | goto no_signal; |
103 | 103 | ||
104 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 104 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) |
105 | oldset = ¤t->saved_sigmask; | 105 | oldset = ¤t->saved_sigmask; |
106 | else if (!oldset) | 106 | else if (!oldset) |
107 | oldset = ¤t->blocked; | 107 | oldset = ¤t->blocked; |
@@ -115,11 +115,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset) | |||
115 | /* | 115 | /* |
116 | * If a signal was successfully delivered, the | 116 | * If a signal was successfully delivered, the |
117 | * saved sigmask is in its frame, and we can | 117 | * saved sigmask is in its frame, and we can |
118 | * clear the TIF_RESTORE_SIGMASK flag. | 118 | * clear the TS_RESTORE_SIGMASK flag. |
119 | */ | 119 | */ |
120 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 120 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
121 | clear_thread_flag(TIF_RESTORE_SIGMASK); | ||
122 | |||
123 | tracehook_signal_handler(signr, &info, &ka, regs, 0); | 121 | tracehook_signal_handler(signr, &info, &ka, regs, 0); |
124 | return 1; | 122 | return 1; |
125 | } | 123 | } |
@@ -146,8 +144,8 @@ no_signal: | |||
146 | } | 144 | } |
147 | 145 | ||
148 | /* No signal to deliver -- put the saved sigmask back */ | 146 | /* No signal to deliver -- put the saved sigmask back */ |
149 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 147 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { |
150 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 148 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
151 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 149 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); |
152 | } | 150 | } |
153 | 151 | ||
@@ -176,6 +174,7 @@ sys_sigsuspend(old_sigset_t mask, | |||
176 | while (1) { | 174 | while (1) { |
177 | current->state = TASK_INTERRUPTIBLE; | 175 | current->state = TASK_INTERRUPTIBLE; |
178 | schedule(); | 176 | schedule(); |
177 | set_restore_sigmask(); | ||
179 | regs->pc += 4; /* because sys_sigreturn decrements the pc */ | 178 | regs->pc += 4; /* because sys_sigreturn decrements the pc */ |
180 | if (do_signal(regs, &saveset)) { | 179 | if (do_signal(regs, &saveset)) { |
181 | /* pc now points at signal handler. Need to decrement | 180 | /* pc now points at signal handler. Need to decrement |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 160db1003cfb..983e0792d5f3 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -122,7 +122,9 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
122 | stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ | 122 | stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ |
123 | stack_start.start_kernel_fn = start_secondary; | 123 | stack_start.start_kernel_fn = start_secondary; |
124 | 124 | ||
125 | flush_cache_all(); | 125 | flush_icache_range((unsigned long)&stack_start, |
126 | (unsigned long)&stack_start + sizeof(stack_start)); | ||
127 | wmb(); | ||
126 | 128 | ||
127 | plat_start_cpu(cpu, (unsigned long)_stext); | 129 | plat_start_cpu(cpu, (unsigned long)_stext); |
128 | 130 | ||
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c index 0838942b7083..9b0b633b6c92 100644 --- a/arch/sh/kernel/topology.c +++ b/arch/sh/kernel/topology.c | |||
@@ -16,6 +16,32 @@ | |||
16 | 16 | ||
17 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | 17 | static DEFINE_PER_CPU(struct cpu, cpu_devices); |
18 | 18 | ||
19 | cpumask_t cpu_core_map[NR_CPUS]; | ||
20 | |||
21 | static cpumask_t cpu_coregroup_map(unsigned int cpu) | ||
22 | { | ||
23 | /* | ||
24 | * Presently all SH-X3 SMP cores are multi-cores, so just keep it | ||
25 | * simple until we have a method for determining topology. | ||
26 | */ | ||
27 | return cpu_possible_map; | ||
28 | } | ||
29 | |||
30 | const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | ||
31 | { | ||
32 | return &cpu_core_map[cpu]; | ||
33 | } | ||
34 | |||
35 | int arch_update_cpu_topology(void) | ||
36 | { | ||
37 | unsigned int cpu; | ||
38 | |||
39 | for_each_possible_cpu(cpu) | ||
40 | cpu_core_map[cpu] = cpu_coregroup_map(cpu); | ||
41 | |||
42 | return 0; | ||
43 | } | ||
44 | |||
19 | static int __init topology_init(void) | 45 | static int __init topology_init(void) |
20 | { | 46 | { |
21 | int i, ret; | 47 | int i, ret; |
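With cpu_core_map populated by arch_update_cpu_topology(), the scheduler's multi-core support (the SCHED_MC option added in the mm/Kconfig hunk below) can query each CPU's siblings through cpu_coregroup_mask(). A hypothetical consumer, as a sketch:

/* Hypothetical debug helper: dump each CPU's core siblings.
 * Assumes the cpu_coregroup_mask() added above. */
static void dump_core_siblings(void)
{
	unsigned int cpu, sib;

	for_each_possible_cpu(cpu) {
		printk(KERN_DEBUG "CPU%u siblings:", cpu);
		for_each_cpu(sib, cpu_coregroup_mask(cpu))
			printk(KERN_CONT " %u", sib);
		printk(KERN_CONT "\n");
	}
}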
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c index a8396f36bd14..d52695df2702 100644 --- a/arch/sh/kernel/traps.c +++ b/arch/sh/kernel/traps.c | |||
@@ -95,9 +95,11 @@ BUILD_TRAP_HANDLER(bug) | |||
95 | 95 | ||
96 | BUILD_TRAP_HANDLER(nmi) | 96 | BUILD_TRAP_HANDLER(nmi) |
97 | { | 97 | { |
98 | unsigned int cpu = smp_processor_id(); | ||
98 | TRAP_HANDLER_DECL; | 99 | TRAP_HANDLER_DECL; |
99 | 100 | ||
100 | nmi_enter(); | 101 | nmi_enter(); |
102 | nmi_count(cpu)++; | ||
101 | 103 | ||
102 | switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { | 104 | switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { |
103 | case NOTIFY_OK: | 105 | case NOTIFY_OK: |
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 64dc1ad59801..ca02b72bf46f 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig | |||
@@ -83,7 +83,6 @@ config 32BIT | |||
83 | config PMB_ENABLE | 83 | config PMB_ENABLE |
84 | bool "Support 32-bit physical addressing through PMB" | 84 | bool "Support 32-bit physical addressing through PMB" |
85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 85 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) |
86 | select 32BIT | ||
87 | default y | 86 | default y |
88 | help | 87 | help |
89 | If you say Y here, physical addressing will be extended to | 88 | If you say Y here, physical addressing will be extended to |
@@ -98,7 +97,6 @@ choice | |||
98 | config PMB | 97 | config PMB |
99 | bool "PMB" | 98 | bool "PMB" |
100 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) | 99 | depends on MMU && EXPERIMENTAL && (CPU_SUBTYPE_SH7757 || CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785) |
101 | select 32BIT | ||
102 | help | 100 | help |
103 | If you say Y here, physical addressing will be extended to | 101 | If you say Y here, physical addressing will be extended to |
104 | 32-bits through the SH-4A PMB. If this is not set, legacy | 102 | 32-bits through the SH-4A PMB. If this is not set, legacy |
@@ -258,6 +256,15 @@ endchoice | |||
258 | 256 | ||
259 | source "mm/Kconfig" | 257 | source "mm/Kconfig" |
260 | 258 | ||
259 | config SCHED_MC | ||
260 | bool "Multi-core scheduler support" | ||
261 | depends on SMP | ||
262 | default y | ||
263 | help | ||
264 | Multi-core scheduler support improves the CPU scheduler's decision | ||
265 | making when dealing with multi-core CPU chips at a cost of slightly | ||
266 | increased overhead in some places. If unsure say N here. | ||
267 | |||
261 | endmenu | 268 | endmenu |
262 | 269 | ||
263 | menu "Cache configuration" | 270 | menu "Cache configuration" |
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile index 3759bf853293..8a70535fa7ce 100644 --- a/arch/sh/mm/Makefile +++ b/arch/sh/mm/Makefile | |||
@@ -33,8 +33,7 @@ obj-y += $(tlb-y) | |||
33 | endif | 33 | endif |
34 | 34 | ||
35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 35 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
36 | obj-$(CONFIG_PMB) += pmb.o | 36 | obj-$(CONFIG_PMB_ENABLE) += pmb.o |
37 | obj-$(CONFIG_PMB_FIXED) += pmb-fixed.o | ||
38 | obj-$(CONFIG_NUMA) += numa.o | 37 | obj-$(CONFIG_NUMA) += numa.o |
39 | 38 | ||
40 | # Special flags for fault_64.o. This puts restrictions on the number of | 39 | # Special flags for fault_64.o. This puts restrictions on the number of |
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 519e2d16cd06..4a2fbf2864de 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * arch/sh/mm/cache-sh4.c | 2 | * arch/sh/mm/cache-sh4.c |
3 | * | 3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
5 | * Copyright (C) 2001 - 2007 Paul Mundt | 5 | * Copyright (C) 2001 - 2009 Paul Mundt |
6 | * Copyright (C) 2003 Richard Curnow | 6 | * Copyright (C) 2003 Richard Curnow |
7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. | 7 | * Copyright (c) 2007 STMicroelectronics (R&D) Ltd. |
8 | * | 8 | * |
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/highmem.h> | ||
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
19 | #include <asm/cacheflush.h> | 21 | #include <asm/cacheflush.h> |
20 | 22 | ||
@@ -23,21 +25,12 @@ | |||
23 | * flushing. Anything exceeding this will simply flush the dcache in its | 25 | * flushing. Anything exceeding this will simply flush the dcache in its |
24 | * entirety. | 26 | * entirety. |
25 | */ | 27 | */ |
26 | #define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */ | ||
27 | #define MAX_ICACHE_PAGES 32 | 28 | #define MAX_ICACHE_PAGES 32 |
28 | 29 | ||
29 | static void __flush_cache_one(unsigned long addr, unsigned long phys, | 30 | static void __flush_cache_one(unsigned long addr, unsigned long phys, |
30 | unsigned long exec_offset); | 31 | unsigned long exec_offset); |
31 | 32 | ||
32 | /* | 33 | /* |
33 | * This is initialised here to ensure that it is not placed in the BSS. If | ||
34 | * that were to happen, note that cache_init gets called before the BSS is | ||
35 | * cleared, so this would get nulled out which would be hopeless. | ||
36 | */ | ||
37 | static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) = | ||
38 | (void (*)(unsigned long, unsigned long))0xdeadbeef; | ||
39 | |||
40 | /* | ||
41 | * Write back the range of D-cache, and purge the I-cache. | 34 | * Write back the range of D-cache, and purge the I-cache. |
42 | * | 35 | * |
43 | * Called from kernel/module.c:sys_init_module and routine for a.out format, | 36 | * Called from kernel/module.c:sys_init_module and routine for a.out format, |
@@ -94,15 +87,16 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys) | |||
94 | unsigned long flags, exec_offset = 0; | 87 | unsigned long flags, exec_offset = 0; |
95 | 88 | ||
96 | /* | 89 | /* |
97 | * All types of SH-4 require PC to be in P2 to operate on the I-cache. | 90 | * All types of SH-4 require PC to be uncached to operate on the I-cache. |
98 | * Some types of SH-4 require PC to be in P2 to operate on the D-cache. | 91 | * Some types of SH-4 require PC to be uncached to operate on the D-cache. |
99 | */ | 92 | */ |
100 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || | 93 | if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) || |
101 | (start < CACHE_OC_ADDRESS_ARRAY)) | 94 | (start < CACHE_OC_ADDRESS_ARRAY)) |
102 | exec_offset = 0x20000000; | 95 | exec_offset = cached_to_uncached; |
103 | 96 | ||
104 | local_irq_save(flags); | 97 | local_irq_save(flags); |
105 | __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset); | 98 | __flush_cache_one(start | SH_CACHE_ASSOC, |
99 | virt_to_phys(phys), exec_offset); | ||
106 | local_irq_restore(flags); | 100 | local_irq_restore(flags); |
107 | } | 101 | } |
108 | 102 | ||
@@ -121,13 +115,13 @@ static void sh4_flush_dcache_page(void *arg) | |||
121 | else | 115 | else |
122 | #endif | 116 | #endif |
123 | { | 117 | { |
124 | unsigned long phys = PHYSADDR(page_address(page)); | 118 | unsigned long phys = page_to_phys(page); |
125 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; | 119 | unsigned long addr = CACHE_OC_ADDRESS_ARRAY; |
126 | int i, n; | 120 | int i, n; |
127 | 121 | ||
128 | /* Loop all the D-cache */ | 122 | /* Loop all the D-cache */ |
129 | n = boot_cpu_data.dcache.n_aliases; | 123 | n = boot_cpu_data.dcache.n_aliases; |
130 | for (i = 0; i < n; i++, addr += PAGE_SIZE) | 124 | for (i = 0; i <= n; i++, addr += PAGE_SIZE) |
131 | flush_cache_one(addr, phys); | 125 | flush_cache_one(addr, phys); |
132 | } | 126 | } |
133 | 127 | ||
@@ -156,10 +150,27 @@ static void __uses_jump_to_uncached flush_icache_all(void) | |||
156 | local_irq_restore(flags); | 150 | local_irq_restore(flags); |
157 | } | 151 | } |
158 | 152 | ||
159 | static inline void flush_dcache_all(void) | 153 | static void flush_dcache_all(void) |
160 | { | 154 | { |
161 | (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size); | 155 | unsigned long addr, end_addr, entry_offset; |
162 | wmb(); | 156 | |
157 | end_addr = CACHE_OC_ADDRESS_ARRAY + | ||
158 | (current_cpu_data.dcache.sets << | ||
159 | current_cpu_data.dcache.entry_shift) * | ||
160 | current_cpu_data.dcache.ways; | ||
161 | |||
162 | entry_offset = 1 << current_cpu_data.dcache.entry_shift; | ||
163 | |||
164 | for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) { | ||
165 | __raw_writel(0, addr); addr += entry_offset; | ||
166 | __raw_writel(0, addr); addr += entry_offset; | ||
167 | __raw_writel(0, addr); addr += entry_offset; | ||
168 | __raw_writel(0, addr); addr += entry_offset; | ||
169 | __raw_writel(0, addr); addr += entry_offset; | ||
170 | __raw_writel(0, addr); addr += entry_offset; | ||
171 | __raw_writel(0, addr); addr += entry_offset; | ||
172 | __raw_writel(0, addr); addr += entry_offset; | ||
173 | } | ||
163 | } | 174 | } |
164 | 175 | ||
165 | static void sh4_flush_cache_all(void *unused) | 176 | static void sh4_flush_cache_all(void *unused) |
@@ -168,89 +179,13 @@ static void sh4_flush_cache_all(void *unused) | |||
168 | flush_icache_all(); | 179 | flush_icache_all(); |
169 | } | 180 | } |
170 | 181 | ||
171 | static void __flush_cache_mm(struct mm_struct *mm, unsigned long start, | ||
172 | unsigned long end) | ||
173 | { | ||
174 | unsigned long d = 0, p = start & PAGE_MASK; | ||
175 | unsigned long alias_mask = boot_cpu_data.dcache.alias_mask; | ||
176 | unsigned long n_aliases = boot_cpu_data.dcache.n_aliases; | ||
177 | unsigned long select_bit; | ||
178 | unsigned long all_aliases_mask; | ||
179 | unsigned long addr_offset; | ||
180 | pgd_t *dir; | ||
181 | pmd_t *pmd; | ||
182 | pud_t *pud; | ||
183 | pte_t *pte; | ||
184 | int i; | ||
185 | |||
186 | dir = pgd_offset(mm, p); | ||
187 | pud = pud_offset(dir, p); | ||
188 | pmd = pmd_offset(pud, p); | ||
189 | end = PAGE_ALIGN(end); | ||
190 | |||
191 | all_aliases_mask = (1 << n_aliases) - 1; | ||
192 | |||
193 | do { | ||
194 | if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) { | ||
195 | p &= PMD_MASK; | ||
196 | p += PMD_SIZE; | ||
197 | pmd++; | ||
198 | |||
199 | continue; | ||
200 | } | ||
201 | |||
202 | pte = pte_offset_kernel(pmd, p); | ||
203 | |||
204 | do { | ||
205 | unsigned long phys; | ||
206 | pte_t entry = *pte; | ||
207 | |||
208 | if (!(pte_val(entry) & _PAGE_PRESENT)) { | ||
209 | pte++; | ||
210 | p += PAGE_SIZE; | ||
211 | continue; | ||
212 | } | ||
213 | |||
214 | phys = pte_val(entry) & PTE_PHYS_MASK; | ||
215 | |||
216 | if ((p ^ phys) & alias_mask) { | ||
217 | d |= 1 << ((p & alias_mask) >> PAGE_SHIFT); | ||
218 | d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT); | ||
219 | |||
220 | if (d == all_aliases_mask) | ||
221 | goto loop_exit; | ||
222 | } | ||
223 | |||
224 | pte++; | ||
225 | p += PAGE_SIZE; | ||
226 | } while (p < end && ((unsigned long)pte & ~PAGE_MASK)); | ||
227 | pmd++; | ||
228 | } while (p < end); | ||
229 | |||
230 | loop_exit: | ||
231 | addr_offset = 0; | ||
232 | select_bit = 1; | ||
233 | |||
234 | for (i = 0; i < n_aliases; i++) { | ||
235 | if (d & select_bit) { | ||
236 | (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE); | ||
237 | wmb(); | ||
238 | } | ||
239 | |||
240 | select_bit <<= 1; | ||
241 | addr_offset += PAGE_SIZE; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | /* | 182 | /* |
246 | * Note: (RPC) since the caches are physically tagged, the only point | 183 | * Note: (RPC) since the caches are physically tagged, the only point |
247 | * of flush_cache_mm for SH-4 is to get rid of aliases from the | 184 | * of flush_cache_mm for SH-4 is to get rid of aliases from the |
248 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that | 185 | * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that |
249 | * lines can stay resident so long as the virtual address they were | 186 | * lines can stay resident so long as the virtual address they were |
250 | * accessed with (hence cache set) is in accord with the physical | 187 | * accessed with (hence cache set) is in accord with the physical |
251 | * address (i.e. tag). It's no different here. So I reckon we don't | 188 | * address (i.e. tag). It's no different here. |
252 | * need to flush the I-cache, since aliases don't matter for that. We | ||
253 | * should try that. | ||
254 | * | 189 | * |
255 | * Caller takes mm->mmap_sem. | 190 | * Caller takes mm->mmap_sem. |
256 | */ | 191 | */ |
@@ -261,33 +196,7 @@ static void sh4_flush_cache_mm(void *arg) | |||
261 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) | 196 | if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) |
262 | return; | 197 | return; |
263 | 198 | ||
264 | /* | 199 | flush_dcache_all(); |
265 | * If cache is only 4k-per-way, there are never any 'aliases'. Since | ||
266 | * the cache is physically tagged, the data can just be left in there. | ||
267 | */ | ||
268 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
269 | return; | ||
270 | |||
271 | /* | ||
272 | * Don't bother groveling around the dcache for the VMA ranges | ||
273 | * if there are too many PTEs to make it worthwhile. | ||
274 | */ | ||
275 | if (mm->nr_ptes >= MAX_DCACHE_PAGES) | ||
276 | flush_dcache_all(); | ||
277 | else { | ||
278 | struct vm_area_struct *vma; | ||
279 | |||
280 | /* | ||
281 | * In this case there are reasonably sized ranges to flush, | ||
282 | * iterate through the VMA list and take care of any aliases. | ||
283 | */ | ||
284 | for (vma = mm->mmap; vma; vma = vma->vm_next) | ||
285 | __flush_cache_mm(mm, vma->vm_start, vma->vm_end); | ||
286 | } | ||
287 | |||
288 | /* Only touch the icache if one of the VMAs has VM_EXEC set. */ | ||
289 | if (mm->exec_vm) | ||
290 | flush_icache_all(); | ||
291 | } | 200 | } |
292 | 201 | ||
293 | /* | 202 | /* |
@@ -300,44 +209,63 @@ static void sh4_flush_cache_page(void *args) | |||
300 | { | 209 | { |
301 | struct flusher_data *data = args; | 210 | struct flusher_data *data = args; |
302 | struct vm_area_struct *vma; | 211 | struct vm_area_struct *vma; |
212 | struct page *page; | ||
303 | unsigned long address, pfn, phys; | 213 | unsigned long address, pfn, phys; |
304 | unsigned int alias_mask; | 214 | int map_coherent = 0; |
215 | pgd_t *pgd; | ||
216 | pud_t *pud; | ||
217 | pmd_t *pmd; | ||
218 | pte_t *pte; | ||
219 | void *vaddr; | ||
305 | 220 | ||
306 | vma = data->vma; | 221 | vma = data->vma; |
307 | address = data->addr1; | 222 | address = data->addr1 & PAGE_MASK; |
308 | pfn = data->addr2; | 223 | pfn = data->addr2; |
309 | phys = pfn << PAGE_SHIFT; | 224 | phys = pfn << PAGE_SHIFT; |
225 | page = pfn_to_page(pfn); | ||
310 | 226 | ||
311 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) | 227 | if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) |
312 | return; | 228 | return; |
313 | 229 | ||
314 | alias_mask = boot_cpu_data.dcache.alias_mask; | 230 | pgd = pgd_offset(vma->vm_mm, address); |
315 | 231 | pud = pud_offset(pgd, address); | |
316 | /* We only need to flush D-cache when we have alias */ | 232 | pmd = pmd_offset(pud, address); |
317 | if ((address^phys) & alias_mask) { | 233 | pte = pte_offset_kernel(pmd, address); |
318 | /* Loop 4K of the D-cache */ | 234 | |
319 | flush_cache_one( | 235 | /* If the page isn't present, there is nothing to do here. */ |
320 | CACHE_OC_ADDRESS_ARRAY | (address & alias_mask), | 236 | if (!(pte_val(*pte) & _PAGE_PRESENT)) |
321 | phys); | 237 | return; |
322 | /* Loop another 4K of the D-cache */ | ||
323 | flush_cache_one( | ||
324 | CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask), | ||
325 | phys); | ||
326 | } | ||
327 | 238 | ||
328 | alias_mask = boot_cpu_data.icache.alias_mask; | 239 | if ((vma->vm_mm == current->active_mm)) |
329 | if (vma->vm_flags & VM_EXEC) { | 240 | vaddr = NULL; |
241 | else { | ||
330 | /* | 242 | /* |
331 | * Evict entries from the portion of the cache from which code | 243 | * Use kmap_coherent or kmap_atomic to do flushes for |
332 | * may have been executed at this address (virtual). There's | 244 | * another ASID than the current one. |
333 | * no need to evict from the portion corresponding to the | ||
334 | * physical address as for the D-cache, because we know the | ||
335 | * kernel has never executed the code through its identity | ||
336 | * translation. | ||
337 | */ | 245 | */ |
338 | flush_cache_one( | 246 | map_coherent = (current_cpu_data.dcache.n_aliases && |
339 | CACHE_IC_ADDRESS_ARRAY | (address & alias_mask), | 247 | !test_bit(PG_dcache_dirty, &page->flags) && |
340 | phys); | 248 | page_mapped(page)); |
249 | if (map_coherent) | ||
250 | vaddr = kmap_coherent(page, address); | ||
251 | else | ||
252 | vaddr = kmap_atomic(page, KM_USER0); | ||
253 | |||
254 | address = (unsigned long)vaddr; | ||
255 | } | ||
256 | |||
257 | if (pages_do_alias(address, phys)) | ||
258 | flush_cache_one(CACHE_OC_ADDRESS_ARRAY | | ||
259 | (address & shm_align_mask), phys); | ||
260 | |||
261 | if (vma->vm_flags & VM_EXEC) | ||
262 | flush_icache_all(); | ||
263 | |||
264 | if (vaddr) { | ||
265 | if (map_coherent) | ||
266 | kunmap_coherent(vaddr); | ||
267 | else | ||
268 | kunmap_atomic(vaddr, KM_USER0); | ||
341 | } | 269 | } |
342 | } | 270 | } |
343 | 271 | ||
@@ -370,24 +298,10 @@ static void sh4_flush_cache_range(void *args) | |||
370 | if (boot_cpu_data.dcache.n_aliases == 0) | 298 | if (boot_cpu_data.dcache.n_aliases == 0) |
371 | return; | 299 | return; |
372 | 300 | ||
373 | /* | 301 | flush_dcache_all(); |
374 | * Don't bother with the lookup and alias check if we have a | ||
375 | * wide range to cover, just blow away the dcache in its | ||
376 | * entirety instead. -- PFM. | ||
377 | */ | ||
378 | if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES) | ||
379 | flush_dcache_all(); | ||
380 | else | ||
381 | __flush_cache_mm(vma->vm_mm, start, end); | ||
382 | 302 | ||
383 | if (vma->vm_flags & VM_EXEC) { | 303 | if (vma->vm_flags & VM_EXEC) |
384 | /* | ||
385 | * TODO: Is this required??? Need to look at how I-cache | ||
386 | * coherency is assured when new programs are loaded to see if | ||
387 | * this matters. | ||
388 | */ | ||
389 | flush_icache_all(); | 304 | flush_icache_all(); |
390 | } | ||
391 | } | 305 | } |
392 | 306 | ||
393 | /** | 307 | /** |
@@ -461,245 +375,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys, | |||
461 | } while (--way_count != 0); | 375 | } while (--way_count != 0); |
462 | } | 376 | } |
463 | 377 | ||
464 | /* | ||
465 | * Break the 1, 2 and 4 way variants of this out into separate functions to | ||
466 | * avoid nearly all the overhead of having the conditional stuff in the function | ||
467 | * bodies (+ the 1 and 2 way cases avoid saving any registers too). | ||
468 | * | ||
469 | * We want to eliminate unnecessary bus transactions, so this code uses | ||
470 | * a non-obvious technique. | ||
471 | * | ||
472 | * Loop over a cache way sized block of memory, one cache line at a time. For each | ||
473 | * line, use movca.a to cause the current cache line contents to be written | ||
474 | * back, but without reading anything from main memory. However this has the | ||
475 | * side effect that the cache is now caching that memory location. So follow | ||
476 | * this with a cache invalidate to mark the cache line invalid. And do all | ||
477 | * this with interrupts disabled, to avoid the cache line being accidentally | ||
478 | * evicted while it is holding garbage. | ||
479 | * | ||
480 | * This also breaks in a number of circumstances: | ||
481 | * - if there are modifications to the region of memory just above | ||
482 | * empty_zero_page (for example because a breakpoint has been placed | ||
483 | * there), then these can be lost. | ||
484 | * | ||
485 | * This is because the memory address which the cache temporarily | ||
486 | * caches in the above description is empty_zero_page. So the | ||
487 | * movca.l hits the cache (it is assumed that it misses, or at least | ||
488 | * isn't dirty), modifies the line and then invalidates it, losing the | ||
489 | * required change. | ||
490 | * | ||
491 | * - If caches are disabled or configured in write-through mode, then | ||
492 | * the movca.l writes garbage directly into memory. | ||
493 | */ | ||
494 | static void __flush_dcache_segment_writethrough(unsigned long start, | ||
495 | unsigned long extent_per_way) | ||
496 | { | ||
497 | unsigned long addr; | ||
498 | int i; | ||
499 | |||
500 | addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask); | ||
501 | |||
502 | while (extent_per_way) { | ||
503 | for (i = 0; i < cpu_data->dcache.ways; i++) | ||
504 | __raw_writel(0, addr + cpu_data->dcache.way_incr * i); | ||
505 | |||
506 | addr += cpu_data->dcache.linesz; | ||
507 | extent_per_way -= cpu_data->dcache.linesz; | ||
508 | } | ||
509 | } | ||
510 | |||
511 | static void __flush_dcache_segment_1way(unsigned long start, | ||
512 | unsigned long extent_per_way) | ||
513 | { | ||
514 | unsigned long orig_sr, sr_with_bl; | ||
515 | unsigned long base_addr; | ||
516 | unsigned long way_incr, linesz, way_size; | ||
517 | struct cache_info *dcache; | ||
518 | register unsigned long a0, a0e; | ||
519 | |||
520 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
521 | sr_with_bl = orig_sr | (1<<28); | ||
522 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
523 | |||
524 | /* | ||
525 | * The previous code aligned base_addr to 16k, i.e. the way_size of all | ||
526 | * existing SH-4 D-caches. Whilst I don't see a need to have this | ||
527 | * aligned to any better than the cache line size (which it will be | ||
528 | * anyway by construction), let's align it to at least the way_size of | ||
529 | * any existing or conceivable SH-4 D-cache. -- RPC | ||
530 | */ | ||
531 | base_addr = ((base_addr >> 16) << 16); | ||
532 | base_addr |= start; | ||
533 | |||
534 | dcache = &boot_cpu_data.dcache; | ||
535 | linesz = dcache->linesz; | ||
536 | way_incr = dcache->way_incr; | ||
537 | way_size = dcache->way_size; | ||
538 | |||
539 | a0 = base_addr; | ||
540 | a0e = base_addr + extent_per_way; | ||
541 | do { | ||
542 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
543 | asm volatile("movca.l r0, @%0\n\t" | ||
544 | "ocbi @%0" : : "r" (a0)); | ||
545 | a0 += linesz; | ||
546 | asm volatile("movca.l r0, @%0\n\t" | ||
547 | "ocbi @%0" : : "r" (a0)); | ||
548 | a0 += linesz; | ||
549 | asm volatile("movca.l r0, @%0\n\t" | ||
550 | "ocbi @%0" : : "r" (a0)); | ||
551 | a0 += linesz; | ||
552 | asm volatile("movca.l r0, @%0\n\t" | ||
553 | "ocbi @%0" : : "r" (a0)); | ||
554 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
555 | a0 += linesz; | ||
556 | } while (a0 < a0e); | ||
557 | } | ||
558 | |||
559 | static void __flush_dcache_segment_2way(unsigned long start, | ||
560 | unsigned long extent_per_way) | ||
561 | { | ||
562 | unsigned long orig_sr, sr_with_bl; | ||
563 | unsigned long base_addr; | ||
564 | unsigned long way_incr, linesz, way_size; | ||
565 | struct cache_info *dcache; | ||
566 | register unsigned long a0, a1, a0e; | ||
567 | |||
568 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
569 | sr_with_bl = orig_sr | (1<<28); | ||
570 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
571 | |||
572 | /* See comment under 1-way above */ | ||
573 | base_addr = ((base_addr >> 16) << 16); | ||
574 | base_addr |= start; | ||
575 | |||
576 | dcache = &boot_cpu_data.dcache; | ||
577 | linesz = dcache->linesz; | ||
578 | way_incr = dcache->way_incr; | ||
579 | way_size = dcache->way_size; | ||
580 | |||
581 | a0 = base_addr; | ||
582 | a1 = a0 + way_incr; | ||
583 | a0e = base_addr + extent_per_way; | ||
584 | do { | ||
585 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
586 | asm volatile("movca.l r0, @%0\n\t" | ||
587 | "movca.l r0, @%1\n\t" | ||
588 | "ocbi @%0\n\t" | ||
589 | "ocbi @%1" : : | ||
590 | "r" (a0), "r" (a1)); | ||
591 | a0 += linesz; | ||
592 | a1 += linesz; | ||
593 | asm volatile("movca.l r0, @%0\n\t" | ||
594 | "movca.l r0, @%1\n\t" | ||
595 | "ocbi @%0\n\t" | ||
596 | "ocbi @%1" : : | ||
597 | "r" (a0), "r" (a1)); | ||
598 | a0 += linesz; | ||
599 | a1 += linesz; | ||
600 | asm volatile("movca.l r0, @%0\n\t" | ||
601 | "movca.l r0, @%1\n\t" | ||
602 | "ocbi @%0\n\t" | ||
603 | "ocbi @%1" : : | ||
604 | "r" (a0), "r" (a1)); | ||
605 | a0 += linesz; | ||
606 | a1 += linesz; | ||
607 | asm volatile("movca.l r0, @%0\n\t" | ||
608 | "movca.l r0, @%1\n\t" | ||
609 | "ocbi @%0\n\t" | ||
610 | "ocbi @%1" : : | ||
611 | "r" (a0), "r" (a1)); | ||
612 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
613 | a0 += linesz; | ||
614 | a1 += linesz; | ||
615 | } while (a0 < a0e); | ||
616 | } | ||
617 | |||
618 | static void __flush_dcache_segment_4way(unsigned long start, | ||
619 | unsigned long extent_per_way) | ||
620 | { | ||
621 | unsigned long orig_sr, sr_with_bl; | ||
622 | unsigned long base_addr; | ||
623 | unsigned long way_incr, linesz, way_size; | ||
624 | struct cache_info *dcache; | ||
625 | register unsigned long a0, a1, a2, a3, a0e; | ||
626 | |||
627 | asm volatile("stc sr, %0" : "=r" (orig_sr)); | ||
628 | sr_with_bl = orig_sr | (1<<28); | ||
629 | base_addr = ((unsigned long)&empty_zero_page[0]); | ||
630 | |||
631 | /* See comment under 1-way above */ | ||
632 | base_addr = ((base_addr >> 16) << 16); | ||
633 | base_addr |= start; | ||
634 | |||
635 | dcache = &boot_cpu_data.dcache; | ||
636 | linesz = dcache->linesz; | ||
637 | way_incr = dcache->way_incr; | ||
638 | way_size = dcache->way_size; | ||
639 | |||
640 | a0 = base_addr; | ||
641 | a1 = a0 + way_incr; | ||
642 | a2 = a1 + way_incr; | ||
643 | a3 = a2 + way_incr; | ||
644 | a0e = base_addr + extent_per_way; | ||
645 | do { | ||
646 | asm volatile("ldc %0, sr" : : "r" (sr_with_bl)); | ||
647 | asm volatile("movca.l r0, @%0\n\t" | ||
648 | "movca.l r0, @%1\n\t" | ||
649 | "movca.l r0, @%2\n\t" | ||
650 | "movca.l r0, @%3\n\t" | ||
651 | "ocbi @%0\n\t" | ||
652 | "ocbi @%1\n\t" | ||
653 | "ocbi @%2\n\t" | ||
654 | "ocbi @%3\n\t" : : | ||
655 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
656 | a0 += linesz; | ||
657 | a1 += linesz; | ||
658 | a2 += linesz; | ||
659 | a3 += linesz; | ||
660 | asm volatile("movca.l r0, @%0\n\t" | ||
661 | "movca.l r0, @%1\n\t" | ||
662 | "movca.l r0, @%2\n\t" | ||
663 | "movca.l r0, @%3\n\t" | ||
664 | "ocbi @%0\n\t" | ||
665 | "ocbi @%1\n\t" | ||
666 | "ocbi @%2\n\t" | ||
667 | "ocbi @%3\n\t" : : | ||
668 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
669 | a0 += linesz; | ||
670 | a1 += linesz; | ||
671 | a2 += linesz; | ||
672 | a3 += linesz; | ||
673 | asm volatile("movca.l r0, @%0\n\t" | ||
674 | "movca.l r0, @%1\n\t" | ||
675 | "movca.l r0, @%2\n\t" | ||
676 | "movca.l r0, @%3\n\t" | ||
677 | "ocbi @%0\n\t" | ||
678 | "ocbi @%1\n\t" | ||
679 | "ocbi @%2\n\t" | ||
680 | "ocbi @%3\n\t" : : | ||
681 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
682 | a0 += linesz; | ||
683 | a1 += linesz; | ||
684 | a2 += linesz; | ||
685 | a3 += linesz; | ||
686 | asm volatile("movca.l r0, @%0\n\t" | ||
687 | "movca.l r0, @%1\n\t" | ||
688 | "movca.l r0, @%2\n\t" | ||
689 | "movca.l r0, @%3\n\t" | ||
690 | "ocbi @%0\n\t" | ||
691 | "ocbi @%1\n\t" | ||
692 | "ocbi @%2\n\t" | ||
693 | "ocbi @%3\n\t" : : | ||
694 | "r" (a0), "r" (a1), "r" (a2), "r" (a3)); | ||
695 | asm volatile("ldc %0, sr" : : "r" (orig_sr)); | ||
696 | a0 += linesz; | ||
697 | a1 += linesz; | ||
698 | a2 += linesz; | ||
699 | a3 += linesz; | ||
700 | } while (a0 < a0e); | ||
701 | } | ||
702 | |||
703 | extern void __weak sh4__flush_region_init(void); | 378 | extern void __weak sh4__flush_region_init(void); |
704 | 379 | ||
705 | /* | 380 | /* |
@@ -707,32 +382,11 @@ extern void __weak sh4__flush_region_init(void); | |||
707 | */ | 382 | */ |
708 | void __init sh4_cache_init(void) | 383 | void __init sh4_cache_init(void) |
709 | { | 384 | { |
710 | unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT); | ||
711 | |||
712 | printk("PVR=%08x CVR=%08x PRR=%08x\n", | 385 | printk("PVR=%08x CVR=%08x PRR=%08x\n", |
713 | ctrl_inl(CCN_PVR), | 386 | ctrl_inl(CCN_PVR), |
714 | ctrl_inl(CCN_CVR), | 387 | ctrl_inl(CCN_CVR), |
715 | ctrl_inl(CCN_PRR)); | 388 | ctrl_inl(CCN_PRR)); |
716 | 389 | ||
717 | if (wt_enabled) | ||
718 | __flush_dcache_segment_fn = __flush_dcache_segment_writethrough; | ||
719 | else { | ||
720 | switch (boot_cpu_data.dcache.ways) { | ||
721 | case 1: | ||
722 | __flush_dcache_segment_fn = __flush_dcache_segment_1way; | ||
723 | break; | ||
724 | case 2: | ||
725 | __flush_dcache_segment_fn = __flush_dcache_segment_2way; | ||
726 | break; | ||
727 | case 4: | ||
728 | __flush_dcache_segment_fn = __flush_dcache_segment_4way; | ||
729 | break; | ||
730 | default: | ||
731 | panic("unknown number of cache ways\n"); | ||
732 | break; | ||
733 | } | ||
734 | } | ||
735 | |||
736 | local_flush_icache_range = sh4_flush_icache_range; | 390 | local_flush_icache_range = sh4_flush_icache_range; |
737 | local_flush_dcache_page = sh4_flush_dcache_page; | 391 | local_flush_dcache_page = sh4_flush_dcache_page; |
738 | local_flush_cache_all = sh4_flush_cache_all; | 392 | local_flush_cache_all = sh4_flush_cache_all; |
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 2601935eb589..f527fb70fce6 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c | |||
@@ -141,7 +141,7 @@ static void sh7705_flush_dcache_page(void *arg) | |||
141 | if (mapping && !mapping_mapped(mapping)) | 141 | if (mapping && !mapping_mapped(mapping)) |
142 | set_bit(PG_dcache_dirty, &page->flags); | 142 | set_bit(PG_dcache_dirty, &page->flags); |
143 | else | 143 | else |
144 | __flush_dcache_page(PHYSADDR(page_address(page))); | 144 | __flush_dcache_page(__pa(page_address(page))); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) | 147 | static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args) |
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index a2dc7f9ecc51..fc372a1d3132 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
@@ -164,11 +164,17 @@ void flush_cache_all(void) | |||
164 | 164 | ||
165 | void flush_cache_mm(struct mm_struct *mm) | 165 | void flush_cache_mm(struct mm_struct *mm) |
166 | { | 166 | { |
167 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
168 | return; | ||
169 | |||
167 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); | 170 | cacheop_on_each_cpu(local_flush_cache_mm, mm, 1); |
168 | } | 171 | } |
169 | 172 | ||
170 | void flush_cache_dup_mm(struct mm_struct *mm) | 173 | void flush_cache_dup_mm(struct mm_struct *mm) |
171 | { | 174 | { |
175 | if (boot_cpu_data.dcache.n_aliases == 0) | ||
176 | return; | ||
177 | |||
172 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); | 178 | cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1); |
173 | } | 179 | } |
174 | 180 | ||
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c index e098ec158ddb..9a8403d9344b 100644 --- a/arch/sh/mm/consistent.c +++ b/arch/sh/mm/consistent.c | |||
@@ -85,7 +85,7 @@ EXPORT_SYMBOL(dma_free_coherent); | |||
85 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 85 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
86 | enum dma_data_direction direction) | 86 | enum dma_data_direction direction) |
87 | { | 87 | { |
88 | #ifdef CONFIG_CPU_SH5 | 88 | #if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB) |
89 | void *p1addr = vaddr; | 89 | void *p1addr = vaddr; |
90 | #else | 90 | #else |
91 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); | 91 | void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr); |
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 8173e38afd38..c8af6c5fa586 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c | |||
@@ -323,4 +323,12 @@ int memory_add_physaddr_to_nid(u64 addr) | |||
323 | } | 323 | } |
324 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); | 324 | EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid); |
325 | #endif | 325 | #endif |
326 | |||
326 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 327 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
328 | |||
329 | #ifdef CONFIG_PMB | ||
330 | int __in_29bit_mode(void) | ||
331 | { | ||
332 | return !(ctrl_inl(PMB_PASCR) & PASCR_SE); | ||
333 | } | ||
334 | #endif /* CONFIG_PMB */ | ||
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c index 16e01b5fed04..15d74ea42094 100644 --- a/arch/sh/mm/kmap.c +++ b/arch/sh/mm/kmap.c | |||
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
39 | pagefault_disable(); | 39 | pagefault_disable(); |
40 | 40 | ||
41 | idx = FIX_CMAP_END - | 41 | idx = FIX_CMAP_END - |
42 | ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT); | 42 | (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) + |
43 | (FIX_N_COLOURS * smp_processor_id())); | ||
44 | |||
43 | vaddr = __fix_to_virt(idx); | 45 | vaddr = __fix_to_virt(idx); |
44 | 46 | ||
45 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); | 47 | BUG_ON(!pte_none(*(kmap_coherent_pte - idx))); |
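The kmap_coherent() change above stops deriving the fixmap slot from dcache.alias_mask and instead combines the page colour of the user address with a per-CPU block of FIX_N_COLOURS slots, so CPUs on an SMP system never share a coherent-mapping slot. A stand-alone worked example of the index arithmetic follows; the values are hypothetical (4KB pages, 8 colours), and fix_cmap_end / n_colours stand in for FIX_CMAP_END and FIX_N_COLOURS.

#include <stdio.h>

/* Sketch of the slot computation, parameterized to run stand-alone. */
static unsigned long cmap_idx(unsigned long fix_cmap_end,
			      unsigned long n_colours,
			      unsigned long addr, unsigned int cpu)
{
	return fix_cmap_end -
	       (((addr >> 12) & (n_colours - 1)) + n_colours * cpu);
}

int main(void)
{
	/* CPU1 mapping user address 0x00405000 with 8 colours:
	 * colour 5, so the slot is fix_cmap_end - (5 + 8 * 1). */
	printf("%lu\n", cmap_idx(100, 8, 0x00405000UL, 1));	/* 87 */
	return 0;
}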
diff --git a/arch/sh/mm/pmb-fixed.c b/arch/sh/mm/pmb-fixed.c deleted file mode 100644 index 43c8eac4d8a1..000000000000 --- a/arch/sh/mm/pmb-fixed.c +++ /dev/null | |||
@@ -1,45 +0,0 @@ | |||
1 | /* | ||
2 | * arch/sh/mm/fixed_pmb.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Renesas Solutions Corp. | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <asm/mmu.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | |||
16 | static int __uses_jump_to_uncached fixed_pmb_init(void) | ||
17 | { | ||
18 | int i; | ||
19 | unsigned long addr, data; | ||
20 | |||
21 | jump_to_uncached(); | ||
22 | |||
23 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
24 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
25 | data = ctrl_inl(addr); | ||
26 | if (!(data & PMB_V)) | ||
27 | continue; | ||
28 | |||
29 | if (data & PMB_C) { | ||
30 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
31 | data |= PMB_WT; | ||
32 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
33 | data &= ~PMB_WT; | ||
34 | #else | ||
35 | data &= ~(PMB_C | PMB_WT); | ||
36 | #endif | ||
37 | } | ||
38 | ctrl_outl(data, addr); | ||
39 | } | ||
40 | |||
41 | back_to_cached(); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | arch_initcall(fixed_pmb_init); | ||
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c index aade31102112..280f6a166035 100644 --- a/arch/sh/mm/pmb.c +++ b/arch/sh/mm/pmb.c | |||
@@ -35,29 +35,9 @@ | |||
35 | 35 | ||
36 | static void __pmb_unmap(struct pmb_entry *); | 36 | static void __pmb_unmap(struct pmb_entry *); |
37 | 37 | ||
38 | static struct kmem_cache *pmb_cache; | 38 | static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; |
39 | static unsigned long pmb_map; | 39 | static unsigned long pmb_map; |
40 | 40 | ||
41 | static struct pmb_entry pmb_init_map[] = { | ||
42 | /* vpn ppn flags (ub/sz/c/wt) */ | ||
43 | |||
44 | /* P1 Section Mappings */ | ||
45 | { 0x80000000, 0x00000000, PMB_SZ_64M | PMB_C, }, | ||
46 | { 0x84000000, 0x04000000, PMB_SZ_64M | PMB_C, }, | ||
47 | { 0x88000000, 0x08000000, PMB_SZ_128M | PMB_C, }, | ||
48 | { 0x90000000, 0x10000000, PMB_SZ_64M | PMB_C, }, | ||
49 | { 0x94000000, 0x14000000, PMB_SZ_64M | PMB_C, }, | ||
50 | { 0x98000000, 0x18000000, PMB_SZ_64M | PMB_C, }, | ||
51 | |||
52 | /* P2 Section Mappings */ | ||
53 | { 0xa0000000, 0x00000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
54 | { 0xa4000000, 0x04000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
55 | { 0xa8000000, 0x08000000, PMB_UB | PMB_SZ_128M | PMB_WT, }, | ||
56 | { 0xb0000000, 0x10000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
57 | { 0xb4000000, 0x14000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
58 | { 0xb8000000, 0x18000000, PMB_UB | PMB_SZ_64M | PMB_WT, }, | ||
59 | }; | ||
60 | |||
61 | static inline unsigned long mk_pmb_entry(unsigned int entry) | 41 | static inline unsigned long mk_pmb_entry(unsigned int entry) |
62 | { | 42 | { |
63 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; | 43 | return (entry & PMB_E_MASK) << PMB_E_SHIFT; |
@@ -73,81 +53,68 @@ static inline unsigned long mk_pmb_data(unsigned int entry) | |||
73 | return mk_pmb_entry(entry) | PMB_DATA; | 53 | return mk_pmb_entry(entry) | PMB_DATA; |
74 | } | 54 | } |
75 | 55 | ||
76 | static DEFINE_SPINLOCK(pmb_list_lock); | 56 | static int pmb_alloc_entry(void) |
77 | static struct pmb_entry *pmb_list; | ||
78 | |||
79 | static inline void pmb_list_add(struct pmb_entry *pmbe) | ||
80 | { | 57 | { |
81 | struct pmb_entry **p, *tmp; | 58 | unsigned int pos; |
82 | 59 | ||
83 | p = &pmb_list; | 60 | repeat: |
84 | while ((tmp = *p) != NULL) | 61 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); |
85 | p = &tmp->next; | ||
86 | 62 | ||
87 | pmbe->next = tmp; | 63 | if (unlikely(pos >= NR_PMB_ENTRIES)) |
88 | *p = pmbe; | 64 | return -ENOSPC; |
89 | } | ||
90 | 65 | ||
91 | static inline void pmb_list_del(struct pmb_entry *pmbe) | 66 | if (test_and_set_bit(pos, &pmb_map)) |
92 | { | 67 | goto repeat; |
93 | struct pmb_entry **p, *tmp; | ||
94 | 68 | ||
95 | for (p = &pmb_list; (tmp = *p); p = &tmp->next) | 69 | return pos; |
96 | if (tmp == pmbe) { | ||
97 | *p = tmp->next; | ||
98 | return; | ||
99 | } | ||
100 | } | 70 | } |
101 | 71 | ||
102 | struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, | 72 | static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn, |
103 | unsigned long flags) | 73 | unsigned long flags, int entry) |
104 | { | 74 | { |
105 | struct pmb_entry *pmbe; | 75 | struct pmb_entry *pmbe; |
76 | int pos; | ||
77 | |||
78 | if (entry == PMB_NO_ENTRY) { | ||
79 | pos = pmb_alloc_entry(); | ||
80 | if (pos < 0) | ||
81 | return ERR_PTR(pos); | ||
82 | } else { | ||
83 | if (test_bit(entry, &pmb_map)) | ||
84 | return ERR_PTR(-ENOSPC); | ||
85 | pos = entry; | ||
86 | } | ||
106 | 87 | ||
107 | pmbe = kmem_cache_alloc(pmb_cache, GFP_KERNEL); | 88 | pmbe = &pmb_entry_list[pos]; |
108 | if (!pmbe) | 89 | if (!pmbe) |
109 | return ERR_PTR(-ENOMEM); | 90 | return ERR_PTR(-ENOMEM); |
110 | 91 | ||
111 | pmbe->vpn = vpn; | 92 | pmbe->vpn = vpn; |
112 | pmbe->ppn = ppn; | 93 | pmbe->ppn = ppn; |
113 | pmbe->flags = flags; | 94 | pmbe->flags = flags; |
114 | 95 | pmbe->entry = pos; | |
115 | spin_lock_irq(&pmb_list_lock); | ||
116 | pmb_list_add(pmbe); | ||
117 | spin_unlock_irq(&pmb_list_lock); | ||
118 | 96 | ||
119 | return pmbe; | 97 | return pmbe; |
120 | } | 98 | } |
121 | 99 | ||
122 | void pmb_free(struct pmb_entry *pmbe) | 100 | static void pmb_free(struct pmb_entry *pmbe) |
123 | { | 101 | { |
124 | spin_lock_irq(&pmb_list_lock); | 102 | int pos = pmbe->entry; |
125 | pmb_list_del(pmbe); | ||
126 | spin_unlock_irq(&pmb_list_lock); | ||
127 | 103 | ||
128 | kmem_cache_free(pmb_cache, pmbe); | 104 | pmbe->vpn = 0; |
105 | pmbe->ppn = 0; | ||
106 | pmbe->flags = 0; | ||
107 | pmbe->entry = 0; | ||
108 | |||
109 | clear_bit(pos, &pmb_map); | ||
129 | } | 110 | } |
130 | 111 | ||
131 | /* | 112 | /* |
132 | * Must be in P2 for __set_pmb_entry() | 113 | * Must be in P2 for __set_pmb_entry() |
133 | */ | 114 | */ |
134 | int __set_pmb_entry(unsigned long vpn, unsigned long ppn, | 115 | static void __set_pmb_entry(unsigned long vpn, unsigned long ppn, |
135 | unsigned long flags, int *entry) | 116 | unsigned long flags, int pos) |
136 | { | 117 | { |
137 | unsigned int pos = *entry; | ||
138 | |||
139 | if (unlikely(pos == PMB_NO_ENTRY)) | ||
140 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
141 | |||
142 | repeat: | ||
143 | if (unlikely(pos > NR_PMB_ENTRIES)) | ||
144 | return -ENOSPC; | ||
145 | |||
146 | if (test_and_set_bit(pos, &pmb_map)) { | ||
147 | pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES); | ||
148 | goto repeat; | ||
149 | } | ||
150 | |||
151 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); | 118 | ctrl_outl(vpn | PMB_V, mk_pmb_addr(pos)); |
152 | 119 | ||
153 | #ifdef CONFIG_CACHE_WRITETHROUGH | 120 | #ifdef CONFIG_CACHE_WRITETHROUGH |
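pmb_alloc_entry() above is the classic lock-free bitmap allocator: locate a clear bit, claim it with an atomic test-and-set, and rescan if another CPU raced us to it (the bound check has to catch find_first_zero_bit() returning its size argument when the map is full, hence >=). A userspace analogue of the pattern, with GCC atomic builtins standing in for the kernel bitops and illustrative names throughout:

	#include <errno.h>

	#define NR_SLOTS 16			/* stand-in for NR_PMB_ENTRIES */
	static unsigned long slot_map;		/* stand-in for pmb_map */

	static int alloc_slot(void)
	{
		for (;;) {
			int pos;

			/* find_first_zero_bit() equivalent */
			for (pos = 0; pos < NR_SLOTS; pos++)
				if (!(slot_map & (1UL << pos)))
					break;

			if (pos >= NR_SLOTS)
				return -ENOSPC;	/* map is full */

			/*
			 * test_and_set_bit() equivalent: claim the slot
			 * atomically.  If the bit was already set, another
			 * thread won the race -- go around again.
			 */
			if (!(__atomic_fetch_or(&slot_map, 1UL << pos,
						__ATOMIC_SEQ_CST) & (1UL << pos)))
				return pos;
		}
	}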
@@ -161,35 +128,21 @@ repeat: | |||
161 | #endif | 128 | #endif |
162 | 129 | ||
163 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); | 130 | ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos)); |
164 | |||
165 | *entry = pos; | ||
166 | |||
167 | return 0; | ||
168 | } | 131 | } |
169 | 132 | ||
170 | int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) | 133 | static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe) |
171 | { | 134 | { |
172 | int ret; | ||
173 | |||
174 | jump_to_uncached(); | 135 | jump_to_uncached(); |
175 | ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry); | 136 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry); |
176 | back_to_cached(); | 137 | back_to_cached(); |
177 | |||
178 | return ret; | ||
179 | } | 138 | } |
180 | 139 | ||
181 | void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | 140 | static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) |
182 | { | 141 | { |
183 | unsigned int entry = pmbe->entry; | 142 | unsigned int entry = pmbe->entry; |
184 | unsigned long addr; | 143 | unsigned long addr; |
185 | 144 | ||
186 | /* | 145 | if (unlikely(entry >= NR_PMB_ENTRIES)) |
187 | * Don't allow clearing of wired init entries, P1 or P2 access | ||
188 | * without a corresponding mapping in the PMB will lead to reset | ||
189 | * by the TLB. | ||
190 | */ | ||
191 | if (unlikely(entry < ARRAY_SIZE(pmb_init_map) || | ||
192 | entry >= NR_PMB_ENTRIES)) | ||
193 | return; | 146 | return; |
194 | 147 | ||
195 | jump_to_uncached(); | 148 | jump_to_uncached(); |
@@ -202,8 +155,6 @@ void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe) | |||
202 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); | 155 | ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr); |
203 | 156 | ||
204 | back_to_cached(); | 157 | back_to_cached(); |
205 | |||
206 | clear_bit(entry, &pmb_map); | ||
207 | } | 158 | } |
208 | 159 | ||
209 | 160 | ||
@@ -239,23 +190,17 @@ long pmb_remap(unsigned long vaddr, unsigned long phys, | |||
239 | 190 | ||
240 | again: | 191 | again: |
241 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { | 192 | for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { |
242 | int ret; | ||
243 | |||
244 | if (size < pmb_sizes[i].size) | 193 | if (size < pmb_sizes[i].size) |
245 | continue; | 194 | continue; |
246 | 195 | ||
247 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag); | 196 | pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, |
197 | PMB_NO_ENTRY); | ||
248 | if (IS_ERR(pmbe)) { | 198 | if (IS_ERR(pmbe)) { |
249 | err = PTR_ERR(pmbe); | 199 | err = PTR_ERR(pmbe); |
250 | goto out; | 200 | goto out; |
251 | } | 201 | } |
252 | 202 | ||
253 | ret = set_pmb_entry(pmbe); | 203 | set_pmb_entry(pmbe); |
254 | if (ret != 0) { | ||
255 | pmb_free(pmbe); | ||
256 | err = -EBUSY; | ||
257 | goto out; | ||
258 | } | ||
259 | 204 | ||
260 | phys += pmb_sizes[i].size; | 205 | phys += pmb_sizes[i].size; |
261 | vaddr += pmb_sizes[i].size; | 206 | vaddr += pmb_sizes[i].size; |
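The pmb_remap() loop is a greedy, size-descending mapper: for every remaining chunk it takes the largest PMB entry size that still fits, allocates and programs an entry, then advances phys and vaddr by that amount. A sketch of just the chunking arithmetic, with an assumed size table matching the PMB's 16M/64M/128M/512M granularity:

	/* Assumed descending size table, mirroring pmb_sizes[]. */
	static const unsigned long pmb_chunk[] = {
		512 << 20, 128 << 20, 64 << 20, 16 << 20,
	};
	#define NR_CHUNKS (sizeof(pmb_chunk) / sizeof(pmb_chunk[0]))

	/* How many PMB entries a mapping of 'size' bytes would consume. */
	static int entries_needed(unsigned long size)
	{
		int n = 0;

		while (size) {
			unsigned int i;

			/* largest entry size that still fits */
			for (i = 0; i < NR_CHUNKS; i++)
				if (size >= pmb_chunk[i])
					break;

			if (i == NR_CHUNKS)
				return -1;	/* not a multiple of 16M */

			size -= pmb_chunk[i];
			n++;
		}

		return n;
	}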
@@ -292,11 +237,16 @@ out: | |||
292 | 237 | ||
293 | void pmb_unmap(unsigned long addr) | 238 | void pmb_unmap(unsigned long addr) |
294 | { | 239 | { |
295 | struct pmb_entry **p, *pmbe; | 240 | struct pmb_entry *pmbe = NULL; |
241 | int i; | ||
296 | 242 | ||
297 | for (p = &pmb_list; (pmbe = *p); p = &pmbe->next) | 243 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
298 | if (pmbe->vpn == addr) | 244 | if (test_bit(i, &pmb_map)) { |
299 | break; | 245 | pmbe = &pmb_entry_list[i]; |
246 | if (pmbe->vpn == addr) | ||
247 | break; | ||
248 | } | ||
249 | } | ||
300 | 250 | ||
301 | if (unlikely(!pmbe)) | 251 | if (unlikely(!pmbe)) |
302 | return; | 252 | return; |
@@ -306,13 +256,22 @@ void pmb_unmap(unsigned long addr) | |||
306 | 256 | ||
307 | static void __pmb_unmap(struct pmb_entry *pmbe) | 257 | static void __pmb_unmap(struct pmb_entry *pmbe) |
308 | { | 258 | { |
309 | WARN_ON(!test_bit(pmbe->entry, &pmb_map)); | 259 | BUG_ON(!test_bit(pmbe->entry, &pmb_map)); |
310 | 260 | ||
311 | do { | 261 | do { |
312 | struct pmb_entry *pmblink = pmbe; | 262 | struct pmb_entry *pmblink = pmbe; |
313 | 263 | ||
314 | if (pmbe->entry != PMB_NO_ENTRY) | 264 | /* |
315 | clear_pmb_entry(pmbe); | 265 | * We may be called before this pmb_entry has been |
266 | * entered into the PMB table via set_pmb_entry(), but | ||
267 | * that's OK because we've allocated a unique slot for | ||
268 | * this entry in pmb_alloc() (even if we haven't filled | ||
269 | * it yet). | ||
270 | * | ||
271 | * Therefore, calling clear_pmb_entry() is safe as no | ||
272 | * other mapping can be using that slot. | ||
273 | */ | ||
274 | clear_pmb_entry(pmbe); | ||
316 | 275 | ||
317 | pmbe = pmblink->link; | 276 | pmbe = pmblink->link; |
318 | 277 | ||
@@ -320,42 +279,34 @@ static void __pmb_unmap(struct pmb_entry *pmbe) | |||
320 | } while (pmbe); | 279 | } while (pmbe); |
321 | } | 280 | } |
322 | 281 | ||
323 | static void pmb_cache_ctor(void *pmb) | 282 | #ifdef CONFIG_PMB |
283 | int __uses_jump_to_uncached pmb_init(void) | ||
324 | { | 284 | { |
325 | struct pmb_entry *pmbe = pmb; | 285 | unsigned int i; |
326 | 286 | long size, ret; | |
327 | memset(pmb, 0, sizeof(struct pmb_entry)); | ||
328 | |||
329 | pmbe->entry = PMB_NO_ENTRY; | ||
330 | } | ||
331 | |||
332 | static int __uses_jump_to_uncached pmb_init(void) | ||
333 | { | ||
334 | unsigned int nr_entries = ARRAY_SIZE(pmb_init_map); | ||
335 | unsigned int entry, i; | ||
336 | |||
337 | BUG_ON(unlikely(nr_entries >= NR_PMB_ENTRIES)); | ||
338 | |||
339 | pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0, | ||
340 | SLAB_PANIC, pmb_cache_ctor); | ||
341 | 287 | ||
342 | jump_to_uncached(); | 288 | jump_to_uncached(); |
343 | 289 | ||
344 | /* | 290 | /* |
345 | * Ordering is important, P2 must be mapped in the PMB before we | 291 | * Insert PMB entries for the P1 and P2 areas so that, after |
346 | * can set PMB.SE, and P1 must be mapped before we jump back to | 292 | * we've switched the MMU to 32-bit mode, the semantics of P1 |
347 | * P1 space. | 293 | * and P2 are the same as in 29-bit mode, e.g. |
294 | * | ||
295 | * P1 - provides a cached window onto physical memory | ||
296 | * P2 - provides an uncached window onto physical memory | ||
348 | */ | 297 | */ |
349 | for (entry = 0; entry < nr_entries; entry++) { | 298 | size = __MEMORY_START + __MEMORY_SIZE; |
350 | struct pmb_entry *pmbe = pmb_init_map + entry; | ||
351 | 299 | ||
352 | __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &entry); | 300 | ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C); |
353 | } | 301 | BUG_ON(ret != size); |
302 | |||
303 | ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB); | ||
304 | BUG_ON(ret != size); | ||
354 | 305 | ||
355 | ctrl_outl(0, PMB_IRMCR); | 306 | ctrl_outl(0, PMB_IRMCR); |
356 | 307 | ||
357 | /* PMB.SE and UB[7] */ | 308 | /* PMB.SE and UB[7] */ |
358 | ctrl_outl((1 << 31) | (1 << 7), PMB_PASCR); | 309 | ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR); |
359 | 310 | ||
360 | /* Flush out the TLB */ | 311 | /* Flush out the TLB */ |
361 | i = ctrl_inl(MMUCR); | 312 | i = ctrl_inl(MMUCR); |
@@ -366,7 +317,53 @@ static int __uses_jump_to_uncached pmb_init(void) | |||
366 | 317 | ||
367 | return 0; | 318 | return 0; |
368 | } | 319 | } |
369 | arch_initcall(pmb_init); | 320 | #else |
321 | int __uses_jump_to_uncached pmb_init(void) | ||
322 | { | ||
323 | int i; | ||
324 | unsigned long addr, data; | ||
325 | |||
326 | jump_to_uncached(); | ||
327 | |||
328 | for (i = 0; i < PMB_ENTRY_MAX; i++) { | ||
329 | struct pmb_entry *pmbe; | ||
330 | unsigned long vpn, ppn, flags; | ||
331 | |||
332 | addr = PMB_DATA + (i << PMB_E_SHIFT); | ||
333 | data = ctrl_inl(addr); | ||
334 | if (!(data & PMB_V)) | ||
335 | continue; | ||
336 | |||
337 | if (data & PMB_C) { | ||
338 | #if defined(CONFIG_CACHE_WRITETHROUGH) | ||
339 | data |= PMB_WT; | ||
340 | #elif defined(CONFIG_CACHE_WRITEBACK) | ||
341 | data &= ~PMB_WT; | ||
342 | #else | ||
343 | data &= ~(PMB_C | PMB_WT); | ||
344 | #endif | ||
345 | } | ||
346 | ctrl_outl(data, addr); | ||
347 | |||
348 | ppn = data & PMB_PFN_MASK; | ||
349 | |||
350 | flags = data & (PMB_C | PMB_WT | PMB_UB); | ||
351 | flags |= data & PMB_SZ_MASK; | ||
352 | |||
353 | addr = PMB_ADDR + (i << PMB_E_SHIFT); | ||
354 | data = ctrl_inl(addr); | ||
355 | |||
356 | vpn = data & PMB_PFN_MASK; | ||
357 | |||
358 | pmbe = pmb_alloc(vpn, ppn, flags, i); | ||
359 | WARN_ON(IS_ERR(pmbe)); | ||
360 | } | ||
361 | |||
362 | back_to_cached(); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | #endif /* CONFIG_PMB */ | ||
370 | 367 | ||
371 | static int pmb_seq_show(struct seq_file *file, void *iter) | 368 | static int pmb_seq_show(struct seq_file *file, void *iter) |
372 | { | 369 | { |
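With the static pmb_init_map table gone, the CONFIG_PMB bring-up simply maps all of physical memory twice through pmb_remap(): cached via P1 and uncached/write-through via P2, so 32-bit mode keeps the familiar 29-bit segment semantics. A stand-alone illustration of the resulting address-space view (P1SEG/P2SEG carry their conventional SH-4 values; the helper functions are hypothetical):

	#include <assert.h>

	#define P1SEG 0x80000000UL	/* cached window onto phys memory */
	#define P2SEG 0xa0000000UL	/* uncached window onto phys memory */

	static unsigned long p1_to_phys(unsigned long v) { return v - P1SEG; }
	static unsigned long p2_to_phys(unsigned long v) { return v - P2SEG; }

	int main(void)
	{
		/* The two windows alias the same physical memory; only
		 * the cacheability attributes differ. */
		assert(p1_to_phys(0x80001000UL) == p2_to_phys(0xa0001000UL));
		return 0;
	}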
@@ -434,15 +431,18 @@ postcore_initcall(pmb_debugfs_init); | |||
434 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) | 431 | static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) |
435 | { | 432 | { |
436 | static pm_message_t prev_state; | 433 | static pm_message_t prev_state; |
434 | int i; | ||
437 | 435 | ||
438 | /* Restore the PMB after a resume from hibernation */ | 436 | /* Restore the PMB after a resume from hibernation */ |
439 | if (state.event == PM_EVENT_ON && | 437 | if (state.event == PM_EVENT_ON && |
440 | prev_state.event == PM_EVENT_FREEZE) { | 438 | prev_state.event == PM_EVENT_FREEZE) { |
441 | struct pmb_entry *pmbe; | 439 | struct pmb_entry *pmbe; |
442 | spin_lock_irq(&pmb_list_lock); | 440 | for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { |
443 | for (pmbe = pmb_list; pmbe; pmbe = pmbe->next) | 441 | if (test_bit(i, &pmb_map)) { |
444 | set_pmb_entry(pmbe); | 442 | pmbe = &pmb_entry_list[i]; |
445 | spin_unlock_irq(&pmb_list_lock); | 443 | set_pmb_entry(pmbe); |
444 | } | ||
445 | } | ||
446 | } | 446 | } |
447 | prev_state = state; | 447 | prev_state = state; |
448 | return 0; | 448 | return 0; |