author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-27 18:14:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-27 18:14:26 -0400
commit		cea8f46c36c3f82860b038aa23a46e16757666ba (patch)
tree		e09dc37d2b6880d86dac09afbc0c686139d86df0
parent		c1e7179a38919f02dd950801529176b72f5e5a8a (diff)
parent		91b006def384d8f07f9f324ab211fefe2b085c90 (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM updates from Russell King:
"First ARM push of this merge window, post me coming back from holiday.
This is what has been in linux-next for the last few weeks. Not much
to say which isn't described by the commit summaries."
* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (32 commits)
ARM: 7463/1: topology: Update cpu_power according to DT information
ARM: 7462/1: topology: factorize the update of sibling masks
ARM: 7461/1: topology: Add arch_scale_freq_power function
ARM: 7456/1: ptrace: provide separate functions for tracing syscall {entry,exit}
ARM: 7455/1: audit: move syscall auditing until after ptrace SIGTRAP handling
ARM: 7454/1: entry: don't bother with syscall tracing on ret_from_fork path
ARM: 7453/1: audit: only allow syscall auditing for pure EABI userspace
ARM: 7452/1: delay: allow timer-based delay implementation to be selected
ARM: 7451/1: arch timer: implement read_current_timer and get_cycles
ARM: 7450/1: dcache: select DCACHE_WORD_ACCESS for little-endian ARMv6+ CPUs
ARM: 7449/1: use generic strnlen_user and strncpy_from_user functions
ARM: 7448/1: perf: remove arm_perf_pmu_ids global enumeration
ARM: 7447/1: rwlocks: remove unused branch labels from trylock routines
ARM: 7446/1: spinlock: use ticket algorithm for ARMv6+ locking implementation
ARM: 7445/1: mm: update CONTEXTIDR register to contain PID of current process
ARM: 7444/1: kernel: add arch-timer C3STOP feature
ARM: 7460/1: remove asm/locks.h
ARM: 7439/1: head.S: simplify initial page table mapping
ARM: 7437/1: zImage: Allow DTB command line concatenation with ATAG_CMDLINE
ARM: 7436/1: Do not map the vectors page as write-through on UP systems
...
69 files changed, 964 insertions, 754 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9124ff75fe53..fbdd8533c05d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -45,6 +45,9 @@ config ARM | |||
45 | select GENERIC_SMP_IDLE_THREAD | 45 | select GENERIC_SMP_IDLE_THREAD |
46 | select KTIME_SCALAR | 46 | select KTIME_SCALAR |
47 | select GENERIC_CLOCKEVENTS_BROADCAST if SMP | 47 | select GENERIC_CLOCKEVENTS_BROADCAST if SMP |
48 | select GENERIC_STRNCPY_FROM_USER | ||
49 | select GENERIC_STRNLEN_USER | ||
50 | select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN | ||
48 | help | 51 | help |
49 | The ARM series is a line of low-power-consumption RISC chip designs | 52 | The ARM series is a line of low-power-consumption RISC chip designs |
50 | licensed by ARM Ltd and targeted at embedded applications and | 53 | licensed by ARM Ltd and targeted at embedded applications and |
@@ -2004,6 +2007,25 @@ config ARM_ATAG_DTB_COMPAT | |||
2004 | bootloaders, this option allows zImage to extract the information | 2007 | bootloaders, this option allows zImage to extract the information |
2005 | from the ATAG list and store it at run time into the appended DTB. | 2008 | from the ATAG list and store it at run time into the appended DTB. |
2006 | 2009 | ||
2010 | choice | ||
2011 | prompt "Kernel command line type" if ARM_ATAG_DTB_COMPAT | ||
2012 | default ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER | ||
2013 | |||
2014 | config ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER | ||
2015 | bool "Use bootloader kernel arguments if available" | ||
2016 | help | ||
2017 | Uses the command-line options passed by the boot loader instead of | ||
2018 | the device tree bootargs property. If the boot loader doesn't provide | ||
2019 | any, the device tree bootargs property will be used. | ||
2020 | |||
2021 | config ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND | ||
2022 | bool "Extend with bootloader kernel arguments" | ||
2023 | help | ||
2024 | The command-line arguments provided by the boot loader will be | ||
2025 | appended to the device tree bootargs property. | ||
2026 | |||
2027 | endchoice | ||
2028 | |||
2007 | config CMDLINE | 2029 | config CMDLINE |
2008 | string "Default kernel command string" | 2030 | string "Default kernel command string" |
2009 | default "" | 2031 | default "" |
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index a03b5a7059e2..f15f82bf3a50 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -395,4 +395,13 @@ config ARM_KPROBES_TEST | |||
395 | help | 395 | help |
396 | Perform tests of kprobes API and instruction set simulation. | 396 | Perform tests of kprobes API and instruction set simulation. |
397 | 397 | ||
398 | config PID_IN_CONTEXTIDR | ||
399 | bool "Write the current PID to the CONTEXTIDR register" | ||
400 | depends on CPU_COPY_V6 | ||
401 | help | ||
402 | Enabling this option causes the kernel to write the current PID to | ||
403 | the PROCID field of the CONTEXTIDR register, at the expense of some | ||
404 | additional instructions during context switch. Say Y here only if you | ||
405 | are planning to use hardware trace tools with this kernel. | ||
406 | |||
398 | endmenu | 407 | endmenu |
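For reference, the write that PID_IN_CONTEXTIDR enables boils down to updating the PROCID field (bits [31:8]) of CONTEXTIDR while leaving the ASID in bits [7:0] untouched. The helper below is only an illustrative sketch, not the code added by this series (the function name is invented; the real change lives in the ARM context-switch path), but it shows the CP15 access involved:

    static inline void contextidr_write_pid(unsigned int pid)
    {
            unsigned int contextidr;

            /* CONTEXTIDR is CP15 c13, c0, 1: ASID in [7:0], PROCID in [31:8] */
            asm volatile("mrc p15, 0, %0, c13, c0, 1" : "=r" (contextidr));
            contextidr &= 0xff;             /* preserve the current ASID  */
            contextidr |= pid << 8;         /* expose the PID as PROCID   */
            asm volatile("mcr p15, 0, %0, c13, c0, 1\n"
                         "isb" : : "r" (contextidr));
    }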
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 4d6d31115cf2..30eae87ead6d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -10,6 +10,9 @@ | |||
10 | # | 10 | # |
11 | # Copyright (C) 1995-2001 by Russell King | 11 | # Copyright (C) 1995-2001 by Russell King |
12 | 12 | ||
13 | # Ensure linker flags are correct | ||
14 | LDFLAGS := | ||
15 | |||
13 | LDFLAGS_vmlinux :=-p --no-undefined -X | 16 | LDFLAGS_vmlinux :=-p --no-undefined -X |
14 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) | 17 | ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) |
15 | LDFLAGS_vmlinux += --be8 | 18 | LDFLAGS_vmlinux += --be8 |
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 797f04bedb47..aabc02a68482 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -1,6 +1,12 @@ | |||
1 | #include <asm/setup.h> | 1 | #include <asm/setup.h> |
2 | #include <libfdt.h> | 2 | #include <libfdt.h> |
3 | 3 | ||
4 | #if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND) | ||
5 | #define do_extend_cmdline 1 | ||
6 | #else | ||
7 | #define do_extend_cmdline 0 | ||
8 | #endif | ||
9 | |||
4 | static int node_offset(void *fdt, const char *node_path) | 10 | static int node_offset(void *fdt, const char *node_path) |
5 | { | 11 | { |
6 | int offset = fdt_path_offset(fdt, node_path); | 12 | int offset = fdt_path_offset(fdt, node_path); |
@@ -36,6 +42,48 @@ static int setprop_cell(void *fdt, const char *node_path, | |||
36 | return fdt_setprop_cell(fdt, offset, property, val); | 42 | return fdt_setprop_cell(fdt, offset, property, val); |
37 | } | 43 | } |
38 | 44 | ||
45 | static const void *getprop(const void *fdt, const char *node_path, | ||
46 | const char *property, int *len) | ||
47 | { | ||
48 | int offset = fdt_path_offset(fdt, node_path); | ||
49 | |||
50 | if (offset == -FDT_ERR_NOTFOUND) | ||
51 | return NULL; | ||
52 | |||
53 | return fdt_getprop(fdt, offset, property, len); | ||
54 | } | ||
55 | |||
56 | static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline) | ||
57 | { | ||
58 | char cmdline[COMMAND_LINE_SIZE]; | ||
59 | const char *fdt_bootargs; | ||
60 | char *ptr = cmdline; | ||
61 | int len = 0; | ||
62 | |||
63 | /* copy the fdt command line into the buffer */ | ||
64 | fdt_bootargs = getprop(fdt, "/chosen", "bootargs", &len); | ||
65 | if (fdt_bootargs) | ||
66 | if (len < COMMAND_LINE_SIZE) { | ||
67 | memcpy(ptr, fdt_bootargs, len); | ||
68 | /* len is the length of the string | ||
69 | * including the NULL terminator */ | ||
70 | ptr += len - 1; | ||
71 | } | ||
72 | |||
73 | /* and append the ATAG_CMDLINE */ | ||
74 | if (fdt_cmdline) { | ||
75 | len = strlen(fdt_cmdline); | ||
76 | if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) { | ||
77 | *ptr++ = ' '; | ||
78 | memcpy(ptr, fdt_cmdline, len); | ||
79 | ptr += len; | ||
80 | } | ||
81 | } | ||
82 | *ptr = '\0'; | ||
83 | |||
84 | setprop_string(fdt, "/chosen", "bootargs", cmdline); | ||
85 | } | ||
86 | |||
39 | /* | 87 | /* |
40 | * Convert and fold provided ATAGs into the provided FDT. | 88 | * Convert and fold provided ATAGs into the provided FDT. |
41 | * | 89 | * |
@@ -72,8 +120,18 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space) | |||
72 | 120 | ||
73 | for_each_tag(atag, atag_list) { | 121 | for_each_tag(atag, atag_list) { |
74 | if (atag->hdr.tag == ATAG_CMDLINE) { | 122 | if (atag->hdr.tag == ATAG_CMDLINE) { |
75 | setprop_string(fdt, "/chosen", "bootargs", | 123 | /* Append the ATAGS command line to the device tree |
76 | atag->u.cmdline.cmdline); | 124 | * command line. |
125 | * NB: This means that if the same parameter is set in | ||
126 | * the device tree and in the tags, the one from the | ||
127 | * tags will be chosen. | ||
128 | */ | ||
129 | if (do_extend_cmdline) | ||
130 | merge_fdt_bootargs(fdt, | ||
131 | atag->u.cmdline.cmdline); | ||
132 | else | ||
133 | setprop_string(fdt, "/chosen", "bootargs", | ||
134 | atag->u.cmdline.cmdline); | ||
77 | } else if (atag->hdr.tag == ATAG_MEM) { | 135 | } else if (atag->hdr.tag == ATAG_MEM) { |
78 | if (memcount >= sizeof(mem_reg_property)/4) | 136 | if (memcount >= sizeof(mem_reg_property)/4) |
79 | continue; | 137 | continue; |
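The effect of merge_fdt_bootargs() is easiest to see with concrete strings. The stand-alone program below is not part of the patch; it merely reproduces the "device tree bootargs first, ATAG command line appended" policy in plain user-space C, which also illustrates the precedence note in the comment above (for a duplicated option, the later, ATAG-supplied value wins):

    #include <stdio.h>
    #include <string.h>

    #define COMMAND_LINE_SIZE 1024  /* illustrative value */

    static void merge_cmdline(char *out, const char *dt_bootargs,
                              const char *atag_cmdline)
    {
            out[0] = '\0';
            if (dt_bootargs)
                    strncat(out, dt_bootargs, COMMAND_LINE_SIZE - 1);
            if (atag_cmdline && *atag_cmdline) {
                    if (out[0])
                            strncat(out, " ", COMMAND_LINE_SIZE - strlen(out) - 1);
                    strncat(out, atag_cmdline, COMMAND_LINE_SIZE - strlen(out) - 1);
            }
    }

    int main(void)
    {
            char cmdline[COMMAND_LINE_SIZE];

            merge_cmdline(cmdline, "console=ttyS0 root=/dev/nfs",
                          "root=/dev/mmcblk0p2 rw");
            /* prints: console=ttyS0 root=/dev/nfs root=/dev/mmcblk0p2 rw */
            printf("%s\n", cmdline);
            return 0;
    }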
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index ed2e95d46e29..62e75475e57e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,7 +1,10 @@ | |||
1 | #ifndef __ASMARM_ARCH_TIMER_H | 1 | #ifndef __ASMARM_ARCH_TIMER_H |
2 | #define __ASMARM_ARCH_TIMER_H | 2 | #define __ASMARM_ARCH_TIMER_H |
3 | 3 | ||
4 | #include <asm/errno.h> | ||
5 | |||
4 | #ifdef CONFIG_ARM_ARCH_TIMER | 6 | #ifdef CONFIG_ARM_ARCH_TIMER |
7 | #define ARCH_HAS_READ_CURRENT_TIMER | ||
5 | int arch_timer_of_register(void); | 8 | int arch_timer_of_register(void); |
6 | int arch_timer_sched_clock_init(void); | 9 | int arch_timer_sched_clock_init(void); |
7 | #else | 10 | #else |
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
index b2deda181549..dc6145120de3 100644
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -6,9 +6,22 @@ | |||
6 | #ifndef __ASM_ARM_DELAY_H | 6 | #ifndef __ASM_ARM_DELAY_H |
7 | #define __ASM_ARM_DELAY_H | 7 | #define __ASM_ARM_DELAY_H |
8 | 8 | ||
9 | #include <asm/memory.h> | ||
9 | #include <asm/param.h> /* HZ */ | 10 | #include <asm/param.h> /* HZ */ |
10 | 11 | ||
11 | extern void __delay(int loops); | 12 | #define MAX_UDELAY_MS 2 |
13 | #define UDELAY_MULT ((UL(2199023) * HZ) >> 11) | ||
14 | #define UDELAY_SHIFT 30 | ||
15 | |||
16 | #ifndef __ASSEMBLY__ | ||
17 | |||
18 | extern struct arm_delay_ops { | ||
19 | void (*delay)(unsigned long); | ||
20 | void (*const_udelay)(unsigned long); | ||
21 | void (*udelay)(unsigned long); | ||
22 | } arm_delay_ops; | ||
23 | |||
24 | #define __delay(n) arm_delay_ops.delay(n) | ||
12 | 25 | ||
13 | /* | 26 | /* |
14 | * This function intentionally does not exist; if you see references to | 27 | * This function intentionally does not exist; if you see references to |
@@ -23,22 +36,27 @@ extern void __bad_udelay(void); | |||
23 | * division by multiplication: you don't have to worry about | 36 | * division by multiplication: you don't have to worry about |
24 | * loss of precision. | 37 | * loss of precision. |
25 | * | 38 | * |
26 | * Use only for very small delays ( < 1 msec). Should probably use a | 39 | * Use only for very small delays ( < 2 msec). Should probably use a |
27 | * lookup table, really, as the multiplications take much too long with | 40 | * lookup table, really, as the multiplications take much too long with |
28 | * short delays. This is a "reasonable" implementation, though (and the | 41 | * short delays. This is a "reasonable" implementation, though (and the |
29 | * first constant multiplications gets optimized away if the delay is | 42 | * first constant multiplications gets optimized away if the delay is |
30 | * a constant) | 43 | * a constant) |
31 | */ | 44 | */ |
32 | extern void __udelay(unsigned long usecs); | 45 | #define __udelay(n) arm_delay_ops.udelay(n) |
33 | extern void __const_udelay(unsigned long); | 46 | #define __const_udelay(n) arm_delay_ops.const_udelay(n) |
34 | |||
35 | #define MAX_UDELAY_MS 2 | ||
36 | 47 | ||
37 | #define udelay(n) \ | 48 | #define udelay(n) \ |
38 | (__builtin_constant_p(n) ? \ | 49 | (__builtin_constant_p(n) ? \ |
39 | ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \ | 50 | ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \ |
40 | __const_udelay((n) * ((2199023U*HZ)>>11))) : \ | 51 | __const_udelay((n) * UDELAY_MULT)) : \ |
41 | __udelay(n)) | 52 | __udelay(n)) |
42 | 53 | ||
54 | /* Loop-based definitions for assembly code. */ | ||
55 | extern void __loop_delay(unsigned long loops); | ||
56 | extern void __loop_udelay(unsigned long usecs); | ||
57 | extern void __loop_const_udelay(unsigned long); | ||
58 | |||
59 | #endif /* __ASSEMBLY__ */ | ||
60 | |||
43 | #endif /* defined(_ARM_DELAY_H) */ | 61 | #endif /* defined(_ARM_DELAY_H) */ |
44 | 62 | ||
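With __delay(), __udelay() and __const_udelay() routed through arm_delay_ops, a timer-based backend only needs to repoint the three function pointers. The sketch below is hypothetical, not the code added by this series (the example_* names are invented; the real backend lives in arch/arm/lib/delay.c), but the scaling follows directly from the UDELAY_MULT and UDELAY_SHIFT definitions above:

    static unsigned long example_ticks_per_jiffy;   /* timer_freq / HZ */

    static void example_timer_delay(unsigned long cycles)
    {
            cycles_t start = get_cycles();

            while ((get_cycles() - start) < cycles)
                    cpu_relax();
    }

    static void example_timer_const_udelay(unsigned long xloops)
    {
            unsigned long long loops;

            /* udelay(n) passes n * UDELAY_MULT; the HZ factors cancel out */
            loops = (unsigned long long)xloops * example_ticks_per_jiffy;
            example_timer_delay(loops >> UDELAY_SHIFT);
    }

    static void example_timer_udelay(unsigned long usecs)
    {
            example_timer_const_udelay(usecs * UDELAY_MULT);
    }

    static void __init example_timer_delay_init(unsigned long freq)
    {
            example_ticks_per_jiffy    = freq / HZ;
            arm_delay_ops.delay        = example_timer_delay;
            arm_delay_ops.const_udelay = example_timer_const_udelay;
            arm_delay_ops.udelay       = example_timer_udelay;
    }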
diff --git a/arch/arm/include/asm/locks.h b/arch/arm/include/asm/locks.h
deleted file mode 100644
index ef4c897772d1..000000000000
--- a/arch/arm/include/asm/locks.h
+++ /dev/null
@@ -1,274 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/include/asm/locks.h | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Interrupt safe locking assembler. | ||
11 | */ | ||
12 | #ifndef __ASM_PROC_LOCKS_H | ||
13 | #define __ASM_PROC_LOCKS_H | ||
14 | |||
15 | #if __LINUX_ARM_ARCH__ >= 6 | ||
16 | |||
17 | #define __down_op(ptr,fail) \ | ||
18 | ({ \ | ||
19 | __asm__ __volatile__( \ | ||
20 | "@ down_op\n" \ | ||
21 | "1: ldrex lr, [%0]\n" \ | ||
22 | " sub lr, lr, %1\n" \ | ||
23 | " strex ip, lr, [%0]\n" \ | ||
24 | " teq ip, #0\n" \ | ||
25 | " bne 1b\n" \ | ||
26 | " teq lr, #0\n" \ | ||
27 | " movmi ip, %0\n" \ | ||
28 | " blmi " #fail \ | ||
29 | : \ | ||
30 | : "r" (ptr), "I" (1) \ | ||
31 | : "ip", "lr", "cc"); \ | ||
32 | smp_mb(); \ | ||
33 | }) | ||
34 | |||
35 | #define __down_op_ret(ptr,fail) \ | ||
36 | ({ \ | ||
37 | unsigned int ret; \ | ||
38 | __asm__ __volatile__( \ | ||
39 | "@ down_op_ret\n" \ | ||
40 | "1: ldrex lr, [%1]\n" \ | ||
41 | " sub lr, lr, %2\n" \ | ||
42 | " strex ip, lr, [%1]\n" \ | ||
43 | " teq ip, #0\n" \ | ||
44 | " bne 1b\n" \ | ||
45 | " teq lr, #0\n" \ | ||
46 | " movmi ip, %1\n" \ | ||
47 | " movpl ip, #0\n" \ | ||
48 | " blmi " #fail "\n" \ | ||
49 | " mov %0, ip" \ | ||
50 | : "=&r" (ret) \ | ||
51 | : "r" (ptr), "I" (1) \ | ||
52 | : "ip", "lr", "cc"); \ | ||
53 | smp_mb(); \ | ||
54 | ret; \ | ||
55 | }) | ||
56 | |||
57 | #define __up_op(ptr,wake) \ | ||
58 | ({ \ | ||
59 | smp_mb(); \ | ||
60 | __asm__ __volatile__( \ | ||
61 | "@ up_op\n" \ | ||
62 | "1: ldrex lr, [%0]\n" \ | ||
63 | " add lr, lr, %1\n" \ | ||
64 | " strex ip, lr, [%0]\n" \ | ||
65 | " teq ip, #0\n" \ | ||
66 | " bne 1b\n" \ | ||
67 | " cmp lr, #0\n" \ | ||
68 | " movle ip, %0\n" \ | ||
69 | " blle " #wake \ | ||
70 | : \ | ||
71 | : "r" (ptr), "I" (1) \ | ||
72 | : "ip", "lr", "cc"); \ | ||
73 | }) | ||
74 | |||
75 | /* | ||
76 | * The value 0x01000000 supports up to 128 processors and | ||
77 | * lots of processes. BIAS must be chosen such that sub'ing | ||
78 | * BIAS once per CPU will result in the long remaining | ||
79 | * negative. | ||
80 | */ | ||
81 | #define RW_LOCK_BIAS 0x01000000 | ||
82 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
83 | |||
84 | #define __down_op_write(ptr,fail) \ | ||
85 | ({ \ | ||
86 | __asm__ __volatile__( \ | ||
87 | "@ down_op_write\n" \ | ||
88 | "1: ldrex lr, [%0]\n" \ | ||
89 | " sub lr, lr, %1\n" \ | ||
90 | " strex ip, lr, [%0]\n" \ | ||
91 | " teq ip, #0\n" \ | ||
92 | " bne 1b\n" \ | ||
93 | " teq lr, #0\n" \ | ||
94 | " movne ip, %0\n" \ | ||
95 | " blne " #fail \ | ||
96 | : \ | ||
97 | : "r" (ptr), "I" (RW_LOCK_BIAS) \ | ||
98 | : "ip", "lr", "cc"); \ | ||
99 | smp_mb(); \ | ||
100 | }) | ||
101 | |||
102 | #define __up_op_write(ptr,wake) \ | ||
103 | ({ \ | ||
104 | smp_mb(); \ | ||
105 | __asm__ __volatile__( \ | ||
106 | "@ up_op_write\n" \ | ||
107 | "1: ldrex lr, [%0]\n" \ | ||
108 | " adds lr, lr, %1\n" \ | ||
109 | " strex ip, lr, [%0]\n" \ | ||
110 | " teq ip, #0\n" \ | ||
111 | " bne 1b\n" \ | ||
112 | " movcs ip, %0\n" \ | ||
113 | " blcs " #wake \ | ||
114 | : \ | ||
115 | : "r" (ptr), "I" (RW_LOCK_BIAS) \ | ||
116 | : "ip", "lr", "cc"); \ | ||
117 | }) | ||
118 | |||
119 | #define __down_op_read(ptr,fail) \ | ||
120 | __down_op(ptr, fail) | ||
121 | |||
122 | #define __up_op_read(ptr,wake) \ | ||
123 | ({ \ | ||
124 | smp_mb(); \ | ||
125 | __asm__ __volatile__( \ | ||
126 | "@ up_op_read\n" \ | ||
127 | "1: ldrex lr, [%0]\n" \ | ||
128 | " add lr, lr, %1\n" \ | ||
129 | " strex ip, lr, [%0]\n" \ | ||
130 | " teq ip, #0\n" \ | ||
131 | " bne 1b\n" \ | ||
132 | " teq lr, #0\n" \ | ||
133 | " moveq ip, %0\n" \ | ||
134 | " bleq " #wake \ | ||
135 | : \ | ||
136 | : "r" (ptr), "I" (1) \ | ||
137 | : "ip", "lr", "cc"); \ | ||
138 | }) | ||
139 | |||
140 | #else | ||
141 | |||
142 | #define __down_op(ptr,fail) \ | ||
143 | ({ \ | ||
144 | __asm__ __volatile__( \ | ||
145 | "@ down_op\n" \ | ||
146 | " mrs ip, cpsr\n" \ | ||
147 | " orr lr, ip, #128\n" \ | ||
148 | " msr cpsr_c, lr\n" \ | ||
149 | " ldr lr, [%0]\n" \ | ||
150 | " subs lr, lr, %1\n" \ | ||
151 | " str lr, [%0]\n" \ | ||
152 | " msr cpsr_c, ip\n" \ | ||
153 | " movmi ip, %0\n" \ | ||
154 | " blmi " #fail \ | ||
155 | : \ | ||
156 | : "r" (ptr), "I" (1) \ | ||
157 | : "ip", "lr", "cc"); \ | ||
158 | smp_mb(); \ | ||
159 | }) | ||
160 | |||
161 | #define __down_op_ret(ptr,fail) \ | ||
162 | ({ \ | ||
163 | unsigned int ret; \ | ||
164 | __asm__ __volatile__( \ | ||
165 | "@ down_op_ret\n" \ | ||
166 | " mrs ip, cpsr\n" \ | ||
167 | " orr lr, ip, #128\n" \ | ||
168 | " msr cpsr_c, lr\n" \ | ||
169 | " ldr lr, [%1]\n" \ | ||
170 | " subs lr, lr, %2\n" \ | ||
171 | " str lr, [%1]\n" \ | ||
172 | " msr cpsr_c, ip\n" \ | ||
173 | " movmi ip, %1\n" \ | ||
174 | " movpl ip, #0\n" \ | ||
175 | " blmi " #fail "\n" \ | ||
176 | " mov %0, ip" \ | ||
177 | : "=&r" (ret) \ | ||
178 | : "r" (ptr), "I" (1) \ | ||
179 | : "ip", "lr", "cc"); \ | ||
180 | smp_mb(); \ | ||
181 | ret; \ | ||
182 | }) | ||
183 | |||
184 | #define __up_op(ptr,wake) \ | ||
185 | ({ \ | ||
186 | smp_mb(); \ | ||
187 | __asm__ __volatile__( \ | ||
188 | "@ up_op\n" \ | ||
189 | " mrs ip, cpsr\n" \ | ||
190 | " orr lr, ip, #128\n" \ | ||
191 | " msr cpsr_c, lr\n" \ | ||
192 | " ldr lr, [%0]\n" \ | ||
193 | " adds lr, lr, %1\n" \ | ||
194 | " str lr, [%0]\n" \ | ||
195 | " msr cpsr_c, ip\n" \ | ||
196 | " movle ip, %0\n" \ | ||
197 | " blle " #wake \ | ||
198 | : \ | ||
199 | : "r" (ptr), "I" (1) \ | ||
200 | : "ip", "lr", "cc"); \ | ||
201 | }) | ||
202 | |||
203 | /* | ||
204 | * The value 0x01000000 supports up to 128 processors and | ||
205 | * lots of processes. BIAS must be chosen such that sub'ing | ||
206 | * BIAS once per CPU will result in the long remaining | ||
207 | * negative. | ||
208 | */ | ||
209 | #define RW_LOCK_BIAS 0x01000000 | ||
210 | #define RW_LOCK_BIAS_STR "0x01000000" | ||
211 | |||
212 | #define __down_op_write(ptr,fail) \ | ||
213 | ({ \ | ||
214 | __asm__ __volatile__( \ | ||
215 | "@ down_op_write\n" \ | ||
216 | " mrs ip, cpsr\n" \ | ||
217 | " orr lr, ip, #128\n" \ | ||
218 | " msr cpsr_c, lr\n" \ | ||
219 | " ldr lr, [%0]\n" \ | ||
220 | " subs lr, lr, %1\n" \ | ||
221 | " str lr, [%0]\n" \ | ||
222 | " msr cpsr_c, ip\n" \ | ||
223 | " movne ip, %0\n" \ | ||
224 | " blne " #fail \ | ||
225 | : \ | ||
226 | : "r" (ptr), "I" (RW_LOCK_BIAS) \ | ||
227 | : "ip", "lr", "cc"); \ | ||
228 | smp_mb(); \ | ||
229 | }) | ||
230 | |||
231 | #define __up_op_write(ptr,wake) \ | ||
232 | ({ \ | ||
233 | __asm__ __volatile__( \ | ||
234 | "@ up_op_write\n" \ | ||
235 | " mrs ip, cpsr\n" \ | ||
236 | " orr lr, ip, #128\n" \ | ||
237 | " msr cpsr_c, lr\n" \ | ||
238 | " ldr lr, [%0]\n" \ | ||
239 | " adds lr, lr, %1\n" \ | ||
240 | " str lr, [%0]\n" \ | ||
241 | " msr cpsr_c, ip\n" \ | ||
242 | " movcs ip, %0\n" \ | ||
243 | " blcs " #wake \ | ||
244 | : \ | ||
245 | : "r" (ptr), "I" (RW_LOCK_BIAS) \ | ||
246 | : "ip", "lr", "cc"); \ | ||
247 | smp_mb(); \ | ||
248 | }) | ||
249 | |||
250 | #define __down_op_read(ptr,fail) \ | ||
251 | __down_op(ptr, fail) | ||
252 | |||
253 | #define __up_op_read(ptr,wake) \ | ||
254 | ({ \ | ||
255 | smp_mb(); \ | ||
256 | __asm__ __volatile__( \ | ||
257 | "@ up_op_read\n" \ | ||
258 | " mrs ip, cpsr\n" \ | ||
259 | " orr lr, ip, #128\n" \ | ||
260 | " msr cpsr_c, lr\n" \ | ||
261 | " ldr lr, [%0]\n" \ | ||
262 | " adds lr, lr, %1\n" \ | ||
263 | " str lr, [%0]\n" \ | ||
264 | " msr cpsr_c, ip\n" \ | ||
265 | " moveq ip, %0\n" \ | ||
266 | " bleq " #wake \ | ||
267 | : \ | ||
268 | : "r" (ptr), "I" (1) \ | ||
269 | : "ip", "lr", "cc"); \ | ||
270 | }) | ||
271 | |||
272 | #endif | ||
273 | |||
274 | #endif | ||
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index fcb575747e5e..e965f1b560f1 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/const.h> | 17 | #include <linux/const.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <asm/sizes.h> | 19 | #include <linux/sizes.h> |
20 | 20 | ||
21 | #ifdef CONFIG_NEED_MACH_MEMORY_H | 21 | #ifdef CONFIG_NEED_MACH_MEMORY_H |
22 | #include <mach/memory.h> | 22 | #include <mach/memory.h> |
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 00cbe10a50e3..e074948d8143 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,21 +12,6 @@ | |||
12 | #ifndef __ARM_PERF_EVENT_H__ | 12 | #ifndef __ARM_PERF_EVENT_H__ |
13 | #define __ARM_PERF_EVENT_H__ | 13 | #define __ARM_PERF_EVENT_H__ |
14 | 14 | ||
15 | /* ARM perf PMU IDs for use by internal perf clients. */ | 15 | /* Nothing to see here... */ |
16 | enum arm_perf_pmu_ids { | ||
17 | ARM_PERF_PMU_ID_XSCALE1 = 0, | ||
18 | ARM_PERF_PMU_ID_XSCALE2, | ||
19 | ARM_PERF_PMU_ID_V6, | ||
20 | ARM_PERF_PMU_ID_V6MP, | ||
21 | ARM_PERF_PMU_ID_CA8, | ||
22 | ARM_PERF_PMU_ID_CA9, | ||
23 | ARM_PERF_PMU_ID_CA5, | ||
24 | ARM_PERF_PMU_ID_CA15, | ||
25 | ARM_PERF_PMU_ID_CA7, | ||
26 | ARM_NUM_PMU_IDS, | ||
27 | }; | ||
28 | |||
29 | extern enum arm_perf_pmu_ids | ||
30 | armpmu_get_pmu_id(void); | ||
31 | 16 | ||
32 | #endif /* __ARM_PERF_EVENT_H__ */ | 17 | #endif /* __ARM_PERF_EVENT_H__ */ |
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 90114faa9f3c..4432305f4a2a 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -103,10 +103,9 @@ struct pmu_hw_events { | |||
103 | 103 | ||
104 | struct arm_pmu { | 104 | struct arm_pmu { |
105 | struct pmu pmu; | 105 | struct pmu pmu; |
106 | enum arm_perf_pmu_ids id; | ||
107 | enum arm_pmu_type type; | 106 | enum arm_pmu_type type; |
108 | cpumask_t active_irqs; | 107 | cpumask_t active_irqs; |
109 | const char *name; | 108 | char *name; |
110 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | 109 | irqreturn_t (*handle_irq)(int irq_num, void *dev); |
111 | void (*enable)(struct hw_perf_event *evt, int idx); | 110 | void (*enable)(struct hw_perf_event *evt, int idx); |
112 | void (*disable)(struct hw_perf_event *evt, int idx); | 111 | void (*disable)(struct hw_perf_event *evt, int idx); |
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 65fa3c88095c..b4ca707d0a69 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -59,18 +59,13 @@ static inline void dsb_sev(void) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * ARMv6 Spin-locking. | 62 | * ARMv6 ticket-based spin-locking. |
63 | * | 63 | * |
64 | * We exclusively read the old value. If it is zero, we may have | 64 | * A memory barrier is required after we get a lock, and before we |
65 | * won the lock, so we try exclusively storing it. A memory barrier | 65 | * release it, because V6 CPUs are assumed to have weakly ordered |
66 | * is required after we get a lock, and before we release it, because | 66 | * memory. |
67 | * V6 CPUs are assumed to have weakly ordered memory. | ||
68 | * | ||
69 | * Unlocked value: 0 | ||
70 | * Locked value: 1 | ||
71 | */ | 67 | */ |
72 | 68 | ||
73 | #define arch_spin_is_locked(x) ((x)->lock != 0) | ||
74 | #define arch_spin_unlock_wait(lock) \ | 69 | #define arch_spin_unlock_wait(lock) \ |
75 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) | 70 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) |
76 | 71 | ||
@@ -79,31 +74,39 @@ static inline void dsb_sev(void) | |||
79 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 74 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
80 | { | 75 | { |
81 | unsigned long tmp; | 76 | unsigned long tmp; |
77 | u32 newval; | ||
78 | arch_spinlock_t lockval; | ||
82 | 79 | ||
83 | __asm__ __volatile__( | 80 | __asm__ __volatile__( |
84 | "1: ldrex %0, [%1]\n" | 81 | "1: ldrex %0, [%3]\n" |
85 | " teq %0, #0\n" | 82 | " add %1, %0, %4\n" |
86 | WFE("ne") | 83 | " strex %2, %1, [%3]\n" |
87 | " strexeq %0, %2, [%1]\n" | 84 | " teq %2, #0\n" |
88 | " teqeq %0, #0\n" | ||
89 | " bne 1b" | 85 | " bne 1b" |
90 | : "=&r" (tmp) | 86 | : "=&r" (lockval), "=&r" (newval), "=&r" (tmp) |
91 | : "r" (&lock->lock), "r" (1) | 87 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) |
92 | : "cc"); | 88 | : "cc"); |
93 | 89 | ||
90 | while (lockval.tickets.next != lockval.tickets.owner) { | ||
91 | wfe(); | ||
92 | lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner); | ||
93 | } | ||
94 | |||
94 | smp_mb(); | 95 | smp_mb(); |
95 | } | 96 | } |
96 | 97 | ||
97 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 98 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
98 | { | 99 | { |
99 | unsigned long tmp; | 100 | unsigned long tmp; |
101 | u32 slock; | ||
100 | 102 | ||
101 | __asm__ __volatile__( | 103 | __asm__ __volatile__( |
102 | " ldrex %0, [%1]\n" | 104 | " ldrex %0, [%2]\n" |
103 | " teq %0, #0\n" | 105 | " subs %1, %0, %0, ror #16\n" |
104 | " strexeq %0, %2, [%1]" | 106 | " addeq %0, %0, %3\n" |
105 | : "=&r" (tmp) | 107 | " strexeq %1, %0, [%2]" |
106 | : "r" (&lock->lock), "r" (1) | 108 | : "=&r" (slock), "=&r" (tmp) |
109 | : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) | ||
107 | : "cc"); | 110 | : "cc"); |
108 | 111 | ||
109 | if (tmp == 0) { | 112 | if (tmp == 0) { |
@@ -116,17 +119,38 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
116 | 119 | ||
117 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 120 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
118 | { | 121 | { |
122 | unsigned long tmp; | ||
123 | u32 slock; | ||
124 | |||
119 | smp_mb(); | 125 | smp_mb(); |
120 | 126 | ||
121 | __asm__ __volatile__( | 127 | __asm__ __volatile__( |
122 | " str %1, [%0]\n" | 128 | " mov %1, #1\n" |
123 | : | 129 | "1: ldrex %0, [%2]\n" |
124 | : "r" (&lock->lock), "r" (0) | 130 | " uadd16 %0, %0, %1\n" |
131 | " strex %1, %0, [%2]\n" | ||
132 | " teq %1, #0\n" | ||
133 | " bne 1b" | ||
134 | : "=&r" (slock), "=&r" (tmp) | ||
135 | : "r" (&lock->slock) | ||
125 | : "cc"); | 136 | : "cc"); |
126 | 137 | ||
127 | dsb_sev(); | 138 | dsb_sev(); |
128 | } | 139 | } |
129 | 140 | ||
141 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
142 | { | ||
143 | struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); | ||
144 | return tickets.owner != tickets.next; | ||
145 | } | ||
146 | |||
147 | static inline int arch_spin_is_contended(arch_spinlock_t *lock) | ||
148 | { | ||
149 | struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); | ||
150 | return (tickets.next - tickets.owner) > 1; | ||
151 | } | ||
152 | #define arch_spin_is_contended arch_spin_is_contended | ||
153 | |||
130 | /* | 154 | /* |
131 | * RWLOCKS | 155 | * RWLOCKS |
132 | * | 156 | * |
@@ -158,7 +182,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
158 | unsigned long tmp; | 182 | unsigned long tmp; |
159 | 183 | ||
160 | __asm__ __volatile__( | 184 | __asm__ __volatile__( |
161 | "1: ldrex %0, [%1]\n" | 185 | " ldrex %0, [%1]\n" |
162 | " teq %0, #0\n" | 186 | " teq %0, #0\n" |
163 | " strexeq %0, %2, [%1]" | 187 | " strexeq %0, %2, [%1]" |
164 | : "=&r" (tmp) | 188 | : "=&r" (tmp) |
@@ -244,7 +268,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
244 | unsigned long tmp, tmp2 = 1; | 268 | unsigned long tmp, tmp2 = 1; |
245 | 269 | ||
246 | __asm__ __volatile__( | 270 | __asm__ __volatile__( |
247 | "1: ldrex %0, [%2]\n" | 271 | " ldrex %0, [%2]\n" |
248 | " adds %0, %0, #1\n" | 272 | " adds %0, %0, #1\n" |
249 | " strexpl %1, %0, [%2]\n" | 273 | " strexpl %1, %0, [%2]\n" |
250 | : "=&r" (tmp), "+r" (tmp2) | 274 | : "=&r" (tmp), "+r" (tmp2) |
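For readers not fluent in ldrex/strex, the ticket algorithm the new inline assembly implements is the usual one: the lock word holds a "next" counter (each acquirer atomically takes a ticket) and an "owner" counter (bumped on unlock), and the lock is held by whoever's ticket equals owner. A portable C sketch of the same idea, using GCC atomic built-ins instead of the ARM exclusives and wfe/sev, might look like this (illustrative only, not the kernel's code):

    typedef struct {
            unsigned short owner;   /* ticket currently being served */
            unsigned short next;    /* next ticket to hand out       */
    } ticket_lock_t;

    static void ticket_lock(ticket_lock_t *lock)
    {
            unsigned short ticket = __atomic_fetch_add(&lock->next, 1, __ATOMIC_RELAXED);

            while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != ticket)
                    ;       /* spin; the ARM version sleeps in wfe here */
    }

    static void ticket_unlock(ticket_lock_t *lock)
    {
            unsigned short owner = __atomic_load_n(&lock->owner, __ATOMIC_RELAXED);

            __atomic_store_n(&lock->owner, owner + 1, __ATOMIC_RELEASE);
    }

    static int ticket_is_locked(const ticket_lock_t *lock)
    {
            return __atomic_load_n(&lock->owner, __ATOMIC_RELAXED) !=
                   __atomic_load_n(&lock->next, __ATOMIC_RELAXED);
    }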
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index d14d197ae04a..b262d2f8b478 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -5,11 +5,24 @@ | |||
5 | # error "please don't include this file directly" | 5 | # error "please don't include this file directly" |
6 | #endif | 6 | #endif |
7 | 7 | ||
8 | #define TICKET_SHIFT 16 | ||
9 | |||
8 | typedef struct { | 10 | typedef struct { |
9 | volatile unsigned int lock; | 11 | union { |
12 | u32 slock; | ||
13 | struct __raw_tickets { | ||
14 | #ifdef __ARMEB__ | ||
15 | u16 next; | ||
16 | u16 owner; | ||
17 | #else | ||
18 | u16 owner; | ||
19 | u16 next; | ||
20 | #endif | ||
21 | } tickets; | ||
22 | }; | ||
10 | } arch_spinlock_t; | 23 | } arch_spinlock_t; |
11 | 24 | ||
12 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | 25 | #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } |
13 | 26 | ||
14 | typedef struct { | 27 | typedef struct { |
15 | volatile unsigned int lock; | 28 | volatile unsigned int lock; |
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adaba..ce119442277c 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -12,13 +12,15 @@ | |||
12 | #ifndef _ASMARM_TIMEX_H | 12 | #ifndef _ASMARM_TIMEX_H |
13 | #define _ASMARM_TIMEX_H | 13 | #define _ASMARM_TIMEX_H |
14 | 14 | ||
15 | #include <asm/arch_timer.h> | ||
15 | #include <mach/timex.h> | 16 | #include <mach/timex.h> |
16 | 17 | ||
17 | typedef unsigned long cycles_t; | 18 | typedef unsigned long cycles_t; |
18 | 19 | ||
19 | static inline cycles_t get_cycles (void) | 20 | #ifdef ARCH_HAS_READ_CURRENT_TIMER |
20 | { | 21 | #define get_cycles() ({ cycles_t c; read_current_timer(&c) ? 0 : c; }) |
21 | return 0; | 22 | #else |
22 | } | 23 | #define get_cycles() (0) |
24 | #endif | ||
23 | 25 | ||
24 | #endif | 26 | #endif |
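In other words, get_cycles() now returns whatever counter read_current_timer() provides and falls back to 0 otherwise, so callers need no #ifdefs. A typical, purely illustrative use (do_something() is a hypothetical workload):

    static void example_measure(void)
    {
            cycles_t start = get_cycles();  /* 0 if no usable counter */

            do_something();                 /* hypothetical workload */

            pr_info("work took %lu cycles\n",
                    (unsigned long)(get_cycles() - start));
    }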
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 71f6536d17ac..479a6352e0b5 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -189,6 +189,9 @@ static inline void set_fs(mm_segment_t fs) | |||
189 | 189 | ||
190 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) | 190 | #define access_ok(type,addr,size) (__range_ok(addr,size) == 0) |
191 | 191 | ||
192 | #define user_addr_max() \ | ||
193 | (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) | ||
194 | |||
192 | /* | 195 | /* |
193 | * The "__xxx" versions of the user access functions do not verify the | 196 | * The "__xxx" versions of the user access functions do not verify the |
194 | * address space - it must have been done previously with a separate | 197 | * address space - it must have been done previously with a separate |
@@ -398,9 +401,6 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l | |||
398 | #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) | 401 | #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) |
399 | #endif | 402 | #endif |
400 | 403 | ||
401 | extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); | ||
402 | extern unsigned long __must_check __strnlen_user(const char __user *s, long n); | ||
403 | |||
404 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) | 404 | static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) |
405 | { | 405 | { |
406 | if (access_ok(VERIFY_READ, from, n)) | 406 | if (access_ok(VERIFY_READ, from, n)) |
@@ -427,24 +427,9 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo | |||
427 | return n; | 427 | return n; |
428 | } | 428 | } |
429 | 429 | ||
430 | static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) | 430 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
431 | { | ||
432 | long res = -EFAULT; | ||
433 | if (access_ok(VERIFY_READ, src, 1)) | ||
434 | res = __strncpy_from_user(dst, src, count); | ||
435 | return res; | ||
436 | } | ||
437 | |||
438 | #define strlen_user(s) strnlen_user(s, ~0UL >> 1) | ||
439 | 431 | ||
440 | static inline long __must_check strnlen_user(const char __user *s, long n) | 432 | extern __must_check long strlen_user(const char __user *str); |
441 | { | 433 | extern __must_check long strnlen_user(const char __user *str, long n); |
442 | unsigned long res = 0; | ||
443 | |||
444 | if (__addr_ok(s)) | ||
445 | res = __strnlen_user(s, n); | ||
446 | |||
447 | return res; | ||
448 | } | ||
449 | 434 | ||
450 | #endif /* _ASMARM_UACCESS_H */ | 435 | #endif /* _ASMARM_UACCESS_H */ |
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..4d52f92967a6
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,96 @@ | |||
1 | #ifndef __ASM_ARM_WORD_AT_A_TIME_H | ||
2 | #define __ASM_ARM_WORD_AT_A_TIME_H | ||
3 | |||
4 | #ifndef __ARMEB__ | ||
5 | |||
6 | /* | ||
7 | * Little-endian word-at-a-time zero byte handling. | ||
8 | * Heavily based on the x86 algorithm. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | |||
12 | struct word_at_a_time { | ||
13 | const unsigned long one_bits, high_bits; | ||
14 | }; | ||
15 | |||
16 | #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } | ||
17 | |||
18 | static inline unsigned long has_zero(unsigned long a, unsigned long *bits, | ||
19 | const struct word_at_a_time *c) | ||
20 | { | ||
21 | unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; | ||
22 | *bits = mask; | ||
23 | return mask; | ||
24 | } | ||
25 | |||
26 | #define prep_zero_mask(a, bits, c) (bits) | ||
27 | |||
28 | static inline unsigned long create_zero_mask(unsigned long bits) | ||
29 | { | ||
30 | bits = (bits - 1) & ~bits; | ||
31 | return bits >> 7; | ||
32 | } | ||
33 | |||
34 | static inline unsigned long find_zero(unsigned long mask) | ||
35 | { | ||
36 | unsigned long ret; | ||
37 | |||
38 | #if __LINUX_ARM_ARCH__ >= 5 | ||
39 | /* We have clz available. */ | ||
40 | ret = fls(mask) >> 3; | ||
41 | #else | ||
42 | /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ | ||
43 | ret = (0x0ff0001 + mask) >> 23; | ||
44 | /* Fix the 1 for 00 case */ | ||
45 | ret &= mask; | ||
46 | #endif | ||
47 | |||
48 | return ret; | ||
49 | } | ||
50 | |||
51 | #ifdef CONFIG_DCACHE_WORD_ACCESS | ||
52 | |||
53 | #define zero_bytemask(mask) (mask) | ||
54 | |||
55 | /* | ||
56 | * Load an unaligned word from kernel space. | ||
57 | * | ||
58 | * In the (very unlikely) case of the word being a page-crosser | ||
59 | * and the next page not being mapped, take the exception and | ||
60 | * return zeroes in the non-existing part. | ||
61 | */ | ||
62 | static inline unsigned long load_unaligned_zeropad(const void *addr) | ||
63 | { | ||
64 | unsigned long ret, offset; | ||
65 | |||
66 | /* Load word from unaligned pointer addr */ | ||
67 | asm( | ||
68 | "1: ldr %0, [%2]\n" | ||
69 | "2:\n" | ||
70 | " .pushsection .fixup,\"ax\"\n" | ||
71 | " .align 2\n" | ||
72 | "3: and %1, %2, #0x3\n" | ||
73 | " bic %2, %2, #0x3\n" | ||
74 | " ldr %0, [%2]\n" | ||
75 | " lsl %1, %1, #0x3\n" | ||
76 | " lsr %0, %0, %1\n" | ||
77 | " b 2b\n" | ||
78 | " .popsection\n" | ||
79 | " .pushsection __ex_table,\"a\"\n" | ||
80 | " .align 3\n" | ||
81 | " .long 1b, 3b\n" | ||
82 | " .popsection" | ||
83 | : "=&r" (ret), "=&r" (offset) | ||
84 | : "r" (addr), "Qo" (*(unsigned long *)addr)); | ||
85 | |||
86 | return ret; | ||
87 | } | ||
88 | |||
89 | |||
90 | #endif /* DCACHE_WORD_ACCESS */ | ||
91 | |||
92 | #else /* __ARMEB__ */ | ||
93 | #include <asm-generic/word-at-a-time.h> | ||
94 | #endif | ||
95 | |||
96 | #endif /* __ASM_ARM_WORD_AT_A_TIME_H */ | ||
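The has_zero()/create_zero_mask()/find_zero() trio is the standard "(x - 0x01..01) & ~x & 0x80..80" trick: the lowest set high-bit in the resulting mask marks the first zero byte of the word. The small user-space program below is only a demonstration (32-bit little-endian constants written out instead of REPEAT_BYTE()), showing the mask for a word containing "abc\0":

    #include <stdio.h>

    int main(void)
    {
            unsigned int w = 0x00636261;            /* "abc\0" in little-endian byte order */
            unsigned int one_bits  = 0x01010101;    /* REPEAT_BYTE(0x01) for 32 bits */
            unsigned int high_bits = 0x80808080;    /* REPEAT_BYTE(0x80) for 32 bits */
            unsigned int mask = ((w - one_bits) & ~w) & high_bits;

            /* the lowest set bit sits at 8*i + 7 for the first zero byte i */
            printf("mask=%08x, first zero byte at index %u\n",
                   mask, __builtin_ctz(mask) >> 3);  /* prints index 3 */
            return 0;
    }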
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index dd58035621f7..cf258807160d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -32,6 +32,8 @@ static int arch_timer_ppi2; | |||
32 | 32 | ||
33 | static struct clock_event_device __percpu **arch_timer_evt; | 33 | static struct clock_event_device __percpu **arch_timer_evt; |
34 | 34 | ||
35 | extern void init_current_timer_delay(unsigned long freq); | ||
36 | |||
35 | /* | 37 | /* |
36 | * Architected system timer support. | 38 | * Architected system timer support. |
37 | */ | 39 | */ |
@@ -137,7 +139,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk) | |||
137 | /* Be safe... */ | 139 | /* Be safe... */ |
138 | arch_timer_disable(); | 140 | arch_timer_disable(); |
139 | 141 | ||
140 | clk->features = CLOCK_EVT_FEAT_ONESHOT; | 142 | clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; |
141 | clk->name = "arch_sys_timer"; | 143 | clk->name = "arch_sys_timer"; |
142 | clk->rating = 450; | 144 | clk->rating = 450; |
143 | clk->set_mode = arch_timer_set_mode; | 145 | clk->set_mode = arch_timer_set_mode; |
@@ -223,6 +225,14 @@ static cycle_t arch_counter_read(struct clocksource *cs) | |||
223 | return arch_counter_get_cntpct(); | 225 | return arch_counter_get_cntpct(); |
224 | } | 226 | } |
225 | 227 | ||
228 | int read_current_timer(unsigned long *timer_val) | ||
229 | { | ||
230 | if (!arch_timer_rate) | ||
231 | return -ENXIO; | ||
232 | *timer_val = arch_counter_get_cntpct(); | ||
233 | return 0; | ||
234 | } | ||
235 | |||
226 | static struct clocksource clocksource_counter = { | 236 | static struct clocksource clocksource_counter = { |
227 | .name = "arch_sys_counter", | 237 | .name = "arch_sys_counter", |
228 | .rating = 400, | 238 | .rating = 400, |
@@ -296,6 +306,7 @@ static int __init arch_timer_register(void) | |||
296 | if (err) | 306 | if (err) |
297 | goto out_free_irq; | 307 | goto out_free_irq; |
298 | 308 | ||
309 | init_current_timer_delay(arch_timer_rate); | ||
299 | return 0; | 310 | return 0; |
300 | 311 | ||
301 | out_free_irq: | 312 | out_free_irq: |
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index b57c75e0b01f..60d3b738d420 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -49,8 +49,7 @@ extern void __aeabi_ulcmp(void); | |||
49 | extern void fpundefinstr(void); | 49 | extern void fpundefinstr(void); |
50 | 50 | ||
51 | /* platform dependent support */ | 51 | /* platform dependent support */ |
52 | EXPORT_SYMBOL(__udelay); | 52 | EXPORT_SYMBOL(arm_delay_ops); |
53 | EXPORT_SYMBOL(__const_udelay); | ||
54 | 53 | ||
55 | /* networking */ | 54 | /* networking */ |
56 | EXPORT_SYMBOL(csum_partial); | 55 | EXPORT_SYMBOL(csum_partial); |
@@ -87,10 +86,6 @@ EXPORT_SYMBOL(memmove); | |||
87 | EXPORT_SYMBOL(memchr); | 86 | EXPORT_SYMBOL(memchr); |
88 | EXPORT_SYMBOL(__memzero); | 87 | EXPORT_SYMBOL(__memzero); |
89 | 88 | ||
90 | /* user mem (segment) */ | ||
91 | EXPORT_SYMBOL(__strnlen_user); | ||
92 | EXPORT_SYMBOL(__strncpy_from_user); | ||
93 | |||
94 | #ifdef CONFIG_MMU | 89 | #ifdef CONFIG_MMU |
95 | EXPORT_SYMBOL(copy_page); | 90 | EXPORT_SYMBOL(copy_page); |
96 | 91 | ||
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 4afed88d250a..49d9f9305247 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -95,13 +95,7 @@ ENDPROC(ret_to_user) | |||
95 | ENTRY(ret_from_fork) | 95 | ENTRY(ret_from_fork) |
96 | bl schedule_tail | 96 | bl schedule_tail |
97 | get_thread_info tsk | 97 | get_thread_info tsk |
98 | ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing | ||
99 | mov why, #1 | 98 | mov why, #1 |
100 | tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls? | ||
101 | beq ret_slow_syscall | ||
102 | mov r1, sp | ||
103 | mov r0, #1 @ trace exit [IP = 1] | ||
104 | bl syscall_trace | ||
105 | b ret_slow_syscall | 99 | b ret_slow_syscall |
106 | ENDPROC(ret_from_fork) | 100 | ENDPROC(ret_from_fork) |
107 | 101 | ||
@@ -448,10 +442,9 @@ ENDPROC(vector_swi) | |||
448 | * context switches, and waiting for our parent to respond. | 442 | * context switches, and waiting for our parent to respond. |
449 | */ | 443 | */ |
450 | __sys_trace: | 444 | __sys_trace: |
451 | mov r2, scno | 445 | mov r1, scno |
452 | add r1, sp, #S_OFF | 446 | add r0, sp, #S_OFF |
453 | mov r0, #0 @ trace entry [IP = 0] | 447 | bl syscall_trace_enter |
454 | bl syscall_trace | ||
455 | 448 | ||
456 | adr lr, BSYM(__sys_trace_return) @ return address | 449 | adr lr, BSYM(__sys_trace_return) @ return address |
457 | mov scno, r0 @ syscall number (possibly new) | 450 | mov scno, r0 @ syscall number (possibly new) |
@@ -463,10 +456,9 @@ __sys_trace: | |||
463 | 456 | ||
464 | __sys_trace_return: | 457 | __sys_trace_return: |
465 | str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 | 458 | str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 |
466 | mov r2, scno | 459 | mov r1, scno |
467 | mov r1, sp | 460 | mov r0, sp |
468 | mov r0, #1 @ trace exit [IP = 1] | 461 | bl syscall_trace_exit |
469 | bl syscall_trace | ||
470 | b ret_slow_syscall | 462 | b ret_slow_syscall |
471 | 463 | ||
472 | .align 5 | 464 | .align 5 |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 835898e7d704..3db960e20cb8 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -55,14 +55,6 @@ | |||
55 | add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE | 55 | add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE |
56 | .endm | 56 | .endm |
57 | 57 | ||
58 | #ifdef CONFIG_XIP_KERNEL | ||
59 | #define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) | ||
60 | #define KERNEL_END _edata_loc | ||
61 | #else | ||
62 | #define KERNEL_START KERNEL_RAM_VADDR | ||
63 | #define KERNEL_END _end | ||
64 | #endif | ||
65 | |||
66 | /* | 58 | /* |
67 | * Kernel startup entry point. | 59 | * Kernel startup entry point. |
68 | * --------------------------- | 60 | * --------------------------- |
@@ -218,51 +210,46 @@ __create_page_tables: | |||
218 | blo 1b | 210 | blo 1b |
219 | 211 | ||
220 | /* | 212 | /* |
221 | * Now setup the pagetables for our kernel direct | 213 | * Map our RAM from the start to the end of the kernel .bss section. |
222 | * mapped region. | ||
223 | */ | 214 | */ |
224 | mov r3, pc | 215 | add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) |
225 | mov r3, r3, lsr #SECTION_SHIFT | 216 | ldr r6, =(_end - 1) |
226 | orr r3, r7, r3, lsl #SECTION_SHIFT | 217 | orr r3, r8, r7 |
227 | add r0, r4, #(KERNEL_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) | ||
228 | str r3, [r0, #((KERNEL_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! | ||
229 | ldr r6, =(KERNEL_END - 1) | ||
230 | add r0, r0, #1 << PMD_ORDER | ||
231 | add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) | 218 | add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) |
232 | 1: cmp r0, r6 | 219 | 1: str r3, [r0], #1 << PMD_ORDER |
233 | add r3, r3, #1 << SECTION_SHIFT | 220 | add r3, r3, #1 << SECTION_SHIFT |
234 | strls r3, [r0], #1 << PMD_ORDER | 221 | cmp r0, r6 |
235 | bls 1b | 222 | bls 1b |
236 | 223 | ||
237 | #ifdef CONFIG_XIP_KERNEL | 224 | #ifdef CONFIG_XIP_KERNEL |
238 | /* | 225 | /* |
239 | * Map some ram to cover our .data and .bss areas. | 226 | * Map the kernel image separately as it is not located in RAM. |
240 | */ | 227 | */ |
241 | add r3, r8, #TEXT_OFFSET | 228 | #define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) |
242 | orr r3, r3, r7 | 229 | mov r3, pc |
243 | add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) | 230 | mov r3, r3, lsr #SECTION_SHIFT |
244 | str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> (SECTION_SHIFT - PMD_ORDER)]! | 231 | orr r3, r7, r3, lsl #SECTION_SHIFT |
245 | ldr r6, =(_end - 1) | 232 | add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) |
246 | add r0, r0, #4 | 233 | str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! |
234 | ldr r6, =(_edata_loc - 1) | ||
235 | add r0, r0, #1 << PMD_ORDER | ||
247 | add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) | 236 | add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) |
248 | 1: cmp r0, r6 | 237 | 1: cmp r0, r6 |
249 | add r3, r3, #1 << 20 | 238 | add r3, r3, #1 << SECTION_SHIFT |
250 | strls r3, [r0], #4 | 239 | strls r3, [r0], #1 << PMD_ORDER |
251 | bls 1b | 240 | bls 1b |
252 | #endif | 241 | #endif |
253 | 242 | ||
254 | /* | 243 | /* |
255 | * Then map boot params address in r2 or the first 1MB (2MB with LPAE) | 244 | * Then map boot params address in r2 if specified. |
256 | * of ram if boot params address is not specified. | ||
257 | */ | 245 | */ |
258 | mov r0, r2, lsr #SECTION_SHIFT | 246 | mov r0, r2, lsr #SECTION_SHIFT |
259 | movs r0, r0, lsl #SECTION_SHIFT | 247 | movs r0, r0, lsl #SECTION_SHIFT |
260 | moveq r0, r8 | 248 | subne r3, r0, r8 |
261 | sub r3, r0, r8 | 249 | addne r3, r3, #PAGE_OFFSET |
262 | add r3, r3, #PAGE_OFFSET | 250 | addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) |
263 | add r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) | 251 | orrne r6, r7, r0 |
264 | orr r6, r7, r0 | 252 | strne r6, [r3] |
265 | str r6, [r3] | ||
266 | 253 | ||
267 | #ifdef CONFIG_DEBUG_LL | 254 | #ifdef CONFIG_DEBUG_LL |
268 | #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) | 255 | #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) |
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index a02eada3aa5d..ab243b87118d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -47,17 +47,14 @@ static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); | |||
47 | /* Set at runtime when we know what CPU type we are. */ | 47 | /* Set at runtime when we know what CPU type we are. */ |
48 | static struct arm_pmu *cpu_pmu; | 48 | static struct arm_pmu *cpu_pmu; |
49 | 49 | ||
50 | enum arm_perf_pmu_ids | 50 | const char *perf_pmu_name(void) |
51 | armpmu_get_pmu_id(void) | ||
52 | { | 51 | { |
53 | int id = -ENODEV; | 52 | if (!cpu_pmu) |
54 | 53 | return NULL; | |
55 | if (cpu_pmu != NULL) | ||
56 | id = cpu_pmu->id; | ||
57 | 54 | ||
58 | return id; | 55 | return cpu_pmu->pmu.name; |
59 | } | 56 | } |
60 | EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); | 57 | EXPORT_SYMBOL_GPL(perf_pmu_name); |
61 | 58 | ||
62 | int perf_num_counters(void) | 59 | int perf_num_counters(void) |
63 | { | 60 | { |
@@ -760,7 +757,7 @@ init_hw_perf_events(void) | |||
760 | cpu_pmu->name, cpu_pmu->num_events); | 757 | cpu_pmu->name, cpu_pmu->num_events); |
761 | cpu_pmu_init(cpu_pmu); | 758 | cpu_pmu_init(cpu_pmu); |
762 | register_cpu_notifier(&pmu_cpu_notifier); | 759 | register_cpu_notifier(&pmu_cpu_notifier); |
763 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); | 760 | armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW); |
764 | } else { | 761 | } else { |
765 | pr_info("no hardware support available\n"); | 762 | pr_info("no hardware support available\n"); |
766 | } | 763 | } |
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index ab627a740fa3..c90fcb2b6967 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -650,7 +650,6 @@ static int armv6_map_event(struct perf_event *event) | |||
650 | } | 650 | } |
651 | 651 | ||
652 | static struct arm_pmu armv6pmu = { | 652 | static struct arm_pmu armv6pmu = { |
653 | .id = ARM_PERF_PMU_ID_V6, | ||
654 | .name = "v6", | 653 | .name = "v6", |
655 | .handle_irq = armv6pmu_handle_irq, | 654 | .handle_irq = armv6pmu_handle_irq, |
656 | .enable = armv6pmu_enable_event, | 655 | .enable = armv6pmu_enable_event, |
@@ -685,7 +684,6 @@ static int armv6mpcore_map_event(struct perf_event *event) | |||
685 | } | 684 | } |
686 | 685 | ||
687 | static struct arm_pmu armv6mpcore_pmu = { | 686 | static struct arm_pmu armv6mpcore_pmu = { |
688 | .id = ARM_PERF_PMU_ID_V6MP, | ||
689 | .name = "v6mpcore", | 687 | .name = "v6mpcore", |
690 | .handle_irq = armv6pmu_handle_irq, | 688 | .handle_irq = armv6pmu_handle_irq, |
691 | .enable = armv6pmu_enable_event, | 689 | .enable = armv6pmu_enable_event, |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index d3c536068162..f04070bd2183 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1258,7 +1258,6 @@ static u32 __init armv7_read_num_pmnc_events(void) | |||
1258 | 1258 | ||
1259 | static struct arm_pmu *__init armv7_a8_pmu_init(void) | 1259 | static struct arm_pmu *__init armv7_a8_pmu_init(void) |
1260 | { | 1260 | { |
1261 | armv7pmu.id = ARM_PERF_PMU_ID_CA8; | ||
1262 | armv7pmu.name = "ARMv7 Cortex-A8"; | 1261 | armv7pmu.name = "ARMv7 Cortex-A8"; |
1263 | armv7pmu.map_event = armv7_a8_map_event; | 1262 | armv7pmu.map_event = armv7_a8_map_event; |
1264 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1263 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
@@ -1267,7 +1266,6 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void) | |||
1267 | 1266 | ||
1268 | static struct arm_pmu *__init armv7_a9_pmu_init(void) | 1267 | static struct arm_pmu *__init armv7_a9_pmu_init(void) |
1269 | { | 1268 | { |
1270 | armv7pmu.id = ARM_PERF_PMU_ID_CA9; | ||
1271 | armv7pmu.name = "ARMv7 Cortex-A9"; | 1269 | armv7pmu.name = "ARMv7 Cortex-A9"; |
1272 | armv7pmu.map_event = armv7_a9_map_event; | 1270 | armv7pmu.map_event = armv7_a9_map_event; |
1273 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1271 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
@@ -1276,7 +1274,6 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void) | |||
1276 | 1274 | ||
1277 | static struct arm_pmu *__init armv7_a5_pmu_init(void) | 1275 | static struct arm_pmu *__init armv7_a5_pmu_init(void) |
1278 | { | 1276 | { |
1279 | armv7pmu.id = ARM_PERF_PMU_ID_CA5; | ||
1280 | armv7pmu.name = "ARMv7 Cortex-A5"; | 1277 | armv7pmu.name = "ARMv7 Cortex-A5"; |
1281 | armv7pmu.map_event = armv7_a5_map_event; | 1278 | armv7pmu.map_event = armv7_a5_map_event; |
1282 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1279 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
@@ -1285,7 +1282,6 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void) | |||
1285 | 1282 | ||
1286 | static struct arm_pmu *__init armv7_a15_pmu_init(void) | 1283 | static struct arm_pmu *__init armv7_a15_pmu_init(void) |
1287 | { | 1284 | { |
1288 | armv7pmu.id = ARM_PERF_PMU_ID_CA15; | ||
1289 | armv7pmu.name = "ARMv7 Cortex-A15"; | 1285 | armv7pmu.name = "ARMv7 Cortex-A15"; |
1290 | armv7pmu.map_event = armv7_a15_map_event; | 1286 | armv7pmu.map_event = armv7_a15_map_event; |
1291 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1287 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
@@ -1295,7 +1291,6 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void) | |||
1295 | 1291 | ||
1296 | static struct arm_pmu *__init armv7_a7_pmu_init(void) | 1292 | static struct arm_pmu *__init armv7_a7_pmu_init(void) |
1297 | { | 1293 | { |
1298 | armv7pmu.id = ARM_PERF_PMU_ID_CA7; | ||
1299 | armv7pmu.name = "ARMv7 Cortex-A7"; | 1294 | armv7pmu.name = "ARMv7 Cortex-A7"; |
1300 | armv7pmu.map_event = armv7_a7_map_event; | 1295 | armv7pmu.map_event = armv7_a7_map_event; |
1301 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1296 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index e34e7254e652..f759fe0bab63 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -435,7 +435,6 @@ static int xscale_map_event(struct perf_event *event) | |||
435 | } | 435 | } |
436 | 436 | ||
437 | static struct arm_pmu xscale1pmu = { | 437 | static struct arm_pmu xscale1pmu = { |
438 | .id = ARM_PERF_PMU_ID_XSCALE1, | ||
439 | .name = "xscale1", | 438 | .name = "xscale1", |
440 | .handle_irq = xscale1pmu_handle_irq, | 439 | .handle_irq = xscale1pmu_handle_irq, |
441 | .enable = xscale1pmu_enable_event, | 440 | .enable = xscale1pmu_enable_event, |
@@ -803,7 +802,6 @@ xscale2pmu_write_counter(int counter, u32 val) | |||
803 | } | 802 | } |
804 | 803 | ||
805 | static struct arm_pmu xscale2pmu = { | 804 | static struct arm_pmu xscale2pmu = { |
806 | .id = ARM_PERF_PMU_ID_XSCALE2, | ||
807 | .name = "xscale2", | 805 | .name = "xscale2", |
808 | .handle_irq = xscale2pmu_handle_irq, | 806 | .handle_irq = xscale2pmu_handle_irq, |
809 | .enable = xscale2pmu_enable_event, | 807 | .enable = xscale2pmu_enable_event, |
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd31..dab711e6e1ca 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -907,16 +907,16 @@ long arch_ptrace(struct task_struct *child, long request, | |||
907 | return ret; | 907 | return ret; |
908 | } | 908 | } |
909 | 909 | ||
910 | asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) | 910 | enum ptrace_syscall_dir { |
911 | PTRACE_SYSCALL_ENTER = 0, | ||
912 | PTRACE_SYSCALL_EXIT, | ||
913 | }; | ||
914 | |||
915 | static int ptrace_syscall_trace(struct pt_regs *regs, int scno, | ||
916 | enum ptrace_syscall_dir dir) | ||
911 | { | 917 | { |
912 | unsigned long ip; | 918 | unsigned long ip; |
913 | 919 | ||
914 | if (why) | ||
915 | audit_syscall_exit(regs); | ||
916 | else | ||
917 | audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, | ||
918 | regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); | ||
919 | |||
920 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 920 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) |
921 | return scno; | 921 | return scno; |
922 | 922 | ||
@@ -927,14 +927,28 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) | |||
927 | * IP = 0 -> entry, =1 -> exit | 927 | * IP = 0 -> entry, =1 -> exit |
928 | */ | 928 | */ |
929 | ip = regs->ARM_ip; | 929 | ip = regs->ARM_ip; |
930 | regs->ARM_ip = why; | 930 | regs->ARM_ip = dir; |
931 | 931 | ||
932 | if (why) | 932 | if (dir == PTRACE_SYSCALL_EXIT) |
933 | tracehook_report_syscall_exit(regs, 0); | 933 | tracehook_report_syscall_exit(regs, 0); |
934 | else if (tracehook_report_syscall_entry(regs)) | 934 | else if (tracehook_report_syscall_entry(regs)) |
935 | current_thread_info()->syscall = -1; | 935 | current_thread_info()->syscall = -1; |
936 | 936 | ||
937 | regs->ARM_ip = ip; | 937 | regs->ARM_ip = ip; |
938 | |||
939 | return current_thread_info()->syscall; | 938 | return current_thread_info()->syscall; |
940 | } | 939 | } |
940 | |||
941 | asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) | ||
942 | { | ||
943 | int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_ENTER); | ||
944 | audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, | ||
945 | regs->ARM_r2, regs->ARM_r3); | ||
946 | return ret; | ||
947 | } | ||
948 | |||
949 | asmlinkage int syscall_trace_exit(struct pt_regs *regs, int scno) | ||
950 | { | ||
951 | int ret = ptrace_syscall_trace(regs, scno, PTRACE_SYSCALL_EXIT); | ||
952 | audit_syscall_exit(regs); | ||
953 | return ret; | ||
954 | } | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2c7217d971db..aea74f5bc34a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -179,7 +179,7 @@ void __ref cpu_die(void) | |||
179 | mb(); | 179 | mb(); |
180 | 180 | ||
181 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ | 181 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ |
182 | complete(&cpu_died); | 182 | RCU_NONIDLE(complete(&cpu_died)); |
183 | 183 | ||
184 | /* | 184 | /* |
185 | * actual CPU shutdown procedure is at least platform (if not | 185 | * actual CPU shutdown procedure is at least platform (if not |
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 8200deaa14f6..198b08456e90 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -17,11 +17,190 @@ | |||
17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
18 | #include <linux/node.h> | 18 | #include <linux/node.h> |
19 | #include <linux/nodemask.h> | 19 | #include <linux/nodemask.h> |
20 | #include <linux/of.h> | ||
20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/slab.h> | ||
21 | 23 | ||
22 | #include <asm/cputype.h> | 24 | #include <asm/cputype.h> |
23 | #include <asm/topology.h> | 25 | #include <asm/topology.h> |
24 | 26 | ||
27 | /* | ||
28 | * cpu power scale management | ||
29 | */ | ||
30 | |||
31 | /* | ||
32 | * cpu power table | ||
33 | * This per cpu data structure describes the relative capacity of each core. | ||
34 | * On a heterogeneous system, cores don't have the same computation capacity | ||
35 | * and we reflect that difference in the cpu_power field so the scheduler can | ||
36 | * take this difference into account during load balance. A per cpu structure | ||
37 | * is preferred because each CPU updates its own cpu_power field during the | ||
38 | * load balance except for idle cores. One idle core is selected to run the | ||
39 | * rebalance_domains for all idle cores and the cpu_power can be updated | ||
40 | * during this sequence. | ||
41 | */ | ||
42 | static DEFINE_PER_CPU(unsigned long, cpu_scale); | ||
43 | |||
44 | unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) | ||
45 | { | ||
46 | return per_cpu(cpu_scale, cpu); | ||
47 | } | ||
48 | |||
49 | static void set_power_scale(unsigned int cpu, unsigned long power) | ||
50 | { | ||
51 | per_cpu(cpu_scale, cpu) = power; | ||
52 | } | ||
53 | |||
54 | #ifdef CONFIG_OF | ||
55 | struct cpu_efficiency { | ||
56 | const char *compatible; | ||
57 | unsigned long efficiency; | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * Table of relative efficiency of each processor. | ||
62 | * The efficiency value must fit in 20 bits and the final | ||
63 | * cpu_scale value must be in the range | ||
64 | * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 | ||
65 | * in order to return at most 1 when DIV_ROUND_CLOSEST | ||
66 | * is used to compute the capacity of a CPU. | ||
67 | * Processors that are not defined in the table | ||
68 | * use the default SCHED_POWER_SCALE value for cpu_scale. | ||
69 | */ | ||
70 | struct cpu_efficiency table_efficiency[] = { | ||
71 | {"arm,cortex-a15", 3891}, | ||
72 | {"arm,cortex-a7", 2048}, | ||
73 | {NULL, }, | ||
74 | }; | ||
75 | |||
76 | struct cpu_capacity { | ||
77 | unsigned long hwid; | ||
78 | unsigned long capacity; | ||
79 | }; | ||
80 | |||
81 | struct cpu_capacity *cpu_capacity; | ||
82 | |||
83 | unsigned long middle_capacity = 1; | ||
84 | |||
85 | /* | ||
86 | * Iterate over all CPU descriptors in the DT and compute the efficiency | ||
87 | * (as per table_efficiency). Also calculate a middle efficiency | ||
88 | * as close as possible to (max{eff_i} - min{eff_i}) / 2. | ||
89 | * This is later used to scale the cpu_power field such that an | ||
90 | * 'average' CPU is of middle power. Also see the comments near | ||
91 | * table_efficiency[] and update_cpu_power(). | ||
92 | */ | ||
93 | static void __init parse_dt_topology(void) | ||
94 | { | ||
95 | struct cpu_efficiency *cpu_eff; | ||
96 | struct device_node *cn = NULL; | ||
97 | unsigned long min_capacity = (unsigned long)(-1); | ||
98 | unsigned long max_capacity = 0; | ||
99 | unsigned long capacity = 0; | ||
100 | int alloc_size, cpu = 0; | ||
101 | |||
102 | alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); | ||
103 | cpu_capacity = (struct cpu_capacity *)kzalloc(alloc_size, GFP_NOWAIT); | ||
104 | |||
105 | while ((cn = of_find_node_by_type(cn, "cpu"))) { | ||
106 | const u32 *rate, *reg; | ||
107 | int len; | ||
108 | |||
109 | if (cpu >= num_possible_cpus()) | ||
110 | break; | ||
111 | |||
112 | for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) | ||
113 | if (of_device_is_compatible(cn, cpu_eff->compatible)) | ||
114 | break; | ||
115 | |||
116 | if (cpu_eff->compatible == NULL) | ||
117 | continue; | ||
118 | |||
119 | rate = of_get_property(cn, "clock-frequency", &len); | ||
120 | if (!rate || len != 4) { | ||
121 | pr_err("%s missing clock-frequency property\n", | ||
122 | cn->full_name); | ||
123 | continue; | ||
124 | } | ||
125 | |||
126 | reg = of_get_property(cn, "reg", &len); | ||
127 | if (!reg || len != 4) { | ||
128 | pr_err("%s missing reg property\n", cn->full_name); | ||
129 | continue; | ||
130 | } | ||
131 | |||
132 | capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; | ||
133 | |||
134 | /* Save min capacity of the system */ | ||
135 | if (capacity < min_capacity) | ||
136 | min_capacity = capacity; | ||
137 | |||
138 | /* Save max capacity of the system */ | ||
139 | if (capacity > max_capacity) | ||
140 | max_capacity = capacity; | ||
141 | |||
142 | cpu_capacity[cpu].capacity = capacity; | ||
143 | cpu_capacity[cpu++].hwid = be32_to_cpup(reg); | ||
144 | } | ||
145 | |||
146 | if (cpu < num_possible_cpus()) | ||
147 | cpu_capacity[cpu].hwid = (unsigned long)(-1); | ||
148 | |||
149 | /* If min and max capacities are equal, we bypass the update of the | ||
150 | * cpu_scale because all CPUs have the same capacity. Otherwise, we | ||
151 | * compute a middle_capacity factor that will ensure that the capacity | ||
152 | * of an 'average' CPU of the system will be as close as possible to | ||
153 | * SCHED_POWER_SCALE, which is the default value, but with the | ||
154 | * constraint explained near table_efficiency[]. | ||
155 | */ | ||
156 | if (min_capacity == max_capacity) | ||
157 | cpu_capacity[0].hwid = (unsigned long)(-1); | ||
158 | else if (4*max_capacity < (3*(max_capacity + min_capacity))) | ||
159 | middle_capacity = (min_capacity + max_capacity) | ||
160 | >> (SCHED_POWER_SHIFT+1); | ||
161 | else | ||
162 | middle_capacity = ((max_capacity / 3) | ||
163 | >> (SCHED_POWER_SHIFT-1)) + 1; | ||
164 | |||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Look for a custom capacity of a CPU in the cpu_capacity table during | ||
169 | * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but the | ||
170 | * function returns directly for SMP systems. | ||
171 | */ | ||
172 | void update_cpu_power(unsigned int cpu, unsigned long hwid) | ||
173 | { | ||
174 | unsigned int idx = 0; | ||
175 | |||
176 | /* look for the cpu's hwid in the cpu capacity table */ | ||
177 | for (idx = 0; idx < num_possible_cpus(); idx++) { | ||
178 | if (cpu_capacity[idx].hwid == hwid) | ||
179 | break; | ||
180 | |||
181 | if (cpu_capacity[idx].hwid == -1) | ||
182 | return; | ||
183 | } | ||
184 | |||
185 | if (idx == num_possible_cpus()) | ||
186 | return; | ||
187 | |||
188 | set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity); | ||
189 | |||
190 | printk(KERN_INFO "CPU%u: update cpu_power %lu\n", | ||
191 | cpu, arch_scale_freq_power(NULL, cpu)); | ||
192 | } | ||
193 | |||
194 | #else | ||
195 | static inline void parse_dt_topology(void) {} | ||
196 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} | ||
197 | #endif | ||
198 | |||
199 | |||
200 | /* | ||
201 | * cpu topology management | ||
202 | */ | ||
203 | |||
25 | #define MPIDR_SMP_BITMASK (0x3 << 30) | 204 | #define MPIDR_SMP_BITMASK (0x3 << 30) |
26 | #define MPIDR_SMP_VALUE (0x2 << 30) | 205 | #define MPIDR_SMP_VALUE (0x2 << 30) |
27 | 206 | ||
@@ -31,6 +210,7 @@ | |||
31 | * These masks reflect the current use of the affinity levels. | 210 | * These masks reflect the current use of the affinity levels. |
32 | * The affinity level can be up to 16 bits according to ARM ARM | 211 | * The affinity level can be up to 16 bits according to ARM ARM |
33 | */ | 212 | */ |
213 | #define MPIDR_HWID_BITMASK 0xFFFFFF | ||
34 | 214 | ||
35 | #define MPIDR_LEVEL0_MASK 0x3 | 215 | #define MPIDR_LEVEL0_MASK 0x3 |
36 | #define MPIDR_LEVEL0_SHIFT 0 | 216 | #define MPIDR_LEVEL0_SHIFT 0 |
@@ -41,6 +221,9 @@ | |||
41 | #define MPIDR_LEVEL2_MASK 0xFF | 221 | #define MPIDR_LEVEL2_MASK 0xFF |
42 | #define MPIDR_LEVEL2_SHIFT 16 | 222 | #define MPIDR_LEVEL2_SHIFT 16 |
43 | 223 | ||
224 | /* | ||
225 | * cpu topology table | ||
226 | */ | ||
44 | struct cputopo_arm cpu_topology[NR_CPUS]; | 227 | struct cputopo_arm cpu_topology[NR_CPUS]; |
45 | 228 | ||
46 | const struct cpumask *cpu_coregroup_mask(int cpu) | 229 | const struct cpumask *cpu_coregroup_mask(int cpu) |
@@ -48,6 +231,32 @@ const struct cpumask *cpu_coregroup_mask(int cpu) | |||
48 | return &cpu_topology[cpu].core_sibling; | 231 | return &cpu_topology[cpu].core_sibling; |
49 | } | 232 | } |
50 | 233 | ||
234 | void update_siblings_masks(unsigned int cpuid) | ||
235 | { | ||
236 | struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | ||
237 | int cpu; | ||
238 | |||
239 | /* update core and thread sibling masks */ | ||
240 | for_each_possible_cpu(cpu) { | ||
241 | cpu_topo = &cpu_topology[cpu]; | ||
242 | |||
243 | if (cpuid_topo->socket_id != cpu_topo->socket_id) | ||
244 | continue; | ||
245 | |||
246 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
247 | if (cpu != cpuid) | ||
248 | cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); | ||
249 | |||
250 | if (cpuid_topo->core_id != cpu_topo->core_id) | ||
251 | continue; | ||
252 | |||
253 | cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); | ||
254 | if (cpu != cpuid) | ||
255 | cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); | ||
256 | } | ||
257 | smp_wmb(); | ||
258 | } | ||
259 | |||
51 | /* | 260 | /* |
52 | * store_cpu_topology is called at boot when only one cpu is running | 261 | * store_cpu_topology is called at boot when only one cpu is running |
53 | * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, | 262 | * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, |
@@ -57,7 +266,6 @@ void store_cpu_topology(unsigned int cpuid) | |||
57 | { | 266 | { |
58 | struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; | 267 | struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; |
59 | unsigned int mpidr; | 268 | unsigned int mpidr; |
60 | unsigned int cpu; | ||
61 | 269 | ||
62 | /* If the cpu topology has been already set, just return */ | 270 | /* If the cpu topology has been already set, just return */ |
63 | if (cpuid_topo->core_id != -1) | 271 | if (cpuid_topo->core_id != -1) |
@@ -99,26 +307,9 @@ void store_cpu_topology(unsigned int cpuid) | |||
99 | cpuid_topo->socket_id = -1; | 307 | cpuid_topo->socket_id = -1; |
100 | } | 308 | } |
101 | 309 | ||
102 | /* update core and thread sibling masks */ | 310 | update_siblings_masks(cpuid); |
103 | for_each_possible_cpu(cpu) { | 311 | |
104 | struct cputopo_arm *cpu_topo = &cpu_topology[cpu]; | 312 | update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK); |
105 | |||
106 | if (cpuid_topo->socket_id == cpu_topo->socket_id) { | ||
107 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
108 | if (cpu != cpuid) | ||
109 | cpumask_set_cpu(cpu, | ||
110 | &cpuid_topo->core_sibling); | ||
111 | |||
112 | if (cpuid_topo->core_id == cpu_topo->core_id) { | ||
113 | cpumask_set_cpu(cpuid, | ||
114 | &cpu_topo->thread_sibling); | ||
115 | if (cpu != cpuid) | ||
116 | cpumask_set_cpu(cpu, | ||
117 | &cpuid_topo->thread_sibling); | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | smp_wmb(); | ||
122 | 313 | ||
123 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", | 314 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", |
124 | cpuid, cpu_topology[cpuid].thread_id, | 315 | cpuid, cpu_topology[cpuid].thread_id, |
@@ -134,7 +325,7 @@ void init_cpu_topology(void) | |||
134 | { | 325 | { |
135 | unsigned int cpu; | 326 | unsigned int cpu; |
136 | 327 | ||
137 | /* init core mask */ | 328 | /* init core mask and power */ |
138 | for_each_possible_cpu(cpu) { | 329 | for_each_possible_cpu(cpu) { |
139 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); | 330 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); |
140 | 331 | ||
@@ -143,6 +334,10 @@ void init_cpu_topology(void) | |||
143 | cpu_topo->socket_id = -1; | 334 | cpu_topo->socket_id = -1; |
144 | cpumask_clear(&cpu_topo->core_sibling); | 335 | cpumask_clear(&cpu_topo->core_sibling); |
145 | cpumask_clear(&cpu_topo->thread_sibling); | 336 | cpumask_clear(&cpu_topo->thread_sibling); |
337 | |||
338 | set_power_scale(cpu, SCHED_POWER_SCALE); | ||
146 | } | 339 | } |
147 | smp_wmb(); | 340 | smp_wmb(); |
341 | |||
342 | parse_dt_topology(); | ||
148 | } | 343 | } |
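To make the scaling above concrete, here is a standalone sketch (plain userspace C, not kernel code) that replays the parse_dt_topology()/update_cpu_power() arithmetic for a hypothetical DT with one Cortex-A15 at 1.2 GHz and one Cortex-A7 at 1.0 GHz. The clock rates are invented for illustration; the efficiency values come from table_efficiency[] above, and SCHED_POWER_SHIFT is assumed to be 10 (SCHED_POWER_SCALE = 1024) as in this kernel:

#include <stdio.h>

#define SCHED_POWER_SHIFT	10	/* assumed: SCHED_POWER_SCALE = 1024 */

int main(void)
{
	/* capacity = (clock-frequency >> 20) * efficiency, as in parse_dt_topology() */
	unsigned long a15 = (1200000000UL >> 20) * 3891;	/* 4451304 */
	unsigned long a7  = (1000000000UL >> 20) * 2048;	/* 1951744 */
	unsigned long min = a7, max = a15, middle;

	if (4 * max < 3 * (max + min))
		middle = (min + max) >> (SCHED_POWER_SHIFT + 1);
	else
		middle = ((max / 3) >> (SCHED_POWER_SHIFT - 1)) + 1;

	/* update_cpu_power() then sets cpu_power = capacity / middle_capacity */
	printf("middle=%lu  A15 cpu_power=%lu  A7 cpu_power=%lu\n",
	       middle, a15 / middle, a7 / middle);	/* 3126, 1423, 624 */
	return 0;
}

The two results straddle SCHED_POWER_SCALE (1024), which is exactly the property the middle_capacity computation aims for: an 'average' CPU of the system ends up with the default cpu_power.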
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 3647170e9a16..8b97d739b17b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -233,9 +233,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
233 | #define S_ISA " ARM" | 233 | #define S_ISA " ARM" |
234 | #endif | 234 | #endif |
235 | 235 | ||
236 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) | 236 | static int __die(const char *str, int err, struct pt_regs *regs) |
237 | { | 237 | { |
238 | struct task_struct *tsk = thread->task; | 238 | struct task_struct *tsk = current; |
239 | static int die_counter; | 239 | static int die_counter; |
240 | int ret; | 240 | int ret; |
241 | 241 | ||
@@ -245,12 +245,12 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
245 | /* trap and error numbers are mostly meaningless on ARM */ | 245 | /* trap and error numbers are mostly meaningless on ARM */ |
246 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | 246 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); |
247 | if (ret == NOTIFY_STOP) | 247 | if (ret == NOTIFY_STOP) |
248 | return ret; | 248 | return 1; |
249 | 249 | ||
250 | print_modules(); | 250 | print_modules(); |
251 | __show_regs(regs); | 251 | __show_regs(regs); |
252 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", | 252 | printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", |
253 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); | 253 | TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); |
254 | 254 | ||
255 | if (!user_mode(regs) || in_interrupt()) { | 255 | if (!user_mode(regs) || in_interrupt()) { |
256 | dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, | 256 | dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, |
@@ -259,45 +259,77 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
259 | dump_instr(KERN_EMERG, regs); | 259 | dump_instr(KERN_EMERG, regs); |
260 | } | 260 | } |
261 | 261 | ||
262 | return ret; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
265 | static DEFINE_RAW_SPINLOCK(die_lock); | 265 | static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
266 | static int die_owner = -1; | ||
267 | static unsigned int die_nest_count; | ||
266 | 268 | ||
267 | /* | 269 | static unsigned long oops_begin(void) |
268 | * This function is protected against re-entrancy. | ||
269 | */ | ||
270 | void die(const char *str, struct pt_regs *regs, int err) | ||
271 | { | 270 | { |
272 | struct thread_info *thread = current_thread_info(); | 271 | int cpu; |
273 | int ret; | 272 | unsigned long flags; |
274 | enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; | ||
275 | 273 | ||
276 | oops_enter(); | 274 | oops_enter(); |
277 | 275 | ||
278 | raw_spin_lock_irq(&die_lock); | 276 | /* racy, but better than risking deadlock. */ |
277 | raw_local_irq_save(flags); | ||
278 | cpu = smp_processor_id(); | ||
279 | if (!arch_spin_trylock(&die_lock)) { | ||
280 | if (cpu == die_owner) | ||
281 | /* nested oops. should stop eventually */; | ||
282 | else | ||
283 | arch_spin_lock(&die_lock); | ||
284 | } | ||
285 | die_nest_count++; | ||
286 | die_owner = cpu; | ||
279 | console_verbose(); | 287 | console_verbose(); |
280 | bust_spinlocks(1); | 288 | bust_spinlocks(1); |
281 | if (!user_mode(regs)) | 289 | return flags; |
282 | bug_type = report_bug(regs->ARM_pc, regs); | 290 | } |
283 | if (bug_type != BUG_TRAP_TYPE_NONE) | ||
284 | str = "Oops - BUG"; | ||
285 | ret = __die(str, err, thread, regs); | ||
286 | 291 | ||
287 | if (regs && kexec_should_crash(thread->task)) | 292 | static void oops_end(unsigned long flags, struct pt_regs *regs, int signr) |
293 | { | ||
294 | if (regs && kexec_should_crash(current)) | ||
288 | crash_kexec(regs); | 295 | crash_kexec(regs); |
289 | 296 | ||
290 | bust_spinlocks(0); | 297 | bust_spinlocks(0); |
298 | die_owner = -1; | ||
291 | add_taint(TAINT_DIE); | 299 | add_taint(TAINT_DIE); |
292 | raw_spin_unlock_irq(&die_lock); | 300 | die_nest_count--; |
301 | if (!die_nest_count) | ||
302 | /* Nest count reaches zero, release the lock. */ | ||
303 | arch_spin_unlock(&die_lock); | ||
304 | raw_local_irq_restore(flags); | ||
293 | oops_exit(); | 305 | oops_exit(); |
294 | 306 | ||
295 | if (in_interrupt()) | 307 | if (in_interrupt()) |
296 | panic("Fatal exception in interrupt"); | 308 | panic("Fatal exception in interrupt"); |
297 | if (panic_on_oops) | 309 | if (panic_on_oops) |
298 | panic("Fatal exception"); | 310 | panic("Fatal exception"); |
299 | if (ret != NOTIFY_STOP) | 311 | if (signr) |
300 | do_exit(SIGSEGV); | 312 | do_exit(signr); |
313 | } | ||
314 | |||
315 | /* | ||
316 | * This function is protected against re-entrancy. | ||
317 | */ | ||
318 | void die(const char *str, struct pt_regs *regs, int err) | ||
319 | { | ||
320 | enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; | ||
321 | unsigned long flags = oops_begin(); | ||
322 | int sig = SIGSEGV; | ||
323 | |||
324 | if (!user_mode(regs)) | ||
325 | bug_type = report_bug(regs->ARM_pc, regs); | ||
326 | if (bug_type != BUG_TRAP_TYPE_NONE) | ||
327 | str = "Oops - BUG"; | ||
328 | |||
329 | if (__die(str, err, regs)) | ||
330 | sig = 0; | ||
331 | |||
332 | oops_end(flags, regs, sig); | ||
301 | } | 333 | } |
302 | 334 | ||
303 | void arm_notify_die(const char *str, struct pt_regs *regs, | 335 | void arm_notify_die(const char *str, struct pt_regs *regs, |
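The die() rework replaces the plain raw spinlock with an owner-tracking lock plus nest count, essentially the same scheme the x86 oops path uses, so a CPU that oopses while already printing an oops no longer deadlocks on itself. Below is a userspace analogue of the pattern using C11 atomics, shown purely as a sketch; the kernel code above is the authoritative version, and it is deliberately "racy, but better than risking deadlock":

#include <stdatomic.h>

static atomic_flag oops_lock_flag = ATOMIC_FLAG_INIT;
static int oops_owner = -1;
static unsigned int oops_nest;

static void oops_lock(int cpu)
{
	/* re-entry by the owning CPU falls through instead of spinning */
	if (atomic_flag_test_and_set(&oops_lock_flag) && cpu != oops_owner)
		while (atomic_flag_test_and_set(&oops_lock_flag))
			;	/* another CPU owns the lock: wait for it */
	oops_owner = cpu;
	oops_nest++;
}

static void oops_unlock(void)
{
	oops_owner = -1;
	if (--oops_nest == 0)		/* only the outermost exit releases it */
		atomic_flag_clear(&oops_lock_flag);
}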
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile index 992769ae2599..2473fd1fd51c 100644 --- a/arch/arm/lib/Makefile +++ b/arch/arm/lib/Makefile | |||
@@ -6,9 +6,8 @@ | |||
6 | 6 | ||
7 | lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ | 7 | lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ |
8 | csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ | 8 | csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ |
9 | delay.o findbit.o memchr.o memcpy.o \ | 9 | delay.o delay-loop.o findbit.o memchr.o memcpy.o \ |
10 | memmove.o memset.o memzero.o setbit.o \ | 10 | memmove.o memset.o memzero.o setbit.o \ |
11 | strncpy_from_user.o strnlen_user.o \ | ||
12 | strchr.o strrchr.o \ | 11 | strchr.o strrchr.o \ |
13 | testchangebit.o testclearbit.o testsetbit.o \ | 12 | testchangebit.o testclearbit.o testsetbit.o \ |
14 | ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ | 13 | ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ |
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay-loop.S index 3c9a05c8d20b..36b668d8e121 100644 --- a/arch/arm/lib/delay.S +++ b/arch/arm/lib/delay-loop.S | |||
@@ -9,11 +9,11 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/param.h> | 12 | #include <asm/delay.h> |
13 | .text | 13 | .text |
14 | 14 | ||
15 | .LC0: .word loops_per_jiffy | 15 | .LC0: .word loops_per_jiffy |
16 | .LC1: .word (2199023*HZ)>>11 | 16 | .LC1: .word UDELAY_MULT |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * r0 <= 2000 | 19 | * r0 <= 2000 |
@@ -21,10 +21,10 @@ | |||
21 | * HZ <= 1000 | 21 | * HZ <= 1000 |
22 | */ | 22 | */ |
23 | 23 | ||
24 | ENTRY(__udelay) | 24 | ENTRY(__loop_udelay) |
25 | ldr r2, .LC1 | 25 | ldr r2, .LC1 |
26 | mul r0, r2, r0 | 26 | mul r0, r2, r0 |
27 | ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06 | 27 | ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06 |
28 | mov r1, #-1 | 28 | mov r1, #-1 |
29 | ldr r2, .LC0 | 29 | ldr r2, .LC0 |
30 | ldr r2, [r2] @ max = 0x01ffffff | 30 | ldr r2, [r2] @ max = 0x01ffffff |
@@ -39,12 +39,10 @@ ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06 | |||
39 | 39 | ||
40 | /* | 40 | /* |
41 | * loops = r0 * HZ * loops_per_jiffy / 1000000 | 41 | * loops = r0 * HZ * loops_per_jiffy / 1000000 |
42 | * | ||
43 | * Oh, if only we had a cycle counter... | ||
44 | */ | 42 | */ |
45 | 43 | ||
46 | @ Delay routine | 44 | @ Delay routine |
47 | ENTRY(__delay) | 45 | ENTRY(__loop_delay) |
48 | subs r0, r0, #1 | 46 | subs r0, r0, #1 |
49 | #if 0 | 47 | #if 0 |
50 | movls pc, lr | 48 | movls pc, lr |
@@ -62,8 +60,8 @@ ENTRY(__delay) | |||
62 | movls pc, lr | 60 | movls pc, lr |
63 | subs r0, r0, #1 | 61 | subs r0, r0, #1 |
64 | #endif | 62 | #endif |
65 | bhi __delay | 63 | bhi __loop_delay |
66 | mov pc, lr | 64 | mov pc, lr |
67 | ENDPROC(__udelay) | 65 | ENDPROC(__loop_udelay) |
68 | ENDPROC(__const_udelay) | 66 | ENDPROC(__loop_const_udelay) |
69 | ENDPROC(__delay) | 67 | ENDPROC(__loop_delay) |
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c new file mode 100644 index 000000000000..d6dacc69254e --- /dev/null +++ b/arch/arm/lib/delay.c | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * Delay loops based on the OpenRISC implementation. | ||
3 | * | ||
4 | * Copyright (C) 2012 ARM Limited | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | * | ||
19 | * Author: Will Deacon <will.deacon@arm.com> | ||
20 | */ | ||
21 | |||
22 | #include <linux/delay.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/timex.h> | ||
27 | |||
28 | /* | ||
29 | * Default to the loop-based delay implementation. | ||
30 | */ | ||
31 | struct arm_delay_ops arm_delay_ops = { | ||
32 | .delay = __loop_delay, | ||
33 | .const_udelay = __loop_const_udelay, | ||
34 | .udelay = __loop_udelay, | ||
35 | }; | ||
36 | |||
37 | #ifdef ARCH_HAS_READ_CURRENT_TIMER | ||
38 | static void __timer_delay(unsigned long cycles) | ||
39 | { | ||
40 | cycles_t start = get_cycles(); | ||
41 | |||
42 | while ((get_cycles() - start) < cycles) | ||
43 | cpu_relax(); | ||
44 | } | ||
45 | |||
46 | static void __timer_const_udelay(unsigned long xloops) | ||
47 | { | ||
48 | unsigned long long loops = xloops; | ||
49 | loops *= loops_per_jiffy; | ||
50 | __timer_delay(loops >> UDELAY_SHIFT); | ||
51 | } | ||
52 | |||
53 | static void __timer_udelay(unsigned long usecs) | ||
54 | { | ||
55 | __timer_const_udelay(usecs * UDELAY_MULT); | ||
56 | } | ||
57 | |||
58 | void __init init_current_timer_delay(unsigned long freq) | ||
59 | { | ||
60 | pr_info("Switching to timer-based delay loop\n"); | ||
61 | lpj_fine = freq / HZ; | ||
62 | arm_delay_ops.delay = __timer_delay; | ||
63 | arm_delay_ops.const_udelay = __timer_const_udelay; | ||
64 | arm_delay_ops.udelay = __timer_udelay; | ||
65 | } | ||
66 | |||
67 | unsigned long __cpuinit calibrate_delay_is_known(void) | ||
68 | { | ||
69 | return lpj_fine; | ||
70 | } | ||
71 | #endif | ||
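The new arm_delay_ops starts out pointing at the renamed __loop_*delay routines and only switches to the timer-based versions when a platform with ARCH_HAS_READ_CURRENT_TIMER and a working get_cycles() calls init_current_timer_delay(). A hedged sketch of what that opt-in looks like from a platform timer driver; the board hook name and the 24 MHz rate are invented for illustration, only init_current_timer_delay() itself comes from this patch:

#include <linux/delay.h>
#include <linux/init.h>

#define MY_COUNTER_RATE_HZ	24000000	/* hypothetical fixed counter rate */

static void __init my_board_timer_init(void)
{
	/* ...start the free-running counter so get_cycles() ticks... */

	/*
	 * lpj_fine becomes MY_COUNTER_RATE_HZ / HZ, calibrate_delay_is_known()
	 * then skips the bogomips loop, and udelay()/__delay() are rerouted
	 * from __loop_*delay to the __timer_*delay implementations above.
	 */
	init_current_timer_delay(MY_COUNTER_RATE_HZ);
}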
diff --git a/arch/arm/lib/strncpy_from_user.S b/arch/arm/lib/strncpy_from_user.S deleted file mode 100644 index f202d7bd1647..000000000000 --- a/arch/arm/lib/strncpy_from_user.S +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/lib/strncpy_from_user.S | ||
3 | * | ||
4 | * Copyright (C) 1995-2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/linkage.h> | ||
11 | #include <asm/assembler.h> | ||
12 | #include <asm/errno.h> | ||
13 | |||
14 | .text | ||
15 | .align 5 | ||
16 | |||
17 | /* | ||
18 | * Copy a string from user space to kernel space. | ||
19 | * r0 = dst, r1 = src, r2 = byte length | ||
20 | * returns the number of characters copied (strlen of copied string), | ||
21 | * -EFAULT on exception, or "len" if we fill the whole buffer | ||
22 | */ | ||
23 | ENTRY(__strncpy_from_user) | ||
24 | mov ip, r1 | ||
25 | 1: subs r2, r2, #1 | ||
26 | ldrusr r3, r1, 1, pl | ||
27 | bmi 2f | ||
28 | strb r3, [r0], #1 | ||
29 | teq r3, #0 | ||
30 | bne 1b | ||
31 | sub r1, r1, #1 @ take NUL character out of count | ||
32 | 2: sub r0, r1, ip | ||
33 | mov pc, lr | ||
34 | ENDPROC(__strncpy_from_user) | ||
35 | |||
36 | .pushsection .fixup,"ax" | ||
37 | .align 0 | ||
38 | 9001: mov r3, #0 | ||
39 | strb r3, [r0, #0] @ null terminate | ||
40 | mov r0, #-EFAULT | ||
41 | mov pc, lr | ||
42 | .popsection | ||
43 | |||
diff --git a/arch/arm/lib/strnlen_user.S b/arch/arm/lib/strnlen_user.S deleted file mode 100644 index 0ecbb459c4f1..000000000000 --- a/arch/arm/lib/strnlen_user.S +++ /dev/null | |||
@@ -1,40 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/lib/strnlen_user.S | ||
3 | * | ||
4 | * Copyright (C) 1995-2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/linkage.h> | ||
11 | #include <asm/assembler.h> | ||
12 | #include <asm/errno.h> | ||
13 | |||
14 | .text | ||
15 | .align 5 | ||
16 | |||
17 | /* Prototype: unsigned long __strnlen_user(const char *str, long n) | ||
18 | * Purpose : get length of a string in user memory | ||
19 | * Params : str - address of string in user memory | ||
20 | * Returns : length of string *including terminator* | ||
21 | * or zero on exception, or n + 1 if too long | ||
22 | */ | ||
23 | ENTRY(__strnlen_user) | ||
24 | mov r2, r0 | ||
25 | 1: | ||
26 | ldrusr r3, r0, 1 | ||
27 | teq r3, #0 | ||
28 | beq 2f | ||
29 | subs r1, r1, #1 | ||
30 | bne 1b | ||
31 | add r0, r0, #1 | ||
32 | 2: sub r0, r0, r2 | ||
33 | mov pc, lr | ||
34 | ENDPROC(__strnlen_user) | ||
35 | |||
36 | .pushsection .fixup,"ax" | ||
37 | .align 0 | ||
38 | 9001: mov r0, #0 | ||
39 | mov pc, lr | ||
40 | .popsection | ||
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index db0117ec55f4..e012dc8391cf 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c | |||
@@ -127,7 +127,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
127 | * the boot monitor to read the system wide flags register, | 127 | * the boot monitor to read the system wide flags register, |
128 | * and branch to the address found there. | 128 | * and branch to the address found there. |
129 | */ | 129 | */ |
130 | gic_raise_softirq(cpumask_of(cpu), 1); | 130 | gic_raise_softirq(cpumask_of(cpu), 0); |
131 | 131 | ||
132 | timeout = jiffies + (1 * HZ); | 132 | timeout = jiffies + (1 * HZ); |
133 | while (time_before(jiffies, timeout)) { | 133 | while (time_before(jiffies, timeout)) { |
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index 7d118b9bdd5f..9a35adf91232 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
@@ -125,7 +125,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
125 | booted = true; | 125 | booted = true; |
126 | } | 126 | } |
127 | 127 | ||
128 | gic_raise_softirq(cpumask_of(cpu), 1); | 128 | gic_raise_softirq(cpumask_of(cpu), 0); |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * Now the secondary core is starting up let it run its | 131 | * Now the secondary core is starting up let it run its |
diff --git a/arch/arm/mach-pxa/include/mach/regs-ost.h b/arch/arm/mach-pxa/include/mach/regs-ost.h index a3e5f86ef67e..628819995c52 100644 --- a/arch/arm/mach-pxa/include/mach/regs-ost.h +++ b/arch/arm/mach-pxa/include/mach/regs-ost.h | |||
@@ -7,17 +7,17 @@ | |||
7 | * OS Timer & Match Registers | 7 | * OS Timer & Match Registers |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define OSMR0 __REG(0x40A00000) /* */ | 10 | #define OSMR0 io_p2v(0x40A00000) /* */ |
11 | #define OSMR1 __REG(0x40A00004) /* */ | 11 | #define OSMR1 io_p2v(0x40A00004) /* */ |
12 | #define OSMR2 __REG(0x40A00008) /* */ | 12 | #define OSMR2 io_p2v(0x40A00008) /* */ |
13 | #define OSMR3 __REG(0x40A0000C) /* */ | 13 | #define OSMR3 io_p2v(0x40A0000C) /* */ |
14 | #define OSMR4 __REG(0x40A00080) /* */ | 14 | #define OSMR4 io_p2v(0x40A00080) /* */ |
15 | #define OSCR __REG(0x40A00010) /* OS Timer Counter Register */ | 15 | #define OSCR io_p2v(0x40A00010) /* OS Timer Counter Register */ |
16 | #define OSCR4 __REG(0x40A00040) /* OS Timer Counter Register */ | 16 | #define OSCR4 io_p2v(0x40A00040) /* OS Timer Counter Register */ |
17 | #define OMCR4 __REG(0x40A000C0) /* */ | 17 | #define OMCR4 io_p2v(0x40A000C0) /* */ |
18 | #define OSSR __REG(0x40A00014) /* OS Timer Status Register */ | 18 | #define OSSR io_p2v(0x40A00014) /* OS Timer Status Register */ |
19 | #define OWER __REG(0x40A00018) /* OS Timer Watchdog Enable Register */ | 19 | #define OWER io_p2v(0x40A00018) /* OS Timer Watchdog Enable Register */ |
20 | #define OIER __REG(0x40A0001C) /* OS Timer Interrupt Enable Register */ | 20 | #define OIER io_p2v(0x40A0001C) /* OS Timer Interrupt Enable Register */ |
21 | 21 | ||
22 | #define OSSR_M3 (1 << 3) /* Match status channel 3 */ | 22 | #define OSSR_M3 (1 << 3) /* Match status channel 3 */ |
23 | #define OSSR_M2 (1 << 2) /* Match status channel 2 */ | 23 | #define OSSR_M2 (1 << 2) /* Match status channel 2 */ |
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c index b4528899ef08..3fab583755d4 100644 --- a/arch/arm/mach-pxa/reset.c +++ b/arch/arm/mach-pxa/reset.c | |||
@@ -77,9 +77,10 @@ static void do_gpio_reset(void) | |||
77 | static void do_hw_reset(void) | 77 | static void do_hw_reset(void) |
78 | { | 78 | { |
79 | /* Initialize the watchdog and let it fire */ | 79 | /* Initialize the watchdog and let it fire */ |
80 | OWER = OWER_WME; | 80 | writel_relaxed(OWER_WME, OWER); |
81 | OSSR = OSSR_M3; | 81 | writel_relaxed(OSSR_M3, OSSR); |
82 | OSMR3 = OSCR + 368640; /* ... in 100 ms */ | 82 | /* ... in 100 ms */ |
83 | writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3); | ||
83 | } | 84 | } |
84 | 85 | ||
85 | void pxa_restart(char mode, const char *cmd) | 86 | void pxa_restart(char mode, const char *cmd) |
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c index 3d6c9bd90de6..4bc47d63698b 100644 --- a/arch/arm/mach-pxa/time.c +++ b/arch/arm/mach-pxa/time.c | |||
@@ -35,7 +35,7 @@ | |||
35 | 35 | ||
36 | static u32 notrace pxa_read_sched_clock(void) | 36 | static u32 notrace pxa_read_sched_clock(void) |
37 | { | 37 | { |
38 | return OSCR; | 38 | return readl_relaxed(OSCR); |
39 | } | 39 | } |
40 | 40 | ||
41 | 41 | ||
@@ -47,8 +47,8 @@ pxa_ost0_interrupt(int irq, void *dev_id) | |||
47 | struct clock_event_device *c = dev_id; | 47 | struct clock_event_device *c = dev_id; |
48 | 48 | ||
49 | /* Disarm the compare/match, signal the event. */ | 49 | /* Disarm the compare/match, signal the event. */ |
50 | OIER &= ~OIER_E0; | 50 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); |
51 | OSSR = OSSR_M0; | 51 | writel_relaxed(OSSR_M0, OSSR); |
52 | c->event_handler(c); | 52 | c->event_handler(c); |
53 | 53 | ||
54 | return IRQ_HANDLED; | 54 | return IRQ_HANDLED; |
@@ -59,10 +59,10 @@ pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev) | |||
59 | { | 59 | { |
60 | unsigned long next, oscr; | 60 | unsigned long next, oscr; |
61 | 61 | ||
62 | OIER |= OIER_E0; | 62 | writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); |
63 | next = OSCR + delta; | 63 | next = readl_relaxed(OSCR) + delta; |
64 | OSMR0 = next; | 64 | writel_relaxed(next, OSMR0); |
65 | oscr = OSCR; | 65 | oscr = readl_relaxed(OSCR); |
66 | 66 | ||
67 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; | 67 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; |
68 | } | 68 | } |
@@ -72,15 +72,15 @@ pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev) | |||
72 | { | 72 | { |
73 | switch (mode) { | 73 | switch (mode) { |
74 | case CLOCK_EVT_MODE_ONESHOT: | 74 | case CLOCK_EVT_MODE_ONESHOT: |
75 | OIER &= ~OIER_E0; | 75 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); |
76 | OSSR = OSSR_M0; | 76 | writel_relaxed(OSSR_M0, OSSR); |
77 | break; | 77 | break; |
78 | 78 | ||
79 | case CLOCK_EVT_MODE_UNUSED: | 79 | case CLOCK_EVT_MODE_UNUSED: |
80 | case CLOCK_EVT_MODE_SHUTDOWN: | 80 | case CLOCK_EVT_MODE_SHUTDOWN: |
81 | /* initializing, released, or preparing for suspend */ | 81 | /* initializing, released, or preparing for suspend */ |
82 | OIER &= ~OIER_E0; | 82 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); |
83 | OSSR = OSSR_M0; | 83 | writel_relaxed(OSSR_M0, OSSR); |
84 | break; | 84 | break; |
85 | 85 | ||
86 | case CLOCK_EVT_MODE_RESUME: | 86 | case CLOCK_EVT_MODE_RESUME: |
@@ -108,8 +108,8 @@ static void __init pxa_timer_init(void) | |||
108 | { | 108 | { |
109 | unsigned long clock_tick_rate = get_clock_tick_rate(); | 109 | unsigned long clock_tick_rate = get_clock_tick_rate(); |
110 | 110 | ||
111 | OIER = 0; | 111 | writel_relaxed(0, OIER); |
112 | OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3; | 112 | writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); |
113 | 113 | ||
114 | setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate); | 114 | setup_sched_clock(pxa_read_sched_clock, 32, clock_tick_rate); |
115 | 115 | ||
@@ -122,7 +122,7 @@ static void __init pxa_timer_init(void) | |||
122 | 122 | ||
123 | setup_irq(IRQ_OST0, &pxa_ost0_irq); | 123 | setup_irq(IRQ_OST0, &pxa_ost0_irq); |
124 | 124 | ||
125 | clocksource_mmio_init(&OSCR, "oscr0", clock_tick_rate, 200, 32, | 125 | clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32, |
126 | clocksource_mmio_readl_up); | 126 | clocksource_mmio_readl_up); |
127 | clockevents_register_device(&ckevt_pxa_osmr0); | 127 | clockevents_register_device(&ckevt_pxa_osmr0); |
128 | } | 128 | } |
@@ -132,12 +132,12 @@ static unsigned long osmr[4], oier, oscr; | |||
132 | 132 | ||
133 | static void pxa_timer_suspend(void) | 133 | static void pxa_timer_suspend(void) |
134 | { | 134 | { |
135 | osmr[0] = OSMR0; | 135 | osmr[0] = readl_relaxed(OSMR0); |
136 | osmr[1] = OSMR1; | 136 | osmr[1] = readl_relaxed(OSMR1); |
137 | osmr[2] = OSMR2; | 137 | osmr[2] = readl_relaxed(OSMR2); |
138 | osmr[3] = OSMR3; | 138 | osmr[3] = readl_relaxed(OSMR3); |
139 | oier = OIER; | 139 | oier = readl_relaxed(OIER); |
140 | oscr = OSCR; | 140 | oscr = readl_relaxed(OSCR); |
141 | } | 141 | } |
142 | 142 | ||
143 | static void pxa_timer_resume(void) | 143 | static void pxa_timer_resume(void) |
@@ -151,12 +151,12 @@ static void pxa_timer_resume(void) | |||
151 | if (osmr[0] - oscr < MIN_OSCR_DELTA) | 151 | if (osmr[0] - oscr < MIN_OSCR_DELTA) |
152 | osmr[0] += MIN_OSCR_DELTA; | 152 | osmr[0] += MIN_OSCR_DELTA; |
153 | 153 | ||
154 | OSMR0 = osmr[0]; | 154 | writel_relaxed(osmr[0], OSMR0); |
155 | OSMR1 = osmr[1]; | 155 | writel_relaxed(osmr[1], OSMR1); |
156 | OSMR2 = osmr[2]; | 156 | writel_relaxed(osmr[2], OSMR2); |
157 | OSMR3 = osmr[3]; | 157 | writel_relaxed(osmr[3], OSMR3); |
158 | OIER = oier; | 158 | writel_relaxed(oier, OIER); |
159 | OSCR = oscr; | 159 | writel_relaxed(oscr, OSCR); |
160 | } | 160 | } |
161 | #else | 161 | #else |
162 | #define pxa_timer_suspend NULL | 162 | #define pxa_timer_suspend NULL |
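The PXA (and, below, SA-1100) OS-timer registers change from lvalue-style __REG() dereferences to __iomem addresses, so every access in the timer and reset code now goes through readl_relaxed()/writel_relaxed(). A hypothetical helper, shown only to name the recurring read-modify-write pattern (it is not part of the patch):

#include <linux/io.h>

static inline void ost_rmw(void __iomem *reg, u32 clear, u32 set)
{
	writel_relaxed((readl_relaxed(reg) & ~clear) | set, reg);
}

/*
 * e.g. ost_rmw(OIER, OIER_E0, 0) disarms match channel 0 and
 * ost_rmw(OIER, 0, OIER_E0) arms it, matching the open-coded sequences
 * in pxa_ost0_interrupt() and pxa_osmr0_set_next_event() above.
 */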
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index d1dc7f1a239c..d673211f121c 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c | |||
@@ -362,7 +362,7 @@ static void __init assabet_init(void) | |||
362 | static void __init map_sa1100_gpio_regs( void ) | 362 | static void __init map_sa1100_gpio_regs( void ) |
363 | { | 363 | { |
364 | unsigned long phys = __PREG(GPLR) & PMD_MASK; | 364 | unsigned long phys = __PREG(GPLR) & PMD_MASK; |
365 | unsigned long virt = io_p2v(phys); | 365 | unsigned long virt = (unsigned long)io_p2v(phys); |
366 | int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO); | 366 | int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO); |
367 | pmd_t *pmd; | 367 | pmd_t *pmd; |
368 | 368 | ||
diff --git a/arch/arm/mach-sa1100/cpu-sa1100.c b/arch/arm/mach-sa1100/cpu-sa1100.c index 19b2053f5af4..e8f4d1e19233 100644 --- a/arch/arm/mach-sa1100/cpu-sa1100.c +++ b/arch/arm/mach-sa1100/cpu-sa1100.c | |||
@@ -87,6 +87,7 @@ | |||
87 | #include <linux/types.h> | 87 | #include <linux/types.h> |
88 | #include <linux/init.h> | 88 | #include <linux/init.h> |
89 | #include <linux/cpufreq.h> | 89 | #include <linux/cpufreq.h> |
90 | #include <linux/io.h> | ||
90 | 91 | ||
91 | #include <asm/cputype.h> | 92 | #include <asm/cputype.h> |
92 | 93 | ||
diff --git a/arch/arm/mach-sa1100/cpu-sa1110.c b/arch/arm/mach-sa1100/cpu-sa1110.c index 675bf8ef97e8..48c45b0c92bb 100644 --- a/arch/arm/mach-sa1100/cpu-sa1110.c +++ b/arch/arm/mach-sa1100/cpu-sa1110.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/cpufreq.h> | 19 | #include <linux/cpufreq.h> |
20 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/io.h> | ||
22 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
23 | #include <linux/moduleparam.h> | 24 | #include <linux/moduleparam.h> |
24 | #include <linux/types.h> | 25 | #include <linux/types.h> |
diff --git a/arch/arm/mach-sa1100/include/mach/SA-1100.h b/arch/arm/mach-sa1100/include/mach/SA-1100.h index 3f2d1b60188c..0ac6cc08a19c 100644 --- a/arch/arm/mach-sa1100/include/mach/SA-1100.h +++ b/arch/arm/mach-sa1100/include/mach/SA-1100.h | |||
@@ -830,14 +830,14 @@ | |||
830 | * (read/write). | 830 | * (read/write). |
831 | */ | 831 | */ |
832 | 832 | ||
833 | #define OSMR0 __REG(0x90000000) /* OS timer Match Reg. 0 */ | 833 | #define OSMR0 io_p2v(0x90000000) /* OS timer Match Reg. 0 */ |
834 | #define OSMR1 __REG(0x90000004) /* OS timer Match Reg. 1 */ | 834 | #define OSMR1 io_p2v(0x90000004) /* OS timer Match Reg. 1 */ |
835 | #define OSMR2 __REG(0x90000008) /* OS timer Match Reg. 2 */ | 835 | #define OSMR2 io_p2v(0x90000008) /* OS timer Match Reg. 2 */ |
836 | #define OSMR3 __REG(0x9000000c) /* OS timer Match Reg. 3 */ | 836 | #define OSMR3 io_p2v(0x9000000c) /* OS timer Match Reg. 3 */ |
837 | #define OSCR __REG(0x90000010) /* OS timer Counter Reg. */ | 837 | #define OSCR io_p2v(0x90000010) /* OS timer Counter Reg. */ |
838 | #define OSSR __REG(0x90000014 ) /* OS timer Status Reg. */ | 838 | #define OSSR io_p2v(0x90000014) /* OS timer Status Reg. */ |
839 | #define OWER __REG(0x90000018 ) /* OS timer Watch-dog Enable Reg. */ | 839 | #define OWER io_p2v(0x90000018) /* OS timer Watch-dog Enable Reg. */ |
840 | #define OIER __REG(0x9000001C ) /* OS timer Interrupt Enable Reg. */ | 840 | #define OIER io_p2v(0x9000001C) /* OS timer Interrupt Enable Reg. */ |
841 | 841 | ||
842 | #define OSSR_M(Nb) /* Match detected [0..3] */ \ | 842 | #define OSSR_M(Nb) /* Match detected [0..3] */ \ |
843 | (0x00000001 << (Nb)) | 843 | (0x00000001 << (Nb)) |
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h index a38fc4f54241..6a9eecf3137e 100644 --- a/arch/arm/mach-sa1100/include/mach/gpio.h +++ b/arch/arm/mach-sa1100/include/mach/gpio.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __ASM_ARCH_SA1100_GPIO_H | 24 | #ifndef __ASM_ARCH_SA1100_GPIO_H |
25 | #define __ASM_ARCH_SA1100_GPIO_H | 25 | #define __ASM_ARCH_SA1100_GPIO_H |
26 | 26 | ||
27 | #include <linux/io.h> | ||
27 | #include <mach/hardware.h> | 28 | #include <mach/hardware.h> |
28 | #include <asm/irq.h> | 29 | #include <asm/irq.h> |
29 | #include <asm-generic/gpio.h> | 30 | #include <asm-generic/gpio.h> |
diff --git a/arch/arm/mach-sa1100/include/mach/hardware.h b/arch/arm/mach-sa1100/include/mach/hardware.h index 99f5856d8de4..cbedd75a9d65 100644 --- a/arch/arm/mach-sa1100/include/mach/hardware.h +++ b/arch/arm/mach-sa1100/include/mach/hardware.h | |||
@@ -32,7 +32,7 @@ | |||
32 | #define PIO_START 0x80000000 /* physical start of IO space */ | 32 | #define PIO_START 0x80000000 /* physical start of IO space */ |
33 | 33 | ||
34 | #define io_p2v( x ) \ | 34 | #define io_p2v( x ) \ |
35 | ( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE ) | 35 | IOMEM( (((x)&0x00ffffff) | (((x)&0x30000000)>>VIO_SHIFT)) + VIO_BASE ) |
36 | #define io_v2p( x ) \ | 36 | #define io_v2p( x ) \ |
37 | ( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START ) | 37 | ( (((x)&0x00ffffff) | (((x)&(0x30000000>>VIO_SHIFT))<<VIO_SHIFT)) + PIO_START ) |
38 | 38 | ||
@@ -47,6 +47,8 @@ | |||
47 | #define CPU_SA1110_ID (0x6901b110) | 47 | #define CPU_SA1110_ID (0x6901b110) |
48 | #define CPU_SA1110_MASK (0xfffffff0) | 48 | #define CPU_SA1110_MASK (0xfffffff0) |
49 | 49 | ||
50 | #define __MREG(x) IOMEM(io_p2v(x)) | ||
51 | |||
50 | #ifndef __ASSEMBLY__ | 52 | #ifndef __ASSEMBLY__ |
51 | 53 | ||
52 | #include <asm/cputype.h> | 54 | #include <asm/cputype.h> |
@@ -56,7 +58,7 @@ | |||
56 | #define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID) | 58 | #define cpu_is_sa1100() ((read_cpuid_id() & CPU_SA1100_MASK) == CPU_SA1100_ID) |
57 | #define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID) | 59 | #define cpu_is_sa1110() ((read_cpuid_id() & CPU_SA1110_MASK) == CPU_SA1110_ID) |
58 | 60 | ||
59 | # define __REG(x) (*((volatile unsigned long *)io_p2v(x))) | 61 | # define __REG(x) (*((volatile unsigned long __iomem *)io_p2v(x))) |
60 | # define __PREG(x) (io_v2p((unsigned long)&(x))) | 62 | # define __PREG(x) (io_v2p((unsigned long)&(x))) |
61 | 63 | ||
62 | static inline unsigned long get_clock_tick_rate(void) | 64 | static inline unsigned long get_clock_tick_rate(void) |
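As a worked example of what these macros now return: assuming the SA-1100 values defined earlier in this header but not visible in the hunk (VIO_BASE = 0xf8000000, VIO_SHIFT = 3), the OS-timer counter at physical 0x90000010 becomes io_p2v(0x90000010) = (0x00000010 | (0x10000000 >> 3)) + 0xf8000000 = 0xfa000010. With the IOMEM() wrapper that value is typed as void __iomem * rather than a bare unsigned long, which is what lets the readl_relaxed()/writel_relaxed() conversions elsewhere in this series type-check cleanly.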
diff --git a/arch/arm/mach-sa1100/include/mach/uncompress.h b/arch/arm/mach-sa1100/include/mach/uncompress.h index 6cb39ddde656..5cf71da60e42 100644 --- a/arch/arm/mach-sa1100/include/mach/uncompress.h +++ b/arch/arm/mach-sa1100/include/mach/uncompress.h | |||
@@ -8,6 +8,8 @@ | |||
8 | 8 | ||
9 | #include "hardware.h" | 9 | #include "hardware.h" |
10 | 10 | ||
11 | #define IOMEM(x) (x) | ||
12 | |||
11 | /* | 13 | /* |
12 | * The following code assumes the serial port has already been | 14 | * The following code assumes the serial port has already been |
13 | * initialized by the bootloader. We search for the first enabled | 15 | * initialized by the bootloader. We search for the first enabled |
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c index 516ccc25d7fd..2124f1fc2fbe 100644 --- a/arch/arm/mach-sa1100/irq.c +++ b/arch/arm/mach-sa1100/irq.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/io.h> | ||
15 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
16 | #include <linux/ioport.h> | 17 | #include <linux/ioport.h> |
17 | #include <linux/syscore_ops.h> | 18 | #include <linux/syscore_ops.h> |
diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index b412fc09c80c..7f07f08d8968 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
21 | #include <linux/io.h> | ||
21 | 22 | ||
22 | #include <mach/hardware.h> | 23 | #include <mach/hardware.h> |
23 | #include <mach/jornada720.h> | 24 | #include <mach/jornada720.h> |
diff --git a/arch/arm/mach-sa1100/leds-cerf.c b/arch/arm/mach-sa1100/leds-cerf.c index 040540fb7d8a..30fc3b2bf555 100644 --- a/arch/arm/mach-sa1100/leds-cerf.c +++ b/arch/arm/mach-sa1100/leds-cerf.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Author: ??? | 4 | * Author: ??? |
5 | */ | 5 | */ |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/io.h> | ||
7 | 8 | ||
8 | #include <mach/hardware.h> | 9 | #include <mach/hardware.h> |
9 | #include <asm/leds.h> | 10 | #include <asm/leds.h> |
diff --git a/arch/arm/mach-sa1100/leds-lart.c b/arch/arm/mach-sa1100/leds-lart.c index a51830c60e53..50a5b143b460 100644 --- a/arch/arm/mach-sa1100/leds-lart.c +++ b/arch/arm/mach-sa1100/leds-lart.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * pace of the LED. | 10 | * pace of the LED. |
11 | */ | 11 | */ |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/io.h> | ||
13 | 14 | ||
14 | #include <mach/hardware.h> | 15 | #include <mach/hardware.h> |
15 | #include <asm/leds.h> | 16 | #include <asm/leds.h> |
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index 690cf0ce5c0c..6645d1e31f14 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c | |||
@@ -23,6 +23,7 @@ | |||
23 | * Storage is local on the stack now. | 23 | * Storage is local on the stack now. |
24 | */ | 24 | */ |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/io.h> | ||
26 | #include <linux/suspend.h> | 27 | #include <linux/suspend.h> |
27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
28 | #include <linux/time.h> | 29 | #include <linux/time.h> |
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 30cc6721665b..85863741ef8b 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S | |||
@@ -38,9 +38,9 @@ ENTRY(sa1100_finish_suspend) | |||
38 | orr r4, r4, #MDREFR_K1DB2 | 38 | orr r4, r4, #MDREFR_K1DB2 |
39 | ldr r5, =PPCR | 39 | ldr r5, =PPCR |
40 | 40 | ||
41 | @ Pre-load __udelay into the I-cache | 41 | @ Pre-load __loop_udelay into the I-cache |
42 | mov r0, #1 | 42 | mov r0, #1 |
43 | bl __udelay | 43 | bl __loop_udelay |
44 | mov r0, r0 | 44 | mov r0, r0 |
45 | 45 | ||
46 | @ The following must all exist in a single cache line to | 46 | @ The following must all exist in a single cache line to |
@@ -53,11 +53,11 @@ ENTRY(sa1100_finish_suspend) | |||
53 | @ delay 90us and set CPU PLL to lowest speed | 53 | @ delay 90us and set CPU PLL to lowest speed |
54 | @ fixes resume problem on high speed SA1110 | 54 | @ fixes resume problem on high speed SA1110 |
55 | mov r0, #90 | 55 | mov r0, #90 |
56 | bl __udelay | 56 | bl __loop_udelay |
57 | mov r1, #0 | 57 | mov r1, #0 |
58 | str r1, [r5] | 58 | str r1, [r5] |
59 | mov r0, #90 | 59 | mov r0, #90 |
60 | bl __udelay | 60 | bl __loop_udelay |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * SA1110 SDRAM controller workaround. register values: | 63 | * SA1110 SDRAM controller workaround. register values: |
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c index 6af26e8d55e6..80702c9ecc77 100644 --- a/arch/arm/mach-sa1100/time.c +++ b/arch/arm/mach-sa1100/time.c | |||
@@ -22,7 +22,7 @@ | |||
22 | 22 | ||
23 | static u32 notrace sa1100_read_sched_clock(void) | 23 | static u32 notrace sa1100_read_sched_clock(void) |
24 | { | 24 | { |
25 | return OSCR; | 25 | return readl_relaxed(OSCR); |
26 | } | 26 | } |
27 | 27 | ||
28 | #define MIN_OSCR_DELTA 2 | 28 | #define MIN_OSCR_DELTA 2 |
@@ -32,8 +32,8 @@ static irqreturn_t sa1100_ost0_interrupt(int irq, void *dev_id) | |||
32 | struct clock_event_device *c = dev_id; | 32 | struct clock_event_device *c = dev_id; |
33 | 33 | ||
34 | /* Disarm the compare/match, signal the event. */ | 34 | /* Disarm the compare/match, signal the event. */ |
35 | OIER &= ~OIER_E0; | 35 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); |
36 | OSSR = OSSR_M0; | 36 | writel_relaxed(OSSR_M0, OSSR); |
37 | c->event_handler(c); | 37 | c->event_handler(c); |
38 | 38 | ||
39 | return IRQ_HANDLED; | 39 | return IRQ_HANDLED; |
@@ -44,10 +44,10 @@ sa1100_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c) | |||
44 | { | 44 | { |
45 | unsigned long next, oscr; | 45 | unsigned long next, oscr; |
46 | 46 | ||
47 | OIER |= OIER_E0; | 47 | writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER); |
48 | next = OSCR + delta; | 48 | next = readl_relaxed(OSCR) + delta; |
49 | OSMR0 = next; | 49 | writel_relaxed(next, OSMR0); |
50 | oscr = OSCR; | 50 | oscr = readl_relaxed(OSCR); |
51 | 51 | ||
52 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; | 52 | return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0; |
53 | } | 53 | } |
@@ -59,8 +59,8 @@ sa1100_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c) | |||
59 | case CLOCK_EVT_MODE_ONESHOT: | 59 | case CLOCK_EVT_MODE_ONESHOT: |
60 | case CLOCK_EVT_MODE_UNUSED: | 60 | case CLOCK_EVT_MODE_UNUSED: |
61 | case CLOCK_EVT_MODE_SHUTDOWN: | 61 | case CLOCK_EVT_MODE_SHUTDOWN: |
62 | OIER &= ~OIER_E0; | 62 | writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER); |
63 | OSSR = OSSR_M0; | 63 | writel_relaxed(OSSR_M0, OSSR); |
64 | break; | 64 | break; |
65 | 65 | ||
66 | case CLOCK_EVT_MODE_RESUME: | 66 | case CLOCK_EVT_MODE_RESUME: |
@@ -86,8 +86,8 @@ static struct irqaction sa1100_timer_irq = { | |||
86 | 86 | ||
87 | static void __init sa1100_timer_init(void) | 87 | static void __init sa1100_timer_init(void) |
88 | { | 88 | { |
89 | OIER = 0; | 89 | writel_relaxed(0, OIER); |
90 | OSSR = OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3; | 90 | writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR); |
91 | 91 | ||
92 | setup_sched_clock(sa1100_read_sched_clock, 32, 3686400); | 92 | setup_sched_clock(sa1100_read_sched_clock, 32, 3686400); |
93 | 93 | ||
@@ -100,7 +100,7 @@ static void __init sa1100_timer_init(void) | |||
100 | 100 | ||
101 | setup_irq(IRQ_OST0, &sa1100_timer_irq); | 101 | setup_irq(IRQ_OST0, &sa1100_timer_irq); |
102 | 102 | ||
103 | clocksource_mmio_init(&OSCR, "oscr", CLOCK_TICK_RATE, 200, 32, | 103 | clocksource_mmio_init(OSCR, "oscr", CLOCK_TICK_RATE, 200, 32, |
104 | clocksource_mmio_readl_up); | 104 | clocksource_mmio_readl_up); |
105 | clockevents_register_device(&ckevt_sa1100_osmr0); | 105 | clockevents_register_device(&ckevt_sa1100_osmr0); |
106 | } | 106 | } |
@@ -110,26 +110,26 @@ unsigned long osmr[4], oier; | |||
110 | 110 | ||
111 | static void sa1100_timer_suspend(void) | 111 | static void sa1100_timer_suspend(void) |
112 | { | 112 | { |
113 | osmr[0] = OSMR0; | 113 | osmr[0] = readl_relaxed(OSMR0); |
114 | osmr[1] = OSMR1; | 114 | osmr[1] = readl_relaxed(OSMR1); |
115 | osmr[2] = OSMR2; | 115 | osmr[2] = readl_relaxed(OSMR2); |
116 | osmr[3] = OSMR3; | 116 | osmr[3] = readl_relaxed(OSMR3); |
117 | oier = OIER; | 117 | oier = readl_relaxed(OIER); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void sa1100_timer_resume(void) | 120 | static void sa1100_timer_resume(void) |
121 | { | 121 | { |
122 | OSSR = 0x0f; | 122 | writel_relaxed(0x0f, OSSR); |
123 | OSMR0 = osmr[0]; | 123 | writel_relaxed(osmr[0], OSMR0); |
124 | OSMR1 = osmr[1]; | 124 | writel_relaxed(osmr[1], OSMR1); |
125 | OSMR2 = osmr[2]; | 125 | writel_relaxed(osmr[2], OSMR2); |
126 | OSMR3 = osmr[3]; | 126 | writel_relaxed(osmr[3], OSMR3); |
127 | OIER = oier; | 127 | writel_relaxed(oier, OIER); |
128 | 128 | ||
129 | /* | 129 | /* |
130 | * OSMR0 is the system timer: make sure OSCR is sufficiently behind | 130 | * OSMR0 is the system timer: make sure OSCR is sufficiently behind |
131 | */ | 131 | */ |
132 | OSCR = OSMR0 - LATCH; | 132 | writel_relaxed(OSMR0 - LATCH, OSCR); |
133 | } | 133 | } |
134 | #else | 134 | #else |
135 | #define sa1100_timer_suspend NULL | 135 | #define sa1100_timer_suspend NULL |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 806cc4f63516..119bc52ab93e 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
15 | 15 | ||
16 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
17 | #include <asm/thread_notify.h> | ||
17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
18 | 19 | ||
19 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | 20 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
@@ -48,6 +49,40 @@ void cpu_set_reserved_ttbr0(void) | |||
48 | } | 49 | } |
49 | #endif | 50 | #endif |
50 | 51 | ||
52 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
53 | static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, | ||
54 | void *t) | ||
55 | { | ||
56 | u32 contextidr; | ||
57 | pid_t pid; | ||
58 | struct thread_info *thread = t; | ||
59 | |||
60 | if (cmd != THREAD_NOTIFY_SWITCH) | ||
61 | return NOTIFY_DONE; | ||
62 | |||
63 | pid = task_pid_nr(thread->task) << ASID_BITS; | ||
64 | asm volatile( | ||
65 | " mrc p15, 0, %0, c13, c0, 1\n" | ||
66 | " bfi %1, %0, #0, %2\n" | ||
67 | " mcr p15, 0, %1, c13, c0, 1\n" | ||
68 | : "=r" (contextidr), "+r" (pid) | ||
69 | : "I" (ASID_BITS)); | ||
70 | isb(); | ||
71 | |||
72 | return NOTIFY_OK; | ||
73 | } | ||
74 | |||
75 | static struct notifier_block contextidr_notifier_block = { | ||
76 | .notifier_call = contextidr_notifier, | ||
77 | }; | ||
78 | |||
79 | static int __init contextidr_notifier_init(void) | ||
80 | { | ||
81 | return thread_register_notifier(&contextidr_notifier_block); | ||
82 | } | ||
83 | arch_initcall(contextidr_notifier_init); | ||
84 | #endif | ||
85 | |||
51 | /* | 86 | /* |
52 | * We fork()ed a process, and we need a new context for the child | 87 | * We fork()ed a process, and we need a new context for the child |
53 | * to run in. | 88 | * to run in. |
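With CONFIG_PID_IN_CONTEXTIDR the notifier above rewrites the PROCID field of CONTEXTIDR on every thread switch while leaving the low ASID_BITS untouched, so an external trace or debug tool that samples CONTEXTIDR can recover the PID of the running task. A hypothetical decode helper, assuming the 8-bit ASID layout used by this (non-LPAE) code; these functions are not part of the patch:

#include <linux/types.h>

#define ASID_BITS	8	/* assumption: matches the non-LPAE ARM value */

static inline pid_t contextidr_to_pid(u32 contextidr)
{
	return contextidr >> ASID_BITS;	/* PROCID field written by the notifier */
}

static inline u32 contextidr_to_asid(u32 contextidr)
{
	return contextidr & ((1 << ASID_BITS) - 1);
}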
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 655878bcc96d..5cfc98994076 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -23,12 +23,12 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/iommu.h> | 24 | #include <linux/iommu.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/sizes.h> | ||
26 | 27 | ||
27 | #include <asm/memory.h> | 28 | #include <asm/memory.h> |
28 | #include <asm/highmem.h> | 29 | #include <asm/highmem.h> |
29 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
30 | #include <asm/tlbflush.h> | 31 | #include <asm/tlbflush.h> |
31 | #include <asm/sizes.h> | ||
32 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
33 | #include <asm/dma-iommu.h> | 33 | #include <asm/dma-iommu.h> |
34 | #include <asm/mach/map.h> | 34 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index f54d59219764..9aec41fa80ae 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -21,13 +21,13 @@ | |||
21 | #include <linux/gfp.h> | 21 | #include <linux/gfp.h> |
22 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
23 | #include <linux/dma-contiguous.h> | 23 | #include <linux/dma-contiguous.h> |
24 | #include <linux/sizes.h> | ||
24 | 25 | ||
25 | #include <asm/mach-types.h> | 26 | #include <asm/mach-types.h> |
26 | #include <asm/memblock.h> | 27 | #include <asm/memblock.h> |
27 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
28 | #include <asm/sections.h> | 29 | #include <asm/sections.h> |
29 | #include <asm/setup.h> | 30 | #include <asm/setup.h> |
30 | #include <asm/sizes.h> | ||
31 | #include <asm/tlb.h> | 31 | #include <asm/tlb.h> |
32 | #include <asm/fixmap.h> | 32 | #include <asm/fixmap.h> |
33 | 33 | ||
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 4f55f5062ab7..566750fa57d4 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/sizes.h> | ||
28 | 29 | ||
29 | #include <asm/cp15.h> | 30 | #include <asm/cp15.h> |
30 | #include <asm/cputype.h> | 31 | #include <asm/cputype.h> |
@@ -32,7 +33,6 @@ | |||
32 | #include <asm/mmu_context.h> | 33 | #include <asm/mmu_context.h> |
33 | #include <asm/pgalloc.h> | 34 | #include <asm/pgalloc.h> |
34 | #include <asm/tlbflush.h> | 35 | #include <asm/tlbflush.h> |
35 | #include <asm/sizes.h> | ||
36 | #include <asm/system_info.h> | 36 | #include <asm/system_info.h> |
37 | 37 | ||
38 | #include <asm/mach/map.h> | 38 | #include <asm/mach/map.h> |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index cf4528d51774..4c2d0451e84a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -16,13 +16,13 @@ | |||
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | #include <linux/sizes.h> | ||
19 | 20 | ||
20 | #include <asm/cp15.h> | 21 | #include <asm/cp15.h> |
21 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
22 | #include <asm/sections.h> | 23 | #include <asm/sections.h> |
23 | #include <asm/cachetype.h> | 24 | #include <asm/cachetype.h> |
24 | #include <asm/setup.h> | 25 | #include <asm/setup.h> |
25 | #include <asm/sizes.h> | ||
26 | #include <asm/smp_plat.h> | 26 | #include <asm/smp_plat.h> |
27 | #include <asm/tlb.h> | 27 | #include <asm/tlb.h> |
28 | #include <asm/highmem.h> | 28 | #include <asm/highmem.h> |
@@ -422,12 +422,6 @@ static void __init build_mem_type_table(void) | |||
422 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; | 422 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
423 | 423 | ||
424 | /* | 424 | /* |
425 | * Only use write-through for non-SMP systems | ||
426 | */ | ||
427 | if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) | ||
428 | vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; | ||
429 | |||
430 | /* | ||
431 | * Enable CPU-specific coherency if supported. | 425 | * Enable CPU-specific coherency if supported. |
432 | * (Only available on XSC3 at the moment.) | 426 | * (Only available on XSC3 at the moment.) |
433 | */ | 427 | */ |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 5900cd520e84..86b8b480634f 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -107,6 +107,12 @@ ENTRY(cpu_v6_switch_mm) | |||
107 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 107 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
108 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer | 108 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer |
109 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 109 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
110 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
111 | mrc p15, 0, r2, c13, c0, 1 @ read current context ID | ||
112 | bic r2, r2, #0xff @ extract the PID | ||
113 | and r1, r1, #0xff | ||
114 | orr r1, r1, r2 @ insert into new context ID | ||
115 | #endif | ||
110 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | 116 | mcr p15, 0, r1, c13, c0, 1 @ set context ID |
111 | #endif | 117 | #endif |
112 | mov pc, lr | 118 | mov pc, lr |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 42ac069c8012..fd045e706390 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -46,6 +46,11 @@ ENTRY(cpu_v7_switch_mm) | |||
46 | #ifdef CONFIG_ARM_ERRATA_430973 | 46 | #ifdef CONFIG_ARM_ERRATA_430973 |
47 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 47 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
48 | #endif | 48 | #endif |
49 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
50 | mrc p15, 0, r2, c13, c0, 1 @ read current context ID | ||
51 | lsr r2, r2, #8 @ extract the PID | ||
52 | bfi r1, r2, #8, #24 @ insert into new context ID | ||
53 | #endif | ||
49 | #ifdef CONFIG_ARM_ERRATA_754322 | 54 | #ifdef CONFIG_ARM_ERRATA_754322 |
50 | dsb | 55 | dsb |
51 | #endif | 56 | #endif |
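On the context-switch path the same option has to carry that PID field across an ASID change. As an assumed C equivalent (not the kernel's code), the LSR #8 / BFI #8, #24 pair in cpu_v7_switch_mm amounts to:

	#include <stdint.h>

	/* Sketch: keep the PID field (bits 31:8) of the outgoing CONTEXTIDR
	 * and splice in the new ASID, which occupies bits 7:0. */
	static uint32_t v7_new_contextidr(uint32_t old_contextidr, uint32_t new_id)
	{
		return (new_id & 0xffu) | (old_contextidr & ~0xffu);
	}

The PID itself is refreshed slightly later by the thread-switch notifier shown earlier, once the incoming task's thread_info is known.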
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 4e0a371630b3..99c63d4b6af8 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
@@ -23,26 +23,37 @@ | |||
23 | #include <asm/ptrace.h> | 23 | #include <asm/ptrace.h> |
24 | 24 | ||
25 | #ifdef CONFIG_HW_PERF_EVENTS | 25 | #ifdef CONFIG_HW_PERF_EVENTS |
26 | |||
27 | /* | ||
28 | * OProfile has a curious naming scheme for the ARM PMUs, but they are | ||
29 | * part of the user ABI so we need to map from the perf PMU name for | ||
30 | * supported PMUs. | ||
31 | */ | ||
32 | static struct op_perf_name { | ||
33 | char *perf_name; | ||
34 | char *op_name; | ||
35 | } op_perf_name_map[] = { | ||
36 | { "xscale1", "arm/xscale1" }, | ||
37 | { "xscale1", "arm/xscale2" }, | ||
38 | { "v6", "arm/armv6" }, | ||
39 | { "v6mpcore", "arm/mpcore" }, | ||
40 | { "ARMv7 Cortex-A8", "arm/armv7" }, | ||
41 | { "ARMv7 Cortex-A9", "arm/armv7-ca9" }, | ||
42 | }; | ||
43 | |||
26 | char *op_name_from_perf_id(void) | 44 | char *op_name_from_perf_id(void) |
27 | { | 45 | { |
28 | enum arm_perf_pmu_ids id = armpmu_get_pmu_id(); | 46 | int i; |
29 | 47 | struct op_perf_name names; | |
30 | switch (id) { | 48 | const char *perf_name = perf_pmu_name(); |
31 | case ARM_PERF_PMU_ID_XSCALE1: | 49 | |
32 | return "arm/xscale1"; | 50 | for (i = 0; i < ARRAY_SIZE(op_perf_name_map); ++i) { |
33 | case ARM_PERF_PMU_ID_XSCALE2: | 51 | names = op_perf_name_map[i]; |
34 | return "arm/xscale2"; | 52 | if (!strcmp(names.perf_name, perf_name)) |
35 | case ARM_PERF_PMU_ID_V6: | 53 | return names.op_name; |
36 | return "arm/armv6"; | ||
37 | case ARM_PERF_PMU_ID_V6MP: | ||
38 | return "arm/mpcore"; | ||
39 | case ARM_PERF_PMU_ID_CA8: | ||
40 | return "arm/armv7"; | ||
41 | case ARM_PERF_PMU_ID_CA9: | ||
42 | return "arm/armv7-ca9"; | ||
43 | default: | ||
44 | return NULL; | ||
45 | } | 54 | } |
55 | |||
56 | return NULL; | ||
46 | } | 57 | } |
47 | #endif | 58 | #endif |
48 | 59 | ||
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index 49c7db48c7f1..d7c5c171f5aa 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c | |||
@@ -85,7 +85,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) | |||
85 | * the boot monitor to read the system wide flags register, | 85 | * the boot monitor to read the system wide flags register, |
86 | * and branch to the address found there. | 86 | * and branch to the address found there. |
87 | */ | 87 | */ |
88 | gic_raise_softirq(cpumask_of(cpu), 1); | 88 | gic_raise_softirq(cpumask_of(cpu), 0); |
89 | 89 | ||
90 | timeout = jiffies + (1 * HZ); | 90 | timeout = jiffies + (1 * HZ); |
91 | while (time_before(jiffies, timeout)) { | 91 | while (time_before(jiffies, timeout)) { |
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index b7e728517284..e8eb91bd0d28 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c | |||
@@ -16,9 +16,9 @@ | |||
16 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
18 | #include <linux/amba/bus.h> | 18 | #include <linux/amba/bus.h> |
19 | #include <linux/sizes.h> | ||
19 | 20 | ||
20 | #include <asm/irq.h> | 21 | #include <asm/irq.h> |
21 | #include <asm/sizes.h> | ||
22 | 22 | ||
23 | #define to_amba_driver(d) container_of(d, struct amba_driver, drv) | 23 | #define to_amba_driver(d) container_of(d, struct amba_driver, drv) |
24 | 24 | ||
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c index d9be6eac99b1..7f03d1bd916e 100644 --- a/drivers/input/touchscreen/jornada720_ts.c +++ b/drivers/input/touchscreen/jornada720_ts.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/io.h> | ||
22 | 23 | ||
23 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
24 | #include <mach/jornada720.h> | 25 | #include <mach/jornada720.h> |
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index ff16daf33ae1..8d5476707912 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c | |||
@@ -289,7 +289,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id) | |||
289 | } | 289 | } |
290 | lsr = STLSR; | 290 | lsr = STLSR; |
291 | } | 291 | } |
292 | si->last_oscr = OSCR; | 292 | si->last_oscr = readl_relaxed(OSCR); |
293 | break; | 293 | break; |
294 | 294 | ||
295 | case 0x04: /* Received Data Available */ | 295 | case 0x04: /* Received Data Available */ |
@@ -300,7 +300,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id) | |||
300 | dev->stats.rx_bytes++; | 300 | dev->stats.rx_bytes++; |
301 | async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); | 301 | async_unwrap_char(dev, &dev->stats, &si->rx_buff, STRBR); |
302 | } while (STLSR & LSR_DR); | 302 | } while (STLSR & LSR_DR); |
303 | si->last_oscr = OSCR; | 303 | si->last_oscr = readl_relaxed(OSCR); |
304 | break; | 304 | break; |
305 | 305 | ||
306 | case 0x02: /* Transmit FIFO Data Request */ | 306 | case 0x02: /* Transmit FIFO Data Request */ |
@@ -316,7 +316,7 @@ static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id) | |||
316 | /* We need to ensure that the transmitter has finished. */ | 316 | /* We need to ensure that the transmitter has finished. */ |
317 | while ((STLSR & LSR_TEMT) == 0) | 317 | while ((STLSR & LSR_TEMT) == 0) |
318 | cpu_relax(); | 318 | cpu_relax(); |
319 | si->last_oscr = OSCR; | 319 | si->last_oscr = readl_relaxed(OSCR); |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * Ok, we've finished transmitting. Now enable | 322 | * Ok, we've finished transmitting. Now enable |
@@ -370,7 +370,7 @@ static void pxa_irda_fir_dma_tx_irq(int channel, void *data) | |||
370 | 370 | ||
371 | while (ICSR1 & ICSR1_TBY) | 371 | while (ICSR1 & ICSR1_TBY) |
372 | cpu_relax(); | 372 | cpu_relax(); |
373 | si->last_oscr = OSCR; | 373 | si->last_oscr = readl_relaxed(OSCR); |
374 | 374 | ||
375 | /* | 375 | /* |
376 | * HACK: It looks like the TBY bit is dropped too soon. | 376 | * HACK: It looks like the TBY bit is dropped too soon. |
@@ -470,7 +470,7 @@ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id) | |||
470 | 470 | ||
471 | /* stop RX DMA */ | 471 | /* stop RX DMA */ |
472 | DCSR(si->rxdma) &= ~DCSR_RUN; | 472 | DCSR(si->rxdma) &= ~DCSR_RUN; |
473 | si->last_oscr = OSCR; | 473 | si->last_oscr = readl_relaxed(OSCR); |
474 | icsr0 = ICSR0; | 474 | icsr0 = ICSR0; |
475 | 475 | ||
476 | if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { | 476 | if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) { |
@@ -546,7 +546,7 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev) | |||
546 | skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); | 546 | skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len); |
547 | 547 | ||
548 | if (mtt) | 548 | if (mtt) |
549 | while ((unsigned)(OSCR - si->last_oscr)/4 < mtt) | 549 | while ((unsigned)(readl_relaxed(OSCR) - si->last_oscr)/4 < mtt) |
550 | cpu_relax(); | 550 | cpu_relax(); |
551 | 551 | ||
552 | /* stop RX DMA, disable FICP */ | 552 | /* stop RX DMA, disable FICP */ |
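The OSCR conversions above replace direct register variables with readl_relaxed() accesses; the hard_xmit path still divides the tick delta by 4 as an approximation of elapsed microseconds for the IrDA minimum-turnaround wait. A rough sketch of that wait, assuming OSCR is the PXA OS-timer count register ticking at about 3.6864 MHz (so ticks/4 underestimates elapsed microseconds and the loop waits slightly longer than mtt, the safe direction):

	#include <linux/io.h>

	/* Sketch only, not the driver's exact code: spin until at least
	 * mtt_us microseconds of OS-timer ticks have passed since last_oscr.
	 * OSCR and cpu_relax() come from the driver's existing includes. */
	static void wait_min_turnaround(u32 last_oscr, unsigned int mtt_us)
	{
		while ((u32)(readl_relaxed(OSCR) - last_oscr) / 4 < mtt_us)
			cpu_relax();
	}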
diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c index decb34730bcf..56ab73915602 100644 --- a/drivers/pcmcia/sa1100_shannon.c +++ b/drivers/pcmcia/sa1100_shannon.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/io.h> | ||
11 | 12 | ||
12 | #include <mach/hardware.h> | 13 | #include <mach/hardware.h> |
13 | #include <asm/mach-types.h> | 14 | #include <asm/mach-types.h> |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index c17923ec6e95..d3553b5d3fca 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
@@ -53,9 +53,9 @@ | |||
53 | #include <linux/delay.h> | 53 | #include <linux/delay.h> |
54 | #include <linux/types.h> | 54 | #include <linux/types.h> |
55 | #include <linux/pinctrl/consumer.h> | 55 | #include <linux/pinctrl/consumer.h> |
56 | #include <linux/sizes.h> | ||
56 | 57 | ||
57 | #include <asm/io.h> | 58 | #include <asm/io.h> |
58 | #include <asm/sizes.h> | ||
59 | 59 | ||
60 | #define UART_NR 14 | 60 | #define UART_NR 14 |
61 | 61 | ||
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index 54984deb8561..ccd6b29e21bf 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c | |||
@@ -54,10 +54,10 @@ static int sa1100dog_open(struct inode *inode, struct file *file) | |||
54 | return -EBUSY; | 54 | return -EBUSY; |
55 | 55 | ||
56 | /* Activate SA1100 Watchdog timer */ | 56 | /* Activate SA1100 Watchdog timer */ |
57 | OSMR3 = OSCR + pre_margin; | 57 | writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); |
58 | OSSR = OSSR_M3; | 58 | writel_relaxed(OSSR_M3, OSSR); |
59 | OWER = OWER_WME; | 59 | writel_relaxed(OWER_WME, OWER); |
60 | OIER |= OIER_E3; | 60 | writel_relaxed(readl_relaxed(OIER) | OIER_E3, OIER); |
61 | return nonseekable_open(inode, file); | 61 | return nonseekable_open(inode, file); |
62 | } | 62 | } |
63 | 63 | ||
@@ -80,7 +80,7 @@ static ssize_t sa1100dog_write(struct file *file, const char __user *data, | |||
80 | { | 80 | { |
81 | if (len) | 81 | if (len) |
82 | /* Refresh OSMR3 timer. */ | 82 | /* Refresh OSMR3 timer. */ |
83 | OSMR3 = OSCR + pre_margin; | 83 | writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); |
84 | return len; | 84 | return len; |
85 | } | 85 | } |
86 | 86 | ||
@@ -114,7 +114,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd, | |||
114 | break; | 114 | break; |
115 | 115 | ||
116 | case WDIOC_KEEPALIVE: | 116 | case WDIOC_KEEPALIVE: |
117 | OSMR3 = OSCR + pre_margin; | 117 | writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); |
118 | ret = 0; | 118 | ret = 0; |
119 | break; | 119 | break; |
120 | 120 | ||
@@ -129,7 +129,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd, | |||
129 | } | 129 | } |
130 | 130 | ||
131 | pre_margin = oscr_freq * time; | 131 | pre_margin = oscr_freq * time; |
132 | OSMR3 = OSCR + pre_margin; | 132 | writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); |
133 | /*fall through*/ | 133 | /*fall through*/ |
134 | 134 | ||
135 | case WDIOC_GETTIMEOUT: | 135 | case WDIOC_GETTIMEOUT: |
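The SA-1100 watchdog changes are a mechanical conversion of the OS-timer registers to MMIO accessors. As a hedged sketch of the pattern (an assumed helper, not part of the driver), every refresh simply pushes match register 3 pre_margin ticks ahead of the current counter; once OWER_WME is set, a match on OSMR3 resets the machine.

	#include <linux/io.h>

	/* Sketch: oscr and osmr3 are assumed to be the mapped OS-timer count
	 * and match-3 registers; pre_margin is the margin in timer ticks. */
	static void sa1100dog_pet(void __iomem *oscr, void __iomem *osmr3,
				  u32 pre_margin)
	{
		writel_relaxed(readl_relaxed(oscr) + pre_margin, osmr3);
	}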
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h index ea5d4ef81061..1dcfad9629ef 100644 --- a/include/asm-generic/sizes.h +++ b/include/asm-generic/sizes.h | |||
@@ -1,47 +1,2 @@ | |||
1 | /* | 1 | /* This is a placeholder, to be removed over time */ |
2 | * linux/include/asm-generic/sizes.h | 2 | #include <linux/sizes.h> |
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef __ASM_GENERIC_SIZES_H__ | ||
9 | #define __ASM_GENERIC_SIZES_H__ | ||
10 | |||
11 | #define SZ_1 0x00000001 | ||
12 | #define SZ_2 0x00000002 | ||
13 | #define SZ_4 0x00000004 | ||
14 | #define SZ_8 0x00000008 | ||
15 | #define SZ_16 0x00000010 | ||
16 | #define SZ_32 0x00000020 | ||
17 | #define SZ_64 0x00000040 | ||
18 | #define SZ_128 0x00000080 | ||
19 | #define SZ_256 0x00000100 | ||
20 | #define SZ_512 0x00000200 | ||
21 | |||
22 | #define SZ_1K 0x00000400 | ||
23 | #define SZ_2K 0x00000800 | ||
24 | #define SZ_4K 0x00001000 | ||
25 | #define SZ_8K 0x00002000 | ||
26 | #define SZ_16K 0x00004000 | ||
27 | #define SZ_32K 0x00008000 | ||
28 | #define SZ_64K 0x00010000 | ||
29 | #define SZ_128K 0x00020000 | ||
30 | #define SZ_256K 0x00040000 | ||
31 | #define SZ_512K 0x00080000 | ||
32 | |||
33 | #define SZ_1M 0x00100000 | ||
34 | #define SZ_2M 0x00200000 | ||
35 | #define SZ_4M 0x00400000 | ||
36 | #define SZ_8M 0x00800000 | ||
37 | #define SZ_16M 0x01000000 | ||
38 | #define SZ_32M 0x02000000 | ||
39 | #define SZ_64M 0x04000000 | ||
40 | #define SZ_128M 0x08000000 | ||
41 | #define SZ_256M 0x10000000 | ||
42 | #define SZ_512M 0x20000000 | ||
43 | |||
44 | #define SZ_1G 0x40000000 | ||
45 | #define SZ_2G 0x80000000 | ||
46 | |||
47 | #endif /* __ASM_GENERIC_SIZES_H__ */ | ||
diff --git a/include/linux/sizes.h b/include/linux/sizes.h new file mode 100644 index 000000000000..ce3e8150c174 --- /dev/null +++ b/include/linux/sizes.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * include/linux/sizes.h | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #ifndef __LINUX_SIZES_H__ | ||
9 | #define __LINUX_SIZES_H__ | ||
10 | |||
11 | #define SZ_1 0x00000001 | ||
12 | #define SZ_2 0x00000002 | ||
13 | #define SZ_4 0x00000004 | ||
14 | #define SZ_8 0x00000008 | ||
15 | #define SZ_16 0x00000010 | ||
16 | #define SZ_32 0x00000020 | ||
17 | #define SZ_64 0x00000040 | ||
18 | #define SZ_128 0x00000080 | ||
19 | #define SZ_256 0x00000100 | ||
20 | #define SZ_512 0x00000200 | ||
21 | |||
22 | #define SZ_1K 0x00000400 | ||
23 | #define SZ_2K 0x00000800 | ||
24 | #define SZ_4K 0x00001000 | ||
25 | #define SZ_8K 0x00002000 | ||
26 | #define SZ_16K 0x00004000 | ||
27 | #define SZ_32K 0x00008000 | ||
28 | #define SZ_64K 0x00010000 | ||
29 | #define SZ_128K 0x00020000 | ||
30 | #define SZ_256K 0x00040000 | ||
31 | #define SZ_512K 0x00080000 | ||
32 | |||
33 | #define SZ_1M 0x00100000 | ||
34 | #define SZ_2M 0x00200000 | ||
35 | #define SZ_4M 0x00400000 | ||
36 | #define SZ_8M 0x00800000 | ||
37 | #define SZ_16M 0x01000000 | ||
38 | #define SZ_32M 0x02000000 | ||
39 | #define SZ_64M 0x04000000 | ||
40 | #define SZ_128M 0x08000000 | ||
41 | #define SZ_256M 0x10000000 | ||
42 | #define SZ_512M 0x20000000 | ||
43 | |||
44 | #define SZ_1G 0x40000000 | ||
45 | #define SZ_2G 0x80000000 | ||
46 | |||
47 | #endif /* __LINUX_SIZES_H__ */ | ||
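With the constants now living in <linux/sizes.h>, drivers and platform code can include the generic header directly instead of <asm/sizes.h>, which is what the include churn earlier in this diff does. A small illustrative use (the macro names below are invented for the example):

	#include <linux/sizes.h>

	/* Illustrative only: express region sizes with SZ_* instead of raw hex. */
	#define EXAMPLE_SRAM_SIZE	SZ_64K
	#define EXAMPLE_CMA_SIZE	SZ_16M
	#define EXAMPLE_WINDOW_SIZE	(SZ_1M + SZ_512K)	/* 1.5 MiB */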
diff --git a/init/Kconfig b/init/Kconfig index d07dcf9fc8a9..b3f55f15e107 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -357,7 +357,7 @@ config AUDIT | |||
357 | 357 | ||
358 | config AUDITSYSCALL | 358 | config AUDITSYSCALL |
359 | bool "Enable system-call auditing support" | 359 | bool "Enable system-call auditing support" |
360 | depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM) | 360 | depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT)) |
361 | default y if SECURITY_SELINUX | 361 | default y if SECURITY_SELINUX |
362 | help | 362 | help |
363 | Enable low-overhead system-call auditing infrastructure that | 363 | Enable low-overhead system-call auditing infrastructure that |