 63 files changed, 602 insertions(+), 458 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc2..d88983516e26 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@ config MIPS
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IDE
 	select HAVE_OPROFILE
+	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
 	select HAVE_PWM
+	select HAVE_CLK
 
 config LASAT
 	bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
 config PMC_MSP
 	bool "PMC-Sierra MSP chipsets"
 	depends on EXPERIMENTAL
+	select CEVT_R4K
+	select CSRC_R4K
 	select DMA_NONCOHERENT
 	select SWAP_IO_SPACE
 	select NO_EXCEPT_FILL
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa95905c..40b84b991191 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
 
 static void mtx1_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42add697..956f946218c5 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
 	{
 		.gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
 	&mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+	.phy_search_highest_addr	= 1,
+	.phy1_search_mac0		= 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
 	int rc;
 
+	au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
+
 	rc = gpio_request(mtx1_gpio_button[0].gpio,
 				mtx1_gpio_button[0].desc);
 	if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918925d3..80c521e5290d 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
 
 static void xxs1500_reset(char *c)
 {
-	/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-	au_writel(0x00000000, 0xAE00001C);
+	/* Jump to the reset vector */
+	__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007cf8162..d0c77496c728 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f11231..94ca2b018af7 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-	return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004	/* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005	/* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000	/* nop */
 #define INSN_JAL(addr)	\
 	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount		--> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                                  1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                                  1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
 	unsigned long ip = rec->ip;
 
 	/*
-	 * We have compiled module with -mlong-calls, but compiled the kernel
-	 * without it, we need to cope with them respectively.
+	 * If ip is in kernel space, no long call, otherwise, long call is
+	 * needed.
 	 */
-	if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000005)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * move $12, ra_address
-		 * jalr v1
-		 *  sub sp, sp, 8
-		 *                                  1: offset = 5 instructions
-		 */
-		new = INSN_B_1F_5;
-#else
-		/*
-		 * lui v1, hi_16bit_of_mcount        --> b 1f (0x10000004)
-		 * addiu v1, v1, low_16bit_of_mcount
-		 * move at, ra
-		 * jalr v1
-		 *  nop | move $12, ra_address | sub sp, sp, 8
-		 *                                  1: offset = 4 instructions
-		 */
-		new = INSN_B_1F_4;
-#endif
-	} else {
-		/*
-		 * move at, ra
-		 * jal _mcount --> nop
-		 */
-		new = INSN_NOP;
-	}
+	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
 	return ftrace_modify_code(ip, new);
 }
 
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	unsigned int new;
 	unsigned long ip = rec->ip;
 
-	/* ip, module: 0xc0000000, kernel: 0x80000000 */
-	new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+		insn_lui_v1_hi16_mcount;
 
 	return ftrace_modify_code(ip, new);
 }
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
 #define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
 #define OFFSET_MASK	0xffff	/* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-				     unsigned long parent,
-				     unsigned long parent_addr,
-				     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-	unsigned long sp, ip, ra;
+	unsigned long sp, ip, tmp;
 	unsigned int code;
 	int faulted;
 
 	/*
-	 * For module, move the ip from calling site of mcount to the
-	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-	 * kernel, move to the instruction "move ra, at"(offset is 12)
+	 * For module, move the ip from the return address after the
+	 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+	 * kernel, move after the instruction "move ra, at"(offset is 16)
 	 */
-	ip = self_addr - (in_module(self_addr) ? 20 : 12);
+	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
 	/*
 	 * search the text until finding the non-store instruction or "s{d,w}
 	 * ra, offset(sp)" instruction
 	 */
 	do {
-		ip -= 4;
-
 		/* get the code at "ip": code = *(unsigned int *)ip; */
 		safe_load_code(code, ip, faulted);
 
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
 		 * store the ra on the stack
 		 */
 		if ((code & S_R_SP) != S_R_SP)
-			return parent_addr;
+			return parent_ra_addr;
 
-	} while (((code & S_RA_SP) != S_RA_SP));
+		/* Move to the next instruction */
+		ip -= 4;
+	} while ((code & S_RA_SP) != S_RA_SP);
 
 	sp = fp + (code & OFFSET_MASK);
 
-	/* ra = *(unsigned long *)sp; */
-	safe_load_stack(ra, sp, faulted);
+	/* tmp = *(unsigned long *)sp; */
+	safe_load_stack(tmp, sp, faulted);
 	if (unlikely(faulted))
 		return 0;
 
-	if (ra == parent)
+	if (tmp == old_parent_ra)
 		return sp;
 	return 0;
 }
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   unsigned long fp)
 {
-	unsigned long old;
+	unsigned long old_parent_ra;
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)
 	    &return_to_handler;
-	int faulted;
+	int faulted, insns;
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
 	/*
-	 * "parent" is the stack address saved the return address of the caller
-	 * of _mcount.
+	 * "parent_ra_addr" is the stack address saved the return address of
+	 * the caller of _mcount.
 	 *
 	 * if the gcc < 4.5, a leaf function does not save the return address
 	 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 * do it in ftrace_graph_caller of mcount.S.
 	 */
 
-	/* old = *parent; */
-	safe_load_stack(old, parent, faulted);
+	/* old_parent_ra = *parent_ra_addr; */
+	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-	parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-							 (unsigned long)parent, fp);
+	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+			old_parent_ra, (unsigned long)parent_ra_addr, fp);
 	/*
 	 * If fails when getting the stack address of the non-leaf function's
 	 * ra, stop function graph tracer and return
 	 */
-	if (parent == 0)
+	if (parent_ra_addr == 0)
 		goto out;
 #endif
-	/* *parent = return_hooker; */
-	safe_store_stack(return_hooker, parent, faulted);
+	/* *parent_ra_addr = return_hooker; */
+	safe_store_stack(return_hooker, parent_ra_addr, faulted);
 	if (unlikely(faulted))
 		goto out;
 
-	if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-	    -EBUSY) {
-		*parent = old;
+	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+	    == -EBUSY) {
+		*parent_ra_addr = old_parent_ra;
 		return;
 	}
 
-	trace.func = self_addr;
+	/*
+	 * Get the recorded ip of the current mcount calling site in the
+	 * __mcount_loc section, which will be used to filter the function
+	 * entries configured through the tracing/set_graph_function interface.
+	 */
+
+	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
 	/* Only trace if the calling function expects to */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
-		*parent = old;
+		*parent_ra_addr = old_parent_ra;
 	}
 	return;
 out:
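The INSN_B_1F macro above packs the branch offset straight into the instruction word: MIPS encodes an unconditional "b" as beq $0, $0, offset, so the fixed upper half is 0x10000000 and the low 16 bits hold the signed offset in words, counted from the instruction after the branch (its delay slot). A standalone sketch of that encoding, reproducing the constants the removed INSN_B_1F_4/INSN_B_1F_5 macros spelled out by hand:

#include <stdio.h>
#include <stdint.h>

/* b <target>: encoded as beq $0, $0, offset; the offset is in words,
 * relative to the instruction following the branch. */
static uint32_t insn_b(int16_t word_offset)
{
	return 0x10000000u | (uint16_t)word_offset;
}

int main(void)
{
	printf("b 1f, 4 insns ahead: 0x%08x\n", insn_b(4)); /* 0x10000004 */
	printf("b 1f, 5 insns ahead: 0x%08x\n", insn_b(5)); /* 0x10000005 */
	return 0;
}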
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b83..a8244854d3dc 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 				 struct hw_perf_event *hwc,
 				 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
 	unsigned long flags;
 	int shift = 64 - TOTAL_BITS;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int idx;
+	int err = 0;
 
+	perf_pmu_disable(event->pmu);
 
-	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
-	/* We are working on a local event. */
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
 	mipspmu->disable_event(idx);
+	cpuc->events[idx] = event;
 
-	barrier();
-
-	mipspmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
 
+	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	mipspmu->enable_event(hwc, hwc->idx);
+	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+	mipspmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
 	perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_hwc = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	/* Allow mixed event group. So return 1 to pass validation. */
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
 	return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
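This rework moves the MIPS code from the old enable/disable/unthrottle callbacks to the split PMU model (event_init, add/del, start/stop) that the perf core has used since 2.6.36. Userspace is unaffected; a plain perf_event_open() counter still exercises the whole new path. A minimal userspace sketch (the workload is hypothetical and the callback mapping in the comments is approximate):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* accepted by mipspmu_event_init() */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* -> event_init() */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* -> add() + start() */
	/* ... hypothetical workload ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);	/* -> stop() */
	read(fd, &count, sizeof(count));	/* -> read() */
	printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}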
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d226669..d9a7db78ed62 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
 	 * interrupt, not NMI.
 	 */
 	if (handled == IRQ_HANDLED)
-		perf_event_do_pending();
+		irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
 	read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
 			"CPU, irq %d%s\n", mipspmu->name, counters, irq,
 			irq < 0 ? " (share with timer interrupt)" : "");
 
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
 	return 0;
 }
 early_initcall(init_hw_perf_events);
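irq_work_run() drains callbacks queued through the generic irq_work facility (hence the new "select HAVE_IRQ_WORK" in the Kconfig hunk at the top): perf queues its wakeups as irq_work from the overflow handler, and an architecture without a self-IPI, as MIPS here, simply runs the queue at the tail of the counter interrupt. A rough sketch of the producer side, with hypothetical names, against the irq_work API of this era:

#include <linux/interrupt.h>
#include <linux/irq_work.h>

static void my_wakeup(struct irq_work *work)
{
	/* Runs soon after the hard interrupt, with interrupts enabled. */
}

static struct irq_work my_work;	/* init_irq_work(&my_work, my_wakeup) once, at init */

static irqreturn_t my_counter_irq(int irq, void *dev_id)
{
	irq_work_queue(&my_work);	/* mark pending; drained by irq_work_run() */
	return IRQ_HANDLED;
}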
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca39..dbbe0ce48d89 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2e..aae986613795 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-	int err, tmp;
+	int err, tmp __maybe_unused;
 	while (1) {
 		lock_fpu_owner();
 		own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb49..32a256101082 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
+struct create_idle {
+	struct work_struct work;
+	struct task_struct *idle;
+	struct completion done;
+	int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+	struct create_idle *c_idle =
+		container_of(work, struct create_idle, work);
+
+	c_idle->idle = fork_idle(c_idle->cpu);
+	complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	 * Linux can schedule processes on this slave.
 	 */
 	if (!cpu_idle_thread[cpu]) {
-		idle = fork_idle(cpu);
-		cpu_idle_thread[cpu] = idle;
+		/*
+		 * Schedule work item to avoid forking user task
+		 * Ported from arch/x86/kernel/smpboot.c
+		 */
+		struct create_idle c_idle = {
+			.cpu    = cpu,
+			.done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+		};
+
+		INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+		schedule_work(&c_idle.work);
+		wait_for_completion(&c_idle.done);
+		idle = cpu_idle_thread[cpu] = c_idle.idle;
 
 		if (IS_ERR(idle))
 			panic(KERN_ERR "Fork failed for CPU %d", cpu);
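fork_idle() must not run in the context of a user task, or the new idle thread would inherit parts of that task's context; queueing the fork to keventd guarantees a kernel-thread caller. The on-stack work-plus-completion idiom used above is generic; a sketch of the same shape outside SMP bringup (names hypothetical):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct onstack_req {
	struct work_struct work;
	struct completion done;
	int result;
};

static void onstack_worker(struct work_struct *work)
{
	struct onstack_req *req = container_of(work, struct onstack_req, work);

	req->result = 42;		/* the real work, in kworker context */
	complete(&req->done);		/* wake the waiter */
}

static int run_in_worker(void)
{
	struct onstack_req req = {
		.done = COMPLETION_INITIALIZER_ONSTACK(req.done),
	};

	INIT_WORK_ONSTACK(&req.work, onstack_worker);
	schedule_work(&req.work);
	wait_for_completion(&req.done);	/* req stays valid until completion */
	return req.result;
}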
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e0..58beabf50b3c 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-	long cmd, arg1, arg2, arg3;
+	long cmd, arg1, arg2;
 
 	cmd = regs.regs[4];
 	arg1 = regs.regs[5];
 	arg2 = regs.regs[6];
-	arg3 = regs.regs[7];
 
 	switch (cmd) {
 	case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 		if (arg1 & 2)
 			set_thread_flag(TIF_LOGADE);
 		else
-			clear_thread_flag(TIF_FIXADE);
+			clear_thread_flag(TIF_LOGADE);
 
 		return 0;
 
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fde..ab52b7cf3b6b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
 	spinlock_t tc_list_lock;
 	struct list_head tc_list;	/* Thread contexts */
 } vpecontrol = {
-	.vpe_list_lock	= SPIN_LOCK_UNLOCKED,
+	.vpe_list_lock	= __SPIN_LOCK_UNLOCKED(vpe_list_lock),
 	.vpe_list	= LIST_HEAD_INIT(vpecontrol.vpe_list),
-	.tc_list_lock	= SPIN_LOCK_UNLOCKED,
+	.tc_list_lock	= __SPIN_LOCK_UNLOCKED(tc_list_lock),
 	.tc_list	= LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
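SPIN_LOCK_UNLOCKED was deprecated because a shared initializer cannot give each lock its own lockdep class; __SPIN_LOCK_UNLOCKED(name) takes a name for exactly that purpose. For an ordinary file-scope lock DEFINE_SPINLOCK() is the usual spelling; the struct-member form above needs the explicit initializer. A short sketch with illustrative names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* plain static lock */

static struct {
	spinlock_t lock;		/* lock embedded in a static struct, */
	int count;			/* as in vpecontrol above */
} my_ctl = {
	.lock = __SPIN_LOCK_UNLOCKED(my_ctl.lock),
};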
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77fec7ea..aca93eed8779 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
+if MACH_LOONGSON
+
 choice
 	prompt "Machine Type"
-	depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
 	bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
 config LOONGSON_MC146818
 	bool
 	default n
+
+endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06defc4f7f..353e1d2e41a5 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
 		strcat(arcs_cmdline, " ");
 	}
 
-	if ((strstr(arcs_cmdline, "console=")) == NULL)
-		strcat(arcs_cmdline, " console=ttyS0,115200");
-	if ((strstr(arcs_cmdline, "root=")) == NULL)
-		strcat(arcs_cmdline, " root=/dev/hda1");
-
 	prom_init_machtype();
 }
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b73f91..2efd5d9dee27 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
 
 void __init prom_init_machtype(void)
 {
-	char *p, str[MACHTYPE_LEN];
+	char *p, str[MACHTYPE_LEN + 1];
 	int machtype = MACH_LEMOTE_FL2E;
 
 	mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
 	}
 	p += strlen("machtype=");
 	strncpy(str, p, MACHTYPE_LEN);
+	str[MACHTYPE_LEN] = '\0';
 	p = strstr(str, " ");
 	if (p)
 		*p = '\0';
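The added terminator matters because strncpy() does not NUL-terminate when the source is at least as long as the limit, so the strstr() that follows could read past the end of str. A userspace sketch of the failure mode and the fix (the MACHTYPE_LEN value is assumed for illustration; the real one lives in the Loongson headers):

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 50		/* assumed value, for illustration only */

int main(void)
{
	const char *p = "lemote-yeeloong-2f-8.9inches plus more boot arguments overflowing the limit";
	char str[MACHTYPE_LEN + 1];

	/* strncpy() copies at most MACHTYPE_LEN bytes and adds no NUL
	 * when strlen(p) >= MACHTYPE_LEN ... */
	strncpy(str, p, MACHTYPE_LEN);
	str[MACHTYPE_LEN] = '\0';	/* ... so terminate explicitly */

	printf("machtype: %s\n", str);
	return 0;
}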
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d9500959..2a7d43f4f161 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
 
 
 #define COMPXSP \
-	unsigned xm; int xe; int xs; int xc
+	unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
 	unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
 
 
 #define COMPXDP \
-	u64 xm; int xe; int xs; int xc
+	u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
 	u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd24c82f..279599e9a779 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long lastpfn;
+	unsigned long lastpfn __maybe_unused;
 
 	pagetable_init();
 
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 083d3412d0bc..04f9e17db9d0 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
 static int scratchpad_offset(int i)
 {
 	BUG();
+	/* Really unreachable, but evidently some GCC want this. */
+	return 0;
 }
 #endif
 /*
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d80c88c..68798f869c0f 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
 *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
 *
 ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
 	struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
 	unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
 	/* write to clear all asserted interrupts */
 	preg->if_status = stat;
 
-	return PCIBIOS_SUCCESSFUL;
+	return IRQ_HANDLED;
 }
 
 /*****************************************************************************
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988bb85d..8d798497c614 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@ choice
 
 config PMC_MSP4200_EVAL
 	bool "PMC-Sierra MSP4200 Eval Board"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
 config PMC_MSP4200_GW
 	bool "PMC-Sierra MSP4200 VoIP Gateway"
-	select CEVT_R4K
-	select CSRC_R4K
 	select IRQ_MSP_SLP
 	select HW_HAS_PCI
 
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e15f57f..01df84ce31e2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
 		mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
 	return MSP_INT_VPE0_TIMER;
 }
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000000..e656ad8c0a2e
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_CE4100_H_
+#define _ASM_CE4100_H_
+
+int ce4100_pci_init(void);
+
+#endif
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 13a389179514..452932d34730 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
 		addr += size;
 	}
 
-	printk(KERN_INFO "Scanning %d areas for low memory corruption\n",
-	       num_scan_areas);
+	if (num_scan_areas)
+		printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
 }
 
 
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
 {
 	check_for_bios_corruption();
 	schedule_delayed_work(&bios_check_work,
 		round_jiffies_relative(corruption_check_period*HZ));
 }
 
 static int start_periodic_check_for_corruption(void)
 {
-	if (!memory_corruption_check || corruption_check_period == 0)
+	if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
 		return 0;
 
 	printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d90ceb882a4..20e3f8702d1e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
 	for (address = VMALLOC_START & PMD_MASK;
 	     address >= TASK_SIZE && address < FIXADDR_TOP;
 	     address += PMD_SIZE) {
-
-		unsigned long flags;
 		struct page *page;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
 			pmd_t *ret;
 
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
 			if (!ret)
 				break;
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	       unsigned long address, unsigned int fault)
 {
 	if (fault & VM_FAULT_OOM) {
+		/* Kernel mode? Handle exceptions or die: */
+		if (!(error_code & PF_USER)) {
+			up_read(&current->mm->mmap_sem);
+			no_context(regs, error_code, address);
+			return;
+		}
+
 		out_of_memory(regs, error_code, address);
 	} else {
 		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 71a59296af80..c14a5422e152 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 	for (address = start; address <= end; address += PGDIR_SIZE) {
 		const pgd_t *pgd_ref = pgd_offset_k(address);
-		unsigned long flags;
 		struct page *page;
 
 		if (pgd_none(*pgd_ref))
 			continue;
 
-		spin_lock_irqsave(&pgd_lock, flags);
+		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			pgd_t *pgd;
 			spinlock_t *pgt_lock;
 
 			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 			spin_lock(pgt_lock);
 
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
 
 			spin_unlock(pgt_lock);
 		}
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock(&pgd_lock);
 	}
 }
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d343b3c81f3c..90825f2eb0f4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
 
 void update_page_count(int level, unsigned long pages)
 {
-	unsigned long flags;
-
 	/* Protect against CPA */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	direct_pages_count[level] += pages;
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
 try_preserve_large_page(pte_t *kpte, unsigned long address,
 			struct cpa_data *cpa)
 {
-	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
+	unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
 	pte_t new_pte, old_pte, *tmp;
 	pgprot_t old_prot, new_prot, req_prot;
 	int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	if (cpa->force_split)
 		return 1;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	}
 
 out_unlock:
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return do_split;
 }
 
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
-	unsigned long flags, pfn, pfninc = 1;
+	unsigned long pfn, pfninc = 1;
 	unsigned int i, level;
 	pte_t *pbase, *tmp;
 	pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	if (!base)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	/*
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
@@ -591,7 +589,7 @@ out_unlock:
 	 */
 	if (base)
 		__free_page(base);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return 0;
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 500242d3c96d..0113d19c8aa6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) | |||
121 | 121 | ||
122 | static void pgd_dtor(pgd_t *pgd) | 122 | static void pgd_dtor(pgd_t *pgd) |
123 | { | 123 | { |
124 | unsigned long flags; /* can be called from interrupt context */ | ||
125 | |||
126 | if (SHARED_KERNEL_PMD) | 124 | if (SHARED_KERNEL_PMD) |
127 | return; | 125 | return; |
128 | 126 | ||
129 | spin_lock_irqsave(&pgd_lock, flags); | 127 | spin_lock(&pgd_lock); |
130 | pgd_list_del(pgd); | 128 | pgd_list_del(pgd); |
131 | spin_unlock_irqrestore(&pgd_lock, flags); | 129 | spin_unlock(&pgd_lock); |
132 | } | 130 | } |
133 | 131 | ||
134 | /* | 132 | /* |
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
260 | { | 258 | { |
261 | pgd_t *pgd; | 259 | pgd_t *pgd; |
262 | pmd_t *pmds[PREALLOCATED_PMDS]; | 260 | pmd_t *pmds[PREALLOCATED_PMDS]; |
263 | unsigned long flags; | ||
264 | 261 | ||
265 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); | 262 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
266 | 263 | ||
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
280 | * respect to anything walking the pgd_list, so that they | 277 | * respect to anything walking the pgd_list, so that they |
281 | * never see a partially populated pgd. | 278 | * never see a partially populated pgd. |
282 | */ | 279 | */ |
283 | spin_lock_irqsave(&pgd_lock, flags); | 280 | spin_lock(&pgd_lock); |
284 | 281 | ||
285 | pgd_ctor(mm, pgd); | 282 | pgd_ctor(mm, pgd); |
286 | pgd_prepopulate_pmd(mm, pgd, pmds); | 283 | pgd_prepopulate_pmd(mm, pgd, pmds); |
287 | 284 | ||
288 | spin_unlock_irqrestore(&pgd_lock, flags); | 285 | spin_unlock(&pgd_lock); |
289 | 286 | ||
290 | return pgd; | 287 | return pgd; |
291 | 288 | ||
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 85b68ef5e809..9260b3eb18d4 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | 36 | ||
37 | #include <asm/ce4100.h> | ||
37 | #include <asm/pci_x86.h> | 38 | #include <asm/pci_x86.h> |
38 | 39 | ||
39 | struct sim_reg { | 40 | struct sim_reg { |
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = { | |||
306 | .write = ce4100_conf_write, | 307 | .write = ce4100_conf_write, |
307 | }; | 308 | }; |
308 | 309 | ||
309 | static int __init ce4100_pci_init(void) | 310 | int __init ce4100_pci_init(void) |
310 | { | 311 | { |
311 | init_sim_regs(); | 312 | init_sim_regs(); |
312 | raw_pci_ops = &ce4100_pci_conf; | 313 | raw_pci_ops = &ce4100_pci_conf; |
313 | return 0; | 314 | /* Indicate to the caller that it should invoke pci_legacy_init() */ |
315 | return 1; | ||
314 | } | 316 | } |
315 | subsys_initcall(ce4100_pci_init); | ||
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index d2c0d51a7178..cd6f184c3b3f 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/serial_reg.h> | 15 | #include <linux/serial_reg.h> |
16 | #include <linux/serial_8250.h> | 16 | #include <linux/serial_8250.h> |
17 | 17 | ||
18 | #include <asm/ce4100.h> | ||
18 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
19 | #include <asm/io.h> | 20 | #include <asm/io.h> |
20 | 21 | ||
@@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void) | |||
129 | x86_init.resources.probe_roms = x86_init_noop; | 130 | x86_init.resources.probe_roms = x86_init_noop; |
130 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 131 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
131 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; | 132 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; |
133 | x86_init.pci.init = ce4100_pci_init; | ||
132 | } | 134 | } |
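Together, the two CE4100 hunks convert the PCI setup from a standalone subsys_initcall into the x86_init.pci.init hook, where a nonzero return asks the generic caller to run pci_legacy_init() as well. A hedged sketch of the protocol; the dispatcher below is an assumption about the common x86 PCI code, not quoted from it:

        /* Platform side: wired up during x86_ce4100_early_setup(). */
        x86_init.pci.init = ce4100_pci_init;

        /* Generic side (paraphrased): one common subsys initcall consults
         * the hook instead of each platform registering its own. */
        static int __init pci_subsys_init(void)
        {
                if (x86_init.pci.init())        /* nonzero requests the legacy fallback */
                        pci_legacy_init();
                return 0;
        }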
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61ad574..f6089421147a 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -986,10 +986,9 @@ static void xen_pgd_pin(struct mm_struct *mm) | |||
986 | */ | 986 | */ |
987 | void xen_mm_pin_all(void) | 987 | void xen_mm_pin_all(void) |
988 | { | 988 | { |
989 | unsigned long flags; | ||
990 | struct page *page; | 989 | struct page *page; |
991 | 990 | ||
992 | spin_lock_irqsave(&pgd_lock, flags); | 991 | spin_lock(&pgd_lock); |
993 | 992 | ||
994 | list_for_each_entry(page, &pgd_list, lru) { | 993 | list_for_each_entry(page, &pgd_list, lru) { |
995 | if (!PagePinned(page)) { | 994 | if (!PagePinned(page)) { |
@@ -998,7 +997,7 @@ void xen_mm_pin_all(void) | |||
998 | } | 997 | } |
999 | } | 998 | } |
1000 | 999 | ||
1001 | spin_unlock_irqrestore(&pgd_lock, flags); | 1000 | spin_unlock(&pgd_lock); |
1002 | } | 1001 | } |
1003 | 1002 | ||
1004 | /* | 1003 | /* |
@@ -1099,10 +1098,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) | |||
1099 | */ | 1098 | */ |
1100 | void xen_mm_unpin_all(void) | 1099 | void xen_mm_unpin_all(void) |
1101 | { | 1100 | { |
1102 | unsigned long flags; | ||
1103 | struct page *page; | 1101 | struct page *page; |
1104 | 1102 | ||
1105 | spin_lock_irqsave(&pgd_lock, flags); | 1103 | spin_lock(&pgd_lock); |
1106 | 1104 | ||
1107 | list_for_each_entry(page, &pgd_list, lru) { | 1105 | list_for_each_entry(page, &pgd_list, lru) { |
1108 | if (PageSavePinned(page)) { | 1106 | if (PageSavePinned(page)) { |
@@ -1112,7 +1110,7 @@ void xen_mm_unpin_all(void) | |||
1112 | } | 1110 | } |
1113 | } | 1111 | } |
1114 | 1112 | ||
1115 | spin_unlock_irqrestore(&pgd_lock, flags); | 1113 | spin_unlock(&pgd_lock); |
1116 | } | 1114 | } |
1117 | 1115 | ||
1118 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 1116 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d270b3ff896b..6140ea1de45a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
2194 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2194 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; |
2195 | } | 2195 | } |
2196 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2196 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2197 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2198 | r700_vram_gtt_location(rdev, &rdev->mc); | 2197 | r700_vram_gtt_location(rdev, &rdev->mc); |
2199 | radeon_update_bandwidth_info(rdev); | 2198 | radeon_update_bandwidth_info(rdev); |
2200 | 2199 | ||
@@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2934 | /* XXX: ontario has problems blitting to gart at the moment */ | 2933 | /* XXX: ontario has problems blitting to gart at the moment */ |
2935 | if (rdev->family == CHIP_PALM) { | 2934 | if (rdev->family == CHIP_PALM) { |
2936 | rdev->asic->copy = NULL; | 2935 | rdev->asic->copy = NULL; |
2937 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 2936 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
2938 | } | 2937 | } |
2939 | 2938 | ||
2940 | /* allocate wb buffer */ | 2939 | /* allocate wb buffer */ |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2adfb03f479b..2be698e78ff2 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -623,7 +623,7 @@ done: | |||
623 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | 623 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
624 | return r; | 624 | return r; |
625 | } | 625 | } |
626 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 626 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev) | |||
631 | { | 631 | { |
632 | int r; | 632 | int r; |
633 | 633 | ||
634 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 634 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
635 | if (rdev->r600_blit.shader_obj == NULL) | 635 | if (rdev->r600_blit.shader_obj == NULL) |
636 | return; | 636 | return; |
637 | /* If we can't reserve the bo, unref should be enough to destroy | 637 | /* If we can't reserve the bo, unref should be enough to destroy |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 79de991e1ea3..e372f9e1e5ce 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -1024,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1024 | return r; | 1024 | return r; |
1025 | } | 1025 | } |
1026 | rdev->cp.ready = true; | 1026 | rdev->cp.ready = true; |
1027 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 1027 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
1028 | return 0; | 1028 | return 0; |
1029 | } | 1029 | } |
1030 | 1030 | ||
@@ -1042,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev) | |||
1042 | void r100_cp_disable(struct radeon_device *rdev) | 1042 | void r100_cp_disable(struct radeon_device *rdev) |
1043 | { | 1043 | { |
1044 | /* Disable ring */ | 1044 | /* Disable ring */ |
1045 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1045 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1046 | rdev->cp.ready = false; | 1046 | rdev->cp.ready = false; |
1047 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1047 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1048 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1048 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
@@ -2312,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) | |||
2312 | /* FIXME we don't use the second aperture yet when we could use it */ | 2312 | /* FIXME we don't use the second aperture yet when we could use it */ |
2313 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) | 2313 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
2314 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2314 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2315 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2316 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | 2315 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
2317 | if (rdev->flags & RADEON_IS_IGP) { | 2316 | if (rdev->flags & RADEON_IS_IGP) { |
2318 | uint32_t tom; | 2317 | uint32_t tom; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index de88624d5f87..9b3fad23b76c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
1255 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1255 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1256 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1256 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1257 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1257 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1258 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
1259 | r600_vram_gtt_location(rdev, &rdev->mc); | 1258 | r600_vram_gtt_location(rdev, &rdev->mc); |
1260 | 1259 | ||
1261 | if (rdev->flags & RADEON_IS_IGP) { | 1260 | if (rdev->flags & RADEON_IS_IGP) { |
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1937 | */ | 1936 | */ |
1938 | void r600_cp_stop(struct radeon_device *rdev) | 1937 | void r600_cp_stop(struct radeon_device *rdev) |
1939 | { | 1938 | { |
1940 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1939 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1941 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1940 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
1942 | WREG32(SCRATCH_UMSK, 0); | 1941 | WREG32(SCRATCH_UMSK, 0); |
1943 | } | 1942 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 41f7aafc97c4..df68d91e8190 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -558,7 +558,7 @@ done: | |||
558 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | 558 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
559 | return r; | 559 | return r; |
560 | } | 560 | } |
561 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 561 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
562 | return 0; | 562 | return 0; |
563 | } | 563 | } |
564 | 564 | ||
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
566 | { | 566 | { |
567 | int r; | 567 | int r; |
568 | 568 | ||
569 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 569 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
570 | if (rdev->r600_blit.shader_obj == NULL) | 570 | if (rdev->r600_blit.shader_obj == NULL) |
571 | return; | 571 | return; |
572 | /* If we can't reserve the bo, unref should be enough to destroy | 572 | /* If we can't reserve the bo, unref should be enough to destroy |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 56c48b67ef3d..6b3429495118 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -345,7 +345,6 @@ struct radeon_mc { | |||
345 | * about vram size near mc fb location */ | 345 | * about vram size near mc fb location */ |
346 | u64 mc_vram_size; | 346 | u64 mc_vram_size; |
347 | u64 visible_vram_size; | 347 | u64 visible_vram_size; |
348 | u64 active_vram_size; | ||
349 | u64 gtt_size; | 348 | u64 gtt_size; |
350 | u64 gtt_start; | 349 | u64 gtt_start; |
351 | u64 gtt_end; | 350 | u64 gtt_end; |
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m | |||
1448 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | 1447 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
1449 | extern int radeon_resume_kms(struct drm_device *dev); | 1448 | extern int radeon_resume_kms(struct drm_device *dev); |
1450 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1449 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1450 | extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); | ||
1451 | 1451 | ||
1452 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ | 1452 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ |
1453 | extern bool r600_card_posted(struct radeon_device *rdev); | 1453 | extern bool r600_card_posted(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index df95eb83dac6..1fe95dfe48c9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
156 | { | 156 | { |
157 | struct radeon_device *rdev = dev->dev_private; | 157 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 158 | struct drm_radeon_gem_info *args = data; |
159 | struct ttm_mem_type_manager *man; | ||
160 | |||
161 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
159 | 162 | ||
160 | args->vram_size = rdev->mc.real_vram_size; | 163 | args->vram_size = rdev->mc.real_vram_size; |
161 | args->vram_visible = rdev->mc.real_vram_size; | 164 | args->vram_visible = (u64)man->size << PAGE_SHIFT; |
162 | if (rdev->stollen_vga_memory) | 165 | if (rdev->stollen_vga_memory) |
163 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); | 166 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
164 | args->vram_visible -= radeon_fbdev_total_size(rdev); | 167 | args->vram_visible -= radeon_fbdev_total_size(rdev); |
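The ioctl now derives visible VRAM from TTM's page-based manager size rather than the raw MC value, so the active-VRAM clamps applied by radeon_ttm_set_active_vram_size() are reflected. The (u64) cast before the shift matters: man->size is an unsigned long, so on a 32-bit kernel the byte count would overflow once the managed range reaches 4 GiB. A standalone model of the arithmetic, with an invented page count:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */

        int main(void)
        {
                unsigned long pages = 262144;  /* hypothetical TTM man->size: 1 GiB worth */
                uint64_t bytes = (uint64_t)pages << PAGE_SHIFT;  /* widen BEFORE shifting */

                printf("%llu MiB visible\n",
                       (unsigned long long)(bytes >> 20));       /* prints: 1024 MiB visible */
                return 0;
        }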
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e5b2cf10cbf4..8389b4c63d12 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
589 | DRM_INFO("radeon: ttm finalized\n"); | 589 | DRM_INFO("radeon: ttm finalized\n"); |
590 | } | 590 | } |
591 | 591 | ||
592 | /* this should only be called at bootup or when userspace | ||
593 | * isn't running */ | ||
594 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | ||
595 | { | ||
596 | struct ttm_mem_type_manager *man; | ||
597 | |||
598 | if (!rdev->mman.initialized) | ||
599 | return; | ||
600 | |||
601 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
602 | /* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */ | ||
603 | man->size = size >> PAGE_SHIFT; | ||
604 | } | ||
605 | |||
592 | static struct vm_operations_struct radeon_ttm_vm_ops; | 606 | static struct vm_operations_struct radeon_ttm_vm_ops; |
593 | static const struct vm_operations_struct *ttm_vm_ops = NULL; | 607 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
594 | 608 | ||
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5afe294ed51f..8af4679db23e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
751 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 751 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
752 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 752 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
753 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 753 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
754 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
755 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 754 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
756 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 755 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
757 | base = G_000004_MC_FB_START(base) << 16; | 756 | base = G_000004_MC_FB_START(base) << 16; |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 6638c8e4c81b..66c949b7c18c 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
160 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
161 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 160 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
162 | base = G_000100_MC_FB_START(base) << 16; | 161 | base = G_000100_MC_FB_START(base) << 16; |
163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index d8ba67690656..714ad45757d0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
307 | */ | 307 | */ |
308 | void r700_cp_stop(struct radeon_device *rdev) | 308 | void r700_cp_stop(struct radeon_device *rdev) |
309 | { | 309 | { |
310 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 310 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
311 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 311 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
312 | WREG32(SCRATCH_UMSK, 0); | 312 | WREG32(SCRATCH_UMSK, 0); |
313 | } | 313 | } |
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
1123 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1123 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1124 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1124 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1125 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1125 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1126 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
1127 | r700_vram_gtt_location(rdev, &rdev->mc); | 1126 | r700_vram_gtt_location(rdev, &rdev->mc); |
1128 | radeon_update_bandwidth_info(rdev); | 1127 | radeon_update_bandwidth_info(rdev); |
1129 | 1128 | ||
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index eca855a55c0d..3de4ba0260a5 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c | |||
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op) | |||
646 | struct cpwd *p = dev_get_drvdata(&op->dev); | 646 | struct cpwd *p = dev_get_drvdata(&op->dev); |
647 | int i; | 647 | int i; |
648 | 648 | ||
649 | for (i = 0; i < 4; i++) { | 649 | for (i = 0; i < WD_NUMDEVS; i++) { |
650 | misc_deregister(&p->devs[i].misc); | 650 | misc_deregister(&p->devs[i].misc); |
651 | 651 | ||
652 | if (!p->enabled) { | 652 | if (!p->enabled) { |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 24b966d5061a..204a5603c4ae 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
710 | return 0; | 710 | return 0; |
711 | } | 711 | } |
712 | 712 | ||
713 | static void __devexit hpwdt_exit_nmi_decoding(void) | 713 | static void hpwdt_exit_nmi_decoding(void) |
714 | { | 714 | { |
715 | unregister_die_notifier(&die_notifier); | 715 | unregister_die_notifier(&die_notifier); |
716 | if (cru_rom_addr) | 716 | if (cru_rom_addr) |
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
728 | 728 | ||
729 | static void __devexit hpwdt_exit_nmi_decoding(void) | 729 | static void hpwdt_exit_nmi_decoding(void) |
730 | { | 730 | { |
731 | } | 731 | } |
732 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | 732 | #endif /* CONFIG_HPWDT_NMI_DECODING */ |
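Dropping __devexit here is a section-mismatch fix, assuming the usual rule: with CONFIG_HOTPLUG=n, __devexit code can be discarded at link time, so a call reaching it from init-time code, such as the error path of hpwdt_init_nmi_decoding(), would target a discarded section. An illustrative sketch with hypothetical names, not the hpwdt source:

        static void __devexit nmi_teardown(void) { }     /* may be discarded */

        static int __devinit nmi_setup(struct pci_dev *dev)
        {
                if (!dev)
                        goto error;
                return 0;
        error:
                nmi_teardown();   /* section mismatch: init code calls discarded text */
                return -ENODEV;
        }
        /* The fix mirrors the hunk: drop __devexit so the teardown helper
         * always stays resident in .text. */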
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 0461858e07d0..b61ab1c54293 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c | |||
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) | |||
508 | sch311x_sio_outb(sio_config_port, 0x07, 0x0a); | 508 | sch311x_sio_outb(sio_config_port, 0x07, 0x0a); |
509 | 509 | ||
510 | /* Check if Logical Device Register is currently active */ | 510 | /* Check if Logical Device Register is currently active */ |
511 | if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0) | 511 | if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0) |
512 | printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); | 512 | printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); |
513 | 513 | ||
514 | /* Get the base address of the runtime registers */ | 514 | /* Get the base address of the runtime registers */ |
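The old test had two bugs at once: a logical && where a bitwise & was intended, and precedence that binds 0x01 == 0 first, so the whole expression was constant false and the "not active" message could never print. A runnable demonstration, with an invented register value:

        #include <stdio.h>

        int main(void)
        {
                unsigned char reg = 0x02;            /* bit 0 clear */

                /* Buggy: '==' binds tighter than '&&', so this parses as
                 * reg && (0x01 == 0), which is always 0. */
                printf("%d\n", reg && 0x01 == 0);    /* prints 0 */

                /* Fixed: mask the bit first, then compare. */
                printf("%d\n", (reg & 0x01) == 0);   /* prints 1 */
                return 0;
        }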
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index a6c12dec91a1..df2a64dc9672 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c | |||
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void) | |||
109 | outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ | 109 | outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ |
110 | outb_p(0x30, WDT_EFER); /* select CR30 */ | 110 | outb_p(0x30, WDT_EFER); /* select CR30 */ |
111 | c = inb_p(WDT_EFDR); | 111 | c = inb_p(WDT_EFDR); |
112 | outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ | 112 | outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ |
113 | 113 | ||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
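The sibling bug to the previous hunk: c || 0x01 collapses to the logical value 1 instead of setting bit 0, so the register write discarded every other bit that had just been read back. A runnable demonstration:

        #include <stdio.h>

        int main(void)
        {
                unsigned char c = 0xf0;          /* value read back from the chip */

                printf("0x%02x\n", c || 0x01);   /* 0x01: logical OR collapses to 0 or 1 */
                printf("0x%02x\n", c | 0x01);    /* 0xf1: bitwise OR sets bit 0, keeps the rest */
                return 0;
        }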
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1cc600e77bb4..2f8e61816d75 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/inet.h> | 37 | #include <linux/inet.h> |
38 | #include <linux/nfs_xdr.h> | 38 | #include <linux/nfs_xdr.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/compat.h> | ||
40 | 41 | ||
41 | #include <asm/system.h> | 42 | #include <asm/system.h> |
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word) | |||
89 | */ | 90 | */ |
90 | u64 nfs_compat_user_ino64(u64 fileid) | 91 | u64 nfs_compat_user_ino64(u64 fileid) |
91 | { | 92 | { |
92 | int ino; | 93 | #ifdef CONFIG_COMPAT |
94 | compat_ulong_t ino; | ||
95 | #else | ||
96 | unsigned long ino; | ||
97 | #endif | ||
93 | 98 | ||
94 | if (enable_ino64) | 99 | if (enable_ino64) |
95 | return fileid; | 100 | return fileid; |
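Only the declaration changes in this hunk; the folding logic later in the function is not shown. The point of compat_ulong_t over int is width and signedness: for a 32-bit task on a 64-bit kernel it is a 32-bit unsigned type, so the reduced inode number cannot come out negative the way a plain int could. A standalone model of the difference, with an invented fileid:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t fileid = 0x123456789abcdef0ULL;

                int as_int = (int)fileid;              /* implementation-defined, may go negative */
                uint32_t as_compat = (uint32_t)fileid; /* well-defined modulo-2^32 truncation */

                printf("%d\n", as_int);       /* -1698898192 on common ABIs */
                printf("%u\n", as_compat);    /* 2596069104 */
                return 0;
        }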
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 7a7474073148..1be36cf65bfc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp); | |||
298 | #if defined(CONFIG_NFS_V4_1) | 298 | #if defined(CONFIG_NFS_V4_1) |
299 | struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); | 299 | struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); |
300 | struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); | 300 | struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); |
301 | extern void nfs4_schedule_session_recovery(struct nfs4_session *); | ||
302 | #else | ||
303 | static inline void nfs4_schedule_session_recovery(struct nfs4_session *session) | ||
304 | { | ||
305 | } | ||
301 | #endif /* CONFIG_NFS_V4_1 */ | 306 | #endif /* CONFIG_NFS_V4_1 */ |
302 | 307 | ||
303 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); | 308 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); |
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *); | |||
307 | extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); | 312 | extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); |
308 | extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); | 313 | extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); |
309 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); | 314 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); |
310 | extern void nfs4_schedule_state_recovery(struct nfs_client *); | 315 | extern void nfs4_schedule_lease_recovery(struct nfs_client *); |
311 | extern void nfs4_schedule_state_manager(struct nfs_client *); | 316 | extern void nfs4_schedule_state_manager(struct nfs_client *); |
312 | extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); | 317 | extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); |
313 | extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state); | ||
314 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); | 318 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); |
315 | extern void nfs41_handle_recall_slot(struct nfs_client *clp); | 319 | extern void nfs41_handle_recall_slot(struct nfs_client *clp); |
316 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); | 320 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); |
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index f5c9b125e8cc..b73c34375f60 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c | |||
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) | |||
219 | goto out_err; | 219 | goto out_err; |
220 | } | 220 | } |
221 | buf = kmalloc(rlen + 1, GFP_KERNEL); | 221 | buf = kmalloc(rlen + 1, GFP_KERNEL); |
222 | if (!buf) { | ||
223 | dprintk("%s: Not enough memory\n", __func__); | ||
224 | goto out_err; | ||
225 | } | ||
222 | buf[rlen] = '\0'; | 226 | buf[rlen] = '\0'; |
223 | memcpy(buf, r_addr, rlen); | 227 | memcpy(buf, r_addr, rlen); |
224 | 228 | ||
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1ff76acc7e98..0a07e353a961 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -51,7 +51,6 @@ | |||
51 | #include <linux/sunrpc/bc_xprt.h> | 51 | #include <linux/sunrpc/bc_xprt.h> |
52 | #include <linux/xattr.h> | 52 | #include <linux/xattr.h> |
53 | #include <linux/utsname.h> | 53 | #include <linux/utsname.h> |
54 | #include <linux/mm.h> | ||
55 | 54 | ||
56 | #include "nfs4_fs.h" | 55 | #include "nfs4_fs.h" |
57 | #include "delegation.h" | 56 | #include "delegation.h" |
@@ -257,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
257 | case -NFS4ERR_OPENMODE: | 256 | case -NFS4ERR_OPENMODE: |
258 | if (state == NULL) | 257 | if (state == NULL) |
259 | break; | 258 | break; |
260 | nfs4_state_mark_reclaim_nograce(clp, state); | 259 | nfs4_schedule_stateid_recovery(server, state); |
261 | goto do_state_recovery; | 260 | goto wait_on_recovery; |
262 | case -NFS4ERR_STALE_STATEID: | 261 | case -NFS4ERR_STALE_STATEID: |
263 | case -NFS4ERR_STALE_CLIENTID: | 262 | case -NFS4ERR_STALE_CLIENTID: |
264 | case -NFS4ERR_EXPIRED: | 263 | case -NFS4ERR_EXPIRED: |
265 | goto do_state_recovery; | 264 | nfs4_schedule_lease_recovery(clp); |
265 | goto wait_on_recovery; | ||
266 | #if defined(CONFIG_NFS_V4_1) | 266 | #if defined(CONFIG_NFS_V4_1) |
267 | case -NFS4ERR_BADSESSION: | 267 | case -NFS4ERR_BADSESSION: |
268 | case -NFS4ERR_BADSLOT: | 268 | case -NFS4ERR_BADSLOT: |
@@ -273,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
273 | case -NFS4ERR_SEQ_MISORDERED: | 273 | case -NFS4ERR_SEQ_MISORDERED: |
274 | dprintk("%s ERROR: %d Reset session\n", __func__, | 274 | dprintk("%s ERROR: %d Reset session\n", __func__, |
275 | errorcode); | 275 | errorcode); |
276 | nfs4_schedule_state_recovery(clp); | 276 | nfs4_schedule_session_recovery(clp->cl_session); |
277 | exception->retry = 1; | 277 | exception->retry = 1; |
278 | break; | 278 | break; |
279 | #endif /* defined(CONFIG_NFS_V4_1) */ | 279 | #endif /* defined(CONFIG_NFS_V4_1) */ |
@@ -296,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
296 | } | 296 | } |
297 | /* We failed to handle the error */ | 297 | /* We failed to handle the error */ |
298 | return nfs4_map_errors(ret); | 298 | return nfs4_map_errors(ret); |
299 | do_state_recovery: | 299 | wait_on_recovery: |
300 | nfs4_schedule_state_recovery(clp); | ||
301 | ret = nfs4_wait_clnt_recover(clp); | 300 | ret = nfs4_wait_clnt_recover(clp); |
302 | if (ret == 0) | 301 | if (ret == 0) |
303 | exception->retry = 1; | 302 | exception->retry = 1; |
@@ -436,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * | |||
436 | clp = res->sr_session->clp; | 435 | clp = res->sr_session->clp; |
437 | do_renew_lease(clp, timestamp); | 436 | do_renew_lease(clp, timestamp); |
438 | /* Check sequence flags */ | 437 | /* Check sequence flags */ |
439 | if (atomic_read(&clp->cl_count) > 1) | 438 | if (res->sr_status_flags != 0) |
440 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | 439 | nfs4_schedule_lease_recovery(clp); |
441 | break; | 440 | break; |
442 | case -NFS4ERR_DELAY: | 441 | case -NFS4ERR_DELAY: |
443 | /* The server detected a resend of the RPC call and | 442 | /* The server detected a resend of the RPC call and |
@@ -1256,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state | |||
1256 | case -NFS4ERR_BAD_HIGH_SLOT: | 1255 | case -NFS4ERR_BAD_HIGH_SLOT: |
1257 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: | 1256 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
1258 | case -NFS4ERR_DEADSESSION: | 1257 | case -NFS4ERR_DEADSESSION: |
1259 | nfs4_schedule_state_recovery( | 1258 | nfs4_schedule_session_recovery(server->nfs_client->cl_session); |
1260 | server->nfs_client); | ||
1261 | goto out; | 1259 | goto out; |
1262 | case -NFS4ERR_STALE_CLIENTID: | 1260 | case -NFS4ERR_STALE_CLIENTID: |
1263 | case -NFS4ERR_STALE_STATEID: | 1261 | case -NFS4ERR_STALE_STATEID: |
1264 | case -NFS4ERR_EXPIRED: | 1262 | case -NFS4ERR_EXPIRED: |
1265 | /* Don't recall a delegation if it was lost */ | 1263 | /* Don't recall a delegation if it was lost */ |
1266 | nfs4_schedule_state_recovery(server->nfs_client); | 1264 | nfs4_schedule_lease_recovery(server->nfs_client); |
1267 | goto out; | 1265 | goto out; |
1268 | case -ERESTARTSYS: | 1266 | case -ERESTARTSYS: |
1269 | /* | 1267 | /* |
@@ -1272,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state | |||
1272 | */ | 1270 | */ |
1273 | case -NFS4ERR_ADMIN_REVOKED: | 1271 | case -NFS4ERR_ADMIN_REVOKED: |
1274 | case -NFS4ERR_BAD_STATEID: | 1272 | case -NFS4ERR_BAD_STATEID: |
1275 | nfs4_state_mark_reclaim_nograce(server->nfs_client, state); | 1273 | nfs4_schedule_stateid_recovery(server, state); |
1276 | case -EKEYEXPIRED: | 1274 | case -EKEYEXPIRED: |
1277 | /* | 1275 | /* |
1278 | * User RPCSEC_GSS context has expired. | 1276 | * User RPCSEC_GSS context has expired. |
@@ -1588,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) | |||
1588 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && | 1586 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && |
1589 | !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) | 1587 | !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) |
1590 | break; | 1588 | break; |
1591 | nfs4_schedule_state_recovery(clp); | 1589 | nfs4_schedule_state_manager(clp); |
1592 | ret = -EIO; | 1590 | ret = -EIO; |
1593 | } | 1591 | } |
1594 | return ret; | 1592 | return ret; |
@@ -3179,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata) | |||
3179 | if (task->tk_status < 0) { | 3177 | if (task->tk_status < 0) { |
3180 | /* Unless we're shutting down, schedule state recovery! */ | 3178 | /* Unless we're shutting down, schedule state recovery! */ |
3181 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) | 3179 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) |
3182 | nfs4_schedule_state_recovery(clp); | 3180 | nfs4_schedule_lease_recovery(clp); |
3183 | return; | 3181 | return; |
3184 | } | 3182 | } |
3185 | do_renew_lease(clp, timestamp); | 3183 | do_renew_lease(clp, timestamp); |
@@ -3262,7 +3260,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen, | |||
3262 | spages = pages; | 3260 | spages = pages; |
3263 | 3261 | ||
3264 | do { | 3262 | do { |
3265 | len = min(PAGE_CACHE_SIZE, buflen); | 3263 | len = min_t(size_t, PAGE_CACHE_SIZE, buflen); |
3266 | newpage = alloc_page(GFP_KERNEL); | 3264 | newpage = alloc_page(GFP_KERNEL); |
3267 | 3265 | ||
3268 | if (newpage == NULL) | 3266 | if (newpage == NULL) |
@@ -3504,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3504 | case -NFS4ERR_OPENMODE: | 3502 | case -NFS4ERR_OPENMODE: |
3505 | if (state == NULL) | 3503 | if (state == NULL) |
3506 | break; | 3504 | break; |
3507 | nfs4_state_mark_reclaim_nograce(clp, state); | 3505 | nfs4_schedule_stateid_recovery(server, state); |
3508 | goto do_state_recovery; | 3506 | goto wait_on_recovery; |
3509 | case -NFS4ERR_STALE_STATEID: | 3507 | case -NFS4ERR_STALE_STATEID: |
3510 | case -NFS4ERR_STALE_CLIENTID: | 3508 | case -NFS4ERR_STALE_CLIENTID: |
3511 | case -NFS4ERR_EXPIRED: | 3509 | case -NFS4ERR_EXPIRED: |
3512 | goto do_state_recovery; | 3510 | nfs4_schedule_lease_recovery(clp); |
3511 | goto wait_on_recovery; | ||
3513 | #if defined(CONFIG_NFS_V4_1) | 3512 | #if defined(CONFIG_NFS_V4_1) |
3514 | case -NFS4ERR_BADSESSION: | 3513 | case -NFS4ERR_BADSESSION: |
3515 | case -NFS4ERR_BADSLOT: | 3514 | case -NFS4ERR_BADSLOT: |
@@ -3520,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3520 | case -NFS4ERR_SEQ_MISORDERED: | 3519 | case -NFS4ERR_SEQ_MISORDERED: |
3521 | dprintk("%s ERROR %d, Reset session\n", __func__, | 3520 | dprintk("%s ERROR %d, Reset session\n", __func__, |
3522 | task->tk_status); | 3521 | task->tk_status); |
3523 | nfs4_schedule_state_recovery(clp); | 3522 | nfs4_schedule_session_recovery(clp->cl_session); |
3524 | task->tk_status = 0; | 3523 | task->tk_status = 0; |
3525 | return -EAGAIN; | 3524 | return -EAGAIN; |
3526 | #endif /* CONFIG_NFS_V4_1 */ | 3525 | #endif /* CONFIG_NFS_V4_1 */ |
@@ -3537,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3537 | } | 3536 | } |
3538 | task->tk_status = nfs4_map_errors(task->tk_status); | 3537 | task->tk_status = nfs4_map_errors(task->tk_status); |
3539 | return 0; | 3538 | return 0; |
3540 | do_state_recovery: | 3539 | wait_on_recovery: |
3541 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); | 3540 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); |
3542 | nfs4_schedule_state_recovery(clp); | ||
3543 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) | 3541 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) |
3544 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); | 3542 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); |
3545 | task->tk_status = 0; | 3543 | task->tk_status = 0; |
@@ -4150,7 +4148,7 @@ static void nfs4_lock_release(void *calldata) | |||
4150 | task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, | 4148 | task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, |
4151 | data->arg.lock_seqid); | 4149 | data->arg.lock_seqid); |
4152 | if (!IS_ERR(task)) | 4150 | if (!IS_ERR(task)) |
4153 | rpc_put_task(task); | 4151 | rpc_put_task_async(task); |
4154 | dprintk("%s: cancelling lock!\n", __func__); | 4152 | dprintk("%s: cancelling lock!\n", __func__); |
4155 | } else | 4153 | } else |
4156 | nfs_free_seqid(data->arg.lock_seqid); | 4154 | nfs_free_seqid(data->arg.lock_seqid); |
@@ -4174,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { | |||
4174 | 4172 | ||
4175 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) | 4173 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) |
4176 | { | 4174 | { |
4177 | struct nfs_client *clp = server->nfs_client; | ||
4178 | struct nfs4_state *state = lsp->ls_state; | ||
4179 | |||
4180 | switch (error) { | 4175 | switch (error) { |
4181 | case -NFS4ERR_ADMIN_REVOKED: | 4176 | case -NFS4ERR_ADMIN_REVOKED: |
4182 | case -NFS4ERR_BAD_STATEID: | 4177 | case -NFS4ERR_BAD_STATEID: |
4183 | case -NFS4ERR_EXPIRED: | 4178 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4184 | if (new_lock_owner != 0 || | 4179 | if (new_lock_owner != 0 || |
4185 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | 4180 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) |
4186 | nfs4_state_mark_reclaim_nograce(clp, state); | 4181 | nfs4_schedule_stateid_recovery(server, lsp->ls_state); |
4187 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | ||
4188 | break; | 4182 | break; |
4189 | case -NFS4ERR_STALE_STATEID: | 4183 | case -NFS4ERR_STALE_STATEID: |
4190 | if (new_lock_owner != 0 || | ||
4191 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | ||
4192 | nfs4_state_mark_reclaim_reboot(clp, state); | ||
4193 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | 4184 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4185 | case -NFS4ERR_EXPIRED: | ||
4186 | nfs4_schedule_lease_recovery(server->nfs_client); | ||
4194 | }; | 4187 | }; |
4195 | } | 4188 | } |
4196 | 4189 | ||
@@ -4406,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) | |||
4406 | case -NFS4ERR_EXPIRED: | 4399 | case -NFS4ERR_EXPIRED: |
4407 | case -NFS4ERR_STALE_CLIENTID: | 4400 | case -NFS4ERR_STALE_CLIENTID: |
4408 | case -NFS4ERR_STALE_STATEID: | 4401 | case -NFS4ERR_STALE_STATEID: |
4402 | nfs4_schedule_lease_recovery(server->nfs_client); | ||
4403 | goto out; | ||
4409 | case -NFS4ERR_BADSESSION: | 4404 | case -NFS4ERR_BADSESSION: |
4410 | case -NFS4ERR_BADSLOT: | 4405 | case -NFS4ERR_BADSLOT: |
4411 | case -NFS4ERR_BAD_HIGH_SLOT: | 4406 | case -NFS4ERR_BAD_HIGH_SLOT: |
4412 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: | 4407 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
4413 | case -NFS4ERR_DEADSESSION: | 4408 | case -NFS4ERR_DEADSESSION: |
4414 | nfs4_schedule_state_recovery(server->nfs_client); | 4409 | nfs4_schedule_session_recovery(server->nfs_client->cl_session); |
4415 | goto out; | 4410 | goto out; |
4416 | case -ERESTARTSYS: | 4411 | case -ERESTARTSYS: |
4417 | /* | 4412 | /* |
@@ -4421,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) | |||
4421 | case -NFS4ERR_ADMIN_REVOKED: | 4416 | case -NFS4ERR_ADMIN_REVOKED: |
4422 | case -NFS4ERR_BAD_STATEID: | 4417 | case -NFS4ERR_BAD_STATEID: |
4423 | case -NFS4ERR_OPENMODE: | 4418 | case -NFS4ERR_OPENMODE: |
4424 | nfs4_state_mark_reclaim_nograce(server->nfs_client, state); | 4419 | nfs4_schedule_stateid_recovery(server, state); |
4425 | err = 0; | 4420 | err = 0; |
4426 | goto out; | 4421 | goto out; |
4427 | case -EKEYEXPIRED: | 4422 | case -EKEYEXPIRED: |
@@ -5028,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp) | |||
5028 | int status; | 5023 | int status; |
5029 | unsigned *ptr; | 5024 | unsigned *ptr; |
5030 | struct nfs4_session *session = clp->cl_session; | 5025 | struct nfs4_session *session = clp->cl_session; |
5026 | long timeout = 0; | ||
5027 | int err; | ||
5031 | 5028 | ||
5032 | dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); | 5029 | dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); |
5033 | 5030 | ||
5034 | status = _nfs4_proc_create_session(clp); | 5031 | do { |
5032 | status = _nfs4_proc_create_session(clp); | ||
5033 | if (status == -NFS4ERR_DELAY) { | ||
5034 | err = nfs4_delay(clp->cl_rpcclient, &timeout); | ||
5035 | if (err) | ||
5036 | status = err; | ||
5037 | } | ||
5038 | } while (status == -NFS4ERR_DELAY); | ||
5039 | |||
5035 | if (status) | 5040 | if (status) |
5036 | goto out; | 5041 | goto out; |
5037 | 5042 | ||
@@ -5140,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client | |||
5140 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5145 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5141 | return -EAGAIN; | 5146 | return -EAGAIN; |
5142 | default: | 5147 | default: |
5143 | nfs4_schedule_state_recovery(clp); | 5148 | nfs4_schedule_lease_recovery(clp); |
5144 | } | 5149 | } |
5145 | return 0; | 5150 | return 0; |
5146 | } | 5151 | } |
@@ -5227,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr | |||
5227 | if (IS_ERR(task)) | 5232 | if (IS_ERR(task)) |
5228 | ret = PTR_ERR(task); | 5233 | ret = PTR_ERR(task); |
5229 | else | 5234 | else |
5230 | rpc_put_task(task); | 5235 | rpc_put_task_async(task); |
5231 | dprintk("<-- %s status=%d\n", __func__, ret); | 5236 | dprintk("<-- %s status=%d\n", __func__, ret); |
5232 | return ret; | 5237 | return ret; |
5233 | } | 5238 | } |
@@ -5243,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) | |||
5243 | goto out; | 5248 | goto out; |
5244 | } | 5249 | } |
5245 | ret = rpc_wait_for_completion_task(task); | 5250 | ret = rpc_wait_for_completion_task(task); |
5246 | if (!ret) | 5251 | if (!ret) { |
5252 | struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; | ||
5253 | |||
5254 | if (task->tk_status == 0) | ||
5255 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | ||
5247 | ret = task->tk_status; | 5256 | ret = task->tk_status; |
5257 | } | ||
5248 | rpc_put_task(task); | 5258 | rpc_put_task(task); |
5249 | out: | 5259 | out: |
5250 | dprintk("<-- %s status=%d\n", __func__, ret); | 5260 | dprintk("<-- %s status=%d\n", __func__, ret); |
@@ -5281,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf | |||
5281 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5291 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5282 | return -EAGAIN; | 5292 | return -EAGAIN; |
5283 | default: | 5293 | default: |
5284 | nfs4_schedule_state_recovery(clp); | 5294 | nfs4_schedule_lease_recovery(clp); |
5285 | } | 5295 | } |
5286 | return 0; | 5296 | return 0; |
5287 | } | 5297 | } |
@@ -5349,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp) | |||
5349 | status = PTR_ERR(task); | 5359 | status = PTR_ERR(task); |
5350 | goto out; | 5360 | goto out; |
5351 | } | 5361 | } |
5362 | status = nfs4_wait_for_completion_rpc_task(task); | ||
5363 | if (status == 0) | ||
5364 | status = task->tk_status; | ||
5352 | rpc_put_task(task); | 5365 | rpc_put_task(task); |
5353 | return 0; | 5366 | return 0; |
5354 | out: | 5367 | out: |
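The CREATE_SESSION change above wraps the call in a retry loop: NFS4ERR_DELAY means the server wants the client to back off and try again, and nfs4_delay() supplies the escalating sleep, which can itself fail (for example on a fatal signal), in which case the loop gives up with that error. A self-contained model of the control flow; everything here is a stand-in for the kernel pieces, including the error constant:

        #include <stdio.h>

        #define NFS4ERR_DELAY (-10008)   /* stand-in value for the demo */

        static int attempts;
        static int fake_create_session(void) { return ++attempts < 3 ? NFS4ERR_DELAY : 0; }
        static int fake_delay(long *timeout) { *timeout = *timeout ? *timeout * 2 : 100; return 0; }

        int main(void)
        {
                long timeout = 0;
                int status, err;

                do {
                        status = fake_create_session();
                        if (status == NFS4ERR_DELAY) {
                                err = fake_delay(&timeout);  /* sleeps; may return -ERESTARTSYS */
                                if (err)
                                        status = err;        /* interrupted: stop retrying */
                        }
                } while (status == NFS4ERR_DELAY);

                printf("status=%d after %d attempts\n", status, attempts);
                return 0;
        }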
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e6742b57a04c..0592288f9f06 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | /* | 1009 | /* |
1010 | * Schedule a state recovery attempt | 1010 | * Schedule a lease recovery attempt |
1011 | */ | 1011 | */ |
1012 | void nfs4_schedule_state_recovery(struct nfs_client *clp) | 1012 | void nfs4_schedule_lease_recovery(struct nfs_client *clp) |
1013 | { | 1013 | { |
1014 | if (!clp) | 1014 | if (!clp) |
1015 | return; | 1015 | return; |
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) | |||
1018 | nfs4_schedule_state_manager(clp); | 1018 | nfs4_schedule_state_manager(clp); |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) | 1021 | static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) |
1022 | { | 1022 | { |
1023 | 1023 | ||
1024 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); | 1024 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); |
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st | |||
1032 | return 1; | 1032 | return 1; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) | 1035 | static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) |
1036 | { | 1036 | { |
1037 | set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); | 1037 | set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); |
1038 | clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); | 1038 | clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); |
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s | |||
1041 | return 1; | 1041 | return 1; |
1042 | } | 1042 | } |
1043 | 1043 | ||
1044 | void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) | ||
1045 | { | ||
1046 | struct nfs_client *clp = server->nfs_client; | ||
1047 | |||
1048 | nfs4_state_mark_reclaim_nograce(clp, state); | ||
1049 | nfs4_schedule_state_manager(clp); | ||
1050 | } | ||
1051 | |||
1044 | static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) | 1052 | static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) |
1045 | { | 1053 | { |
1046 | struct inode *inode = state->inode; | 1054 | struct inode *inode = state->inode; |
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) | |||
1436 | } | 1444 | } |
1437 | 1445 | ||
1438 | #ifdef CONFIG_NFS_V4_1 | 1446 | #ifdef CONFIG_NFS_V4_1 |
1447 | void nfs4_schedule_session_recovery(struct nfs4_session *session) | ||
1448 | { | ||
1449 | nfs4_schedule_lease_recovery(session->clp); | ||
1450 | } | ||
1451 | |||
1439 | void nfs41_handle_recall_slot(struct nfs_client *clp) | 1452 | void nfs41_handle_recall_slot(struct nfs_client *clp) |
1440 | { | 1453 | { |
1441 | set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | 1454 | set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); |
1442 | nfs4_schedule_state_recovery(clp); | 1455 | nfs4_schedule_state_manager(clp); |
1443 | } | 1456 | } |
1444 | 1457 | ||
1445 | static void nfs4_reset_all_state(struct nfs_client *clp) | 1458 | static void nfs4_reset_all_state(struct nfs_client *clp) |
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp) | |||
1447 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | 1460 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { |
1448 | clp->cl_boot_time = CURRENT_TIME; | 1461 | clp->cl_boot_time = CURRENT_TIME; |
1449 | nfs4_state_start_reclaim_nograce(clp); | 1462 | nfs4_state_start_reclaim_nograce(clp); |
1450 | nfs4_schedule_state_recovery(clp); | 1463 | nfs4_schedule_state_manager(clp); |
1451 | } | 1464 | } |
1452 | } | 1465 | } |
1453 | 1466 | ||
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp) | |||
1455 | { | 1468 | { |
1456 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | 1469 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { |
1457 | nfs4_state_start_reclaim_reboot(clp); | 1470 | nfs4_state_start_reclaim_reboot(clp); |
1458 | nfs4_schedule_state_recovery(clp); | 1471 | nfs4_schedule_state_manager(clp); |
1459 | } | 1472 | } |
1460 | } | 1473 | } |
1461 | 1474 | ||
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp) | |||
1475 | { | 1488 | { |
1476 | nfs_expire_all_delegations(clp); | 1489 | nfs_expire_all_delegations(clp); |
1477 | if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) | 1490 | if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) |
1478 | nfs4_schedule_state_recovery(clp); | 1491 | nfs4_schedule_state_manager(clp); |
1479 | } | 1492 | } |
1480 | 1493 | ||
1481 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) | 1494 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) |
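Taken together, these hunks retire the catch-all nfs4_schedule_state_recovery() in favour of entry points scoped to what was actually lost, and the mark-reclaim helpers become static behind the stateid variant. The resulting surface, as it reads from the headers in this series:

        /* Lease (clientid) lost or expired: rerun lease establishment. */
        void nfs4_schedule_lease_recovery(struct nfs_client *clp);

        /* NFSv4.1 session errors (BADSESSION, DEADSESSION, ...): reset the
         * session; in this series it simply forwards to lease recovery. */
        void nfs4_schedule_session_recovery(struct nfs4_session *session);

        /* A single stateid went bad: mark that state for no-grace reclaim
         * and kick the state manager, without touching the whole lease. */
        void nfs4_schedule_stateid_recovery(const struct nfs_server *server,
                                            struct nfs4_state *state);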
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e2c168b6ee9..94d50e86a124 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1660 | 1660 | ||
1661 | p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); | 1661 | p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); |
1662 | *p++ = cpu_to_be32(OP_CREATE_SESSION); | 1662 | *p++ = cpu_to_be32(OP_CREATE_SESSION); |
1663 | p = xdr_encode_hyper(p, clp->cl_ex_clid); | 1663 | p = xdr_encode_hyper(p, clp->cl_clientid); |
1664 | *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ | 1664 | *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ |
1665 | *p++ = cpu_to_be32(args->flags); /*flags */ | 1665 | *p++ = cpu_to_be32(args->flags); /*flags */ |
1666 | 1666 | ||
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr, | |||
4694 | p = xdr_inline_decode(xdr, 8); | 4694 | p = xdr_inline_decode(xdr, 8); |
4695 | if (unlikely(!p)) | 4695 | if (unlikely(!p)) |
4696 | goto out_overflow; | 4696 | goto out_overflow; |
4697 | xdr_decode_hyper(p, &clp->cl_ex_clid); | 4697 | xdr_decode_hyper(p, &clp->cl_clientid); |
4698 | p = xdr_inline_decode(xdr, 12); | 4698 | p = xdr_inline_decode(xdr, 12); |
4699 | if (unlikely(!p)) | 4699 | if (unlikely(!p)) |
4700 | goto out_overflow; | 4700 | goto out_overflow; |
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 903908a20023..c541093a5bf2 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c | |||
@@ -86,11 +86,14 @@ | |||
86 | /* Default path we try to mount. "%s" gets replaced by our IP address */ | 86 | /* Default path we try to mount. "%s" gets replaced by our IP address */ |
87 | #define NFS_ROOT "/tftpboot/%s" | 87 | #define NFS_ROOT "/tftpboot/%s" |
88 | 88 | ||
89 | /* Default NFSROOT mount options. */ | ||
90 | #define NFS_DEF_OPTIONS "udp" | ||
91 | |||
89 | /* Parameters passed from the kernel command line */ | 92 | /* Parameters passed from the kernel command line */ |
90 | static char nfs_root_parms[256] __initdata = ""; | 93 | static char nfs_root_parms[256] __initdata = ""; |
91 | 94 | ||
92 | /* Text-based mount options passed to super.c */ | 95 | /* Text-based mount options passed to super.c */ |
93 | static char nfs_root_options[256] __initdata = ""; | 96 | static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS; |
94 | 97 | ||
95 | /* Address of NFS server */ | 98 | /* Address of NFS server */ |
96 | static __be32 servaddr __initdata = htonl(INADDR_NONE); | 99 | static __be32 servaddr __initdata = htonl(INADDR_NONE); |
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src, | |||
160 | } | 163 | } |
161 | 164 | ||
162 | static int __init root_nfs_cat(char *dest, const char *src, | 165 | static int __init root_nfs_cat(char *dest, const char *src, |
163 | const size_t destlen) | 166 | const size_t destlen) |
164 | { | 167 | { |
168 | size_t len = strlen(dest); | ||
169 | |||
170 | if (len && dest[len - 1] != ',') | ||
171 | if (strlcat(dest, ",", destlen) > destlen) | ||
172 | return -1; | ||
173 | |||
165 | if (strlcat(dest, src, destlen) > destlen) | 174 | if (strlcat(dest, src, destlen) > destlen) |
166 | return -1; | 175 | return -1; |
167 | return 0; | 176 | return 0; |
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, | |||
194 | if (root_nfs_cat(nfs_root_options, incoming, | 203 | if (root_nfs_cat(nfs_root_options, incoming, |
195 | sizeof(nfs_root_options))) | 204 | sizeof(nfs_root_options))) |
196 | return -1; | 205 | return -1; |
197 | |||
198 | /* | ||
199 | * Possibly prepare for more options to be appended | ||
200 | */ | ||
201 | if (nfs_root_options[0] != '\0' && | ||
202 | nfs_root_options[strlen(nfs_root_options)] != ',') | ||
203 | if (root_nfs_cat(nfs_root_options, ",", | ||
204 | sizeof(nfs_root_options))) | ||
205 | return -1; | ||
206 | |||
207 | return 0; | 206 | return 0; |
208 | } | 207 | } |
209 | 208 | ||
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, | |||
217 | */ | 216 | */ |
218 | static int __init root_nfs_data(char *cmdline) | 217 | static int __init root_nfs_data(char *cmdline) |
219 | { | 218 | { |
220 | char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; | 219 | char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; |
221 | int len, retval = -1; | 220 | int len, retval = -1; |
222 | char *tmp = NULL; | 221 | char *tmp = NULL; |
223 | const size_t tmplen = sizeof(nfs_export_path); | 222 | const size_t tmplen = sizeof(nfs_export_path); |
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline) | |||
244 | * Append mandatory options for nfsroot so they override | 243 | * Append mandatory options for nfsroot so they override |
245 | * what has come before | 244 | * what has come before |
246 | */ | 245 | */ |
247 | snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", | 246 | snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4", |
248 | &servaddr); | 247 | &servaddr); |
249 | if (root_nfs_cat(nfs_root_options, addr_option, | 248 | if (root_nfs_cat(nfs_root_options, mand_options, |
250 | sizeof(nfs_root_options))) | 249 | sizeof(nfs_root_options))) |
251 | goto out_optionstoolong; | 250 | goto out_optionstoolong; |
252 | 251 | ||
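Net effect of the nfsroot.c changes, traced by hand: the option string starts as "udp" (NFS_DEF_OPTIONS), any nfsroot= options from the command line are appended, and the mandatory "nolock,addr=<server>" pair goes last so it overrides what came before, per the comment in the hunk. An illustrative trace, assuming server 10.0.0.1 and no extra command-line options:

	#include <stdio.h>
	#include <netinet/in.h>

	int main(void)
	{
		char opts[256] = "udp";		/* NFS_DEF_OPTIONS */
		char mand[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];

		snprintf(mand, sizeof(mand), "nolock,addr=%s", "10.0.0.1");
		/* root_nfs_cat() would supply the "," separator: */
		printf("%s,%s\n", opts, mand);	/* udp,nolock,addr=10.0.0.1 */
		return 0;
	}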
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index e313a51acdd1..6481d537d69d 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c | |||
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n | |||
180 | task_setup_data.rpc_client = NFS_CLIENT(dir); | 180 | task_setup_data.rpc_client = NFS_CLIENT(dir); |
181 | task = rpc_run_task(&task_setup_data); | 181 | task = rpc_run_task(&task_setup_data); |
182 | if (!IS_ERR(task)) | 182 | if (!IS_ERR(task)) |
183 | rpc_put_task(task); | 183 | rpc_put_task_async(task); |
184 | return 1; | 184 | return 1; |
185 | } | 185 | } |
186 | 186 | ||
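The sillyrename path fires the unlink RPC and immediately drops its reference; with plain rpc_put_task() the final rpc_free_task() could then run synchronously in the caller's context. rpc_put_task_async(), added in the net/sunrpc/sched.c hunks below, defers that last-reference teardown to the task's workqueue instead. The contrast, in sketch form:

	/* rpc_put_task(task)       - may run rpc_free_task() right here,
	 *                            in the caller's context;
	 * rpc_put_task_async(task) - queues the final free on
	 *                            task->tk_workqueue if ours was the
	 *                            last reference. */
	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task_async(task);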
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index c8278f4046cb..42b92d7a9cc4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1292 | task = rpc_run_task(&task_setup_data); | 1292 | task = rpc_run_task(&task_setup_data); |
1293 | if (IS_ERR(task)) | 1293 | if (IS_ERR(task)) |
1294 | return PTR_ERR(task); | 1294 | return PTR_ERR(task); |
1295 | if (how & FLUSH_SYNC) | ||
1296 | rpc_wait_for_completion_task(task); | ||
1295 | rpc_put_task(task); | 1297 | rpc_put_task(task); |
1296 | return 0; | 1298 | return 0; |
1297 | } | 1299 | } |
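This closes a hole where a FLUSH_SYNC commit could return before the COMMIT RPC actually finished: the caller now parks on the task until it completes before dropping its reference. rpc_wait_for_completion_task() bottoms out in __rpc_wait_for_completion_task() with a NULL action, which (see the sched.c hunks below) defaults to rpc_wait_bit_killable and sleeps on the task's RPC_TASK_ACTIVE bit. Roughly, the wrapper is (sketch, not verbatim from sched.h):

	static inline int rpc_wait_for_completion_task(struct rpc_task *task)
	{
		return __rpc_wait_for_completion_task(task, NULL);
	}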
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c index 48cec7cbca17..be03a0b08b47 100644 --- a/fs/partitions/osf.c +++ b/fs/partitions/osf.c | |||
@@ -10,10 +10,13 @@ | |||
10 | #include "check.h" | 10 | #include "check.h" |
11 | #include "osf.h" | 11 | #include "osf.h" |
12 | 12 | ||
13 | #define MAX_OSF_PARTITIONS 8 | ||
14 | |||
13 | int osf_partition(struct parsed_partitions *state) | 15 | int osf_partition(struct parsed_partitions *state) |
14 | { | 16 | { |
15 | int i; | 17 | int i; |
16 | int slot = 1; | 18 | int slot = 1; |
19 | unsigned int npartitions; | ||
17 | Sector sect; | 20 | Sector sect; |
18 | unsigned char *data; | 21 | unsigned char *data; |
19 | struct disklabel { | 22 | struct disklabel { |
@@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state) | |||
45 | u8 p_fstype; | 48 | u8 p_fstype; |
46 | u8 p_frag; | 49 | u8 p_frag; |
47 | __le16 p_cpg; | 50 | __le16 p_cpg; |
48 | } d_partitions[8]; | 51 | } d_partitions[MAX_OSF_PARTITIONS]; |
49 | } * label; | 52 | } * label; |
50 | struct d_partition * partition; | 53 | struct d_partition * partition; |
51 | 54 | ||
@@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state) | |||
63 | put_dev_sector(sect); | 66 | put_dev_sector(sect); |
64 | return 0; | 67 | return 0; |
65 | } | 68 | } |
66 | for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { | 69 | npartitions = le16_to_cpu(label->d_npartitions); |
70 | if (npartitions > MAX_OSF_PARTITIONS) { | ||
71 | put_dev_sector(sect); | ||
72 | return 0; | ||
73 | } | ||
74 | for (i = 0 ; i < npartitions; i++, partition++) { | ||
67 | if (slot == state->limit) | 75 | if (slot == state->limit) |
68 | break; | 76 | break; |
69 | if (le32_to_cpu(partition->p_size)) | 77 | if (le32_to_cpu(partition->p_size)) |
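Here the on-disk d_npartitions field was used as a loop bound without validation, so a crafted OSF disklabel could walk d_partitions[] past its eight entries and read adjacent kernel memory into the reported partition table. The fix clamps the count before looping. The same clamp-before-loop idiom as a standalone sketch:

	#include <stdint.h>

	#define MAX_OSF_PARTITIONS 8	/* size of the on-disk array */

	struct part { uint32_t p_size, p_offset; };

	/* Reject an attacker-controlled count that exceeds the array it
	 * indexes, rather than overreading (sketch of the fix's idiom). */
	static int count_valid_parts(const struct part *parts,
				     unsigned int npartitions)
	{
		unsigned int i, n = 0;

		if (npartitions > MAX_OSF_PARTITIONS)
			return -1;
		for (i = 0; i < npartitions; i++)
			if (parts[i].p_size)
				n++;
		return n;
	}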
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b197563913bf..3e112de12d8d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -68,11 +68,7 @@ struct nfs_client { | |||
68 | unsigned char cl_id_uniquifier; | 68 | unsigned char cl_id_uniquifier; |
69 | u32 cl_cb_ident; /* v4.0 callback identifier */ | 69 | u32 cl_cb_ident; /* v4.0 callback identifier */ |
70 | const struct nfs4_minor_version_ops *cl_mvops; | 70 | const struct nfs4_minor_version_ops *cl_mvops; |
71 | #endif /* CONFIG_NFS_V4 */ | ||
72 | 71 | ||
73 | #ifdef CONFIG_NFS_V4_1 | ||
74 | /* clientid returned from EXCHANGE_ID, used by session operations */ | ||
75 | u64 cl_ex_clid; | ||
76 | /* The sequence id to use for the next CREATE_SESSION */ | 72 | /* The sequence id to use for the next CREATE_SESSION */ |
77 | u32 cl_seqid; | 73 | u32 cl_seqid; |
78 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ | 74 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ |
@@ -80,7 +76,7 @@ struct nfs_client { | |||
80 | struct nfs4_session *cl_session; /* shared session */ | 76 | struct nfs4_session *cl_session; /* shared session */ |
81 | struct list_head cl_layouts; | 77 | struct list_head cl_layouts; |
82 | struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ | 78 | struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ |
83 | #endif /* CONFIG_NFS_V4_1 */ | 79 | #endif /* CONFIG_NFS_V4 */ |
84 | 80 | ||
85 | #ifdef CONFIG_NFS_FSCACHE | 81 | #ifdef CONFIG_NFS_FSCACHE |
86 | struct fscache_cookie *fscache; /* client index cache cookie */ | 82 | struct fscache_cookie *fscache; /* client index cache cookie */ |
@@ -185,7 +181,7 @@ struct nfs_server { | |||
185 | /* maximum number of slots to use */ | 181 | /* maximum number of slots to use */ |
186 | #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE | 182 | #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE |
187 | 183 | ||
188 | #if defined(CONFIG_NFS_V4_1) | 184 | #if defined(CONFIG_NFS_V4) |
189 | 185 | ||
190 | /* Sessions */ | 186 | /* Sessions */ |
191 | #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) | 187 | #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) |
@@ -225,5 +221,5 @@ struct nfs4_session { | |||
225 | struct nfs_client *clp; | 221 | struct nfs_client *clp; |
226 | }; | 222 | }; |
227 | 223 | ||
228 | #endif /* CONFIG_NFS_V4_1 */ | 224 | #endif /* CONFIG_NFS_V4 */ |
229 | #endif | 225 | #endif |
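With cl_ex_clid gone, nothing left in this block is v4.1-only at the struct level, so the separate CONFIG_NFS_V4_1 region collapses into the enclosing CONFIG_NFS_V4 one (and likewise for the nfs4_session definitions further down the header). Abridged result, field order illustrative:

	#ifdef CONFIG_NFS_V4
		u64			cl_clientid;	/* now shared by v4.0 and v4.1 */
		...
		u32			cl_seqid;	/* next CREATE_SESSION sequence id */
		struct nfs4_session	*cl_session;	/* shared session */
	#endif /* CONFIG_NFS_V4 */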
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 88513fd8e208..d81db8012c63 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *); | |||
212 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | 212 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, |
213 | const struct rpc_call_ops *ops); | 213 | const struct rpc_call_ops *ops); |
214 | void rpc_put_task(struct rpc_task *); | 214 | void rpc_put_task(struct rpc_task *); |
215 | void rpc_put_task_async(struct rpc_task *); | ||
215 | void rpc_exit_task(struct rpc_task *); | 216 | void rpc_exit_task(struct rpc_task *); |
216 | void rpc_exit(struct rpc_task *, int); | 217 | void rpc_exit(struct rpc_task *, int); |
217 | void rpc_release_calldata(const struct rpc_call_ops *, void *); | 218 | void rpc_release_calldata(const struct rpc_call_ops *, void *); |
diff --git a/kernel/sched.c b/kernel/sched.c index 18d38e4ec7ba..42eab5a8437d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) | |||
4213 | { | 4213 | { |
4214 | __wake_up_common(q, mode, 1, 0, key); | 4214 | __wake_up_common(q, mode, 1, 0, key); |
4215 | } | 4215 | } |
4216 | EXPORT_SYMBOL_GPL(__wake_up_locked_key); | ||
4216 | 4217 | ||
4217 | /** | 4218 | /** |
4218 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. | 4219 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
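__wake_up_locked_key() gains an export because the sunrpc module (see rpc_complete_task() below) now calls it directly while already holding the waitqueue lock; without EXPORT_SYMBOL_GPL, a modular sunrpc would fail to load with an unresolved symbol. The caller-holds-lock pattern it enables, with illustrative names:

	/* The caller holds wq->lock across its state change and the
	 * wakeup, so waiters serialized on the same lock see a
	 * consistent view (sketch). */
	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(MY_BIT, word);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &key);
	spin_unlock_irqrestore(&wq->lock, flags);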
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index dbe99a5f2073..113e35c47502 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1762,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1762 | #ifndef CONFIG_NUMA | 1762 | #ifndef CONFIG_NUMA |
1763 | VM_BUG_ON(!*hpage); | 1763 | VM_BUG_ON(!*hpage); |
1764 | new_page = *hpage; | 1764 | new_page = *hpage; |
1765 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | ||
1766 | up_read(&mm->mmap_sem); | ||
1767 | return; | ||
1768 | } | ||
1765 | #else | 1769 | #else |
1766 | VM_BUG_ON(*hpage); | 1770 | VM_BUG_ON(*hpage); |
1767 | /* | 1771 | /* |
@@ -1781,12 +1785,12 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1781 | *hpage = ERR_PTR(-ENOMEM); | 1785 | *hpage = ERR_PTR(-ENOMEM); |
1782 | return; | 1786 | return; |
1783 | } | 1787 | } |
1784 | #endif | ||
1785 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 1788 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
1786 | up_read(&mm->mmap_sem); | 1789 | up_read(&mm->mmap_sem); |
1787 | put_page(new_page); | 1790 | put_page(new_page); |
1788 | return; | 1791 | return; |
1789 | } | 1792 | } |
1793 | #endif | ||
1790 | 1794 | ||
1791 | /* after allocating the hugepage upgrade to mmap_sem write mode */ | 1795 | /* after allocating the hugepage upgrade to mmap_sem write mode */ |
1792 | up_read(&mm->mmap_sem); | 1796 | up_read(&mm->mmap_sem); |
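The memcg charge used to sit after the #endif, which was correct for the NUMA path but wrong for !CONFIG_NUMA: there new_page is the preallocated *hpage, kept across calls, so the failure path must not put_page() it. Splitting the charge per path keeps the ownership rules straight. The shape of the fix, paraphrased (charge_fails() and alloc_fresh_hugepage() are stand-ins for mem_cgroup_newpage_charge() != 0 and the vma-based allocation):

	#ifndef CONFIG_NUMA
		new_page = *hpage;	/* preallocated; reused on failure */
		if (unlikely(charge_fails(new_page))) {
			up_read(&mm->mmap_sem);
			return;		/* no put_page(): we don't own it */
		}
	#else
		new_page = alloc_fresh_hugepage();
		...
		if (unlikely(charge_fails(new_page))) {
			up_read(&mm->mmap_sem);
			put_page(new_page);	/* fresh page: drop our ref */
			return;
		}
	#endif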
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 243fc09b164e..59e599498e37 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task) | |||
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Mark an RPC call as having completed by clearing the 'active' bit | 254 | * Mark an RPC call as having completed by clearing the 'active' bit |
255 | * and then waking up all tasks that were sleeping. | ||
255 | */ | 256 | */ |
256 | static void rpc_mark_complete_task(struct rpc_task *task) | 257 | static int rpc_complete_task(struct rpc_task *task) |
257 | { | 258 | { |
258 | smp_mb__before_clear_bit(); | 259 | void *m = &task->tk_runstate; |
260 | wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); | ||
261 | struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); | ||
262 | unsigned long flags; | ||
263 | int ret; | ||
264 | |||
265 | spin_lock_irqsave(&wq->lock, flags); | ||
259 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 266 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
260 | smp_mb__after_clear_bit(); | 267 | ret = atomic_dec_and_test(&task->tk_count); |
261 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | 268 | if (waitqueue_active(wq)) |
269 | __wake_up_locked_key(wq, TASK_NORMAL, &k); | ||
270 | spin_unlock_irqrestore(&wq->lock, flags); | ||
271 | return ret; | ||
262 | } | 272 | } |
263 | 273 | ||
264 | /* | 274 | /* |
265 | * Allow callers to wait for completion of an RPC call | 275 | * Allow callers to wait for completion of an RPC call |
276 | * | ||
277 | * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() | ||
278 | * to enforce taking of the wq->lock and hence avoid races with | ||
279 | * rpc_complete_task(). | ||
266 | */ | 280 | */ |
267 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | 281 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) |
268 | { | 282 | { |
269 | if (action == NULL) | 283 | if (action == NULL) |
270 | action = rpc_wait_bit_killable; | 284 | action = rpc_wait_bit_killable; |
271 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | 285 | return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, |
272 | action, TASK_KILLABLE); | 286 | action, TASK_KILLABLE); |
273 | } | 287 | } |
274 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); | 288 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); |
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work) | |||
857 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | 871 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); |
858 | } | 872 | } |
859 | 873 | ||
860 | void rpc_put_task(struct rpc_task *task) | 874 | static void rpc_release_resources_task(struct rpc_task *task) |
861 | { | 875 | { |
862 | if (!atomic_dec_and_test(&task->tk_count)) | ||
863 | return; | ||
864 | /* Release resources */ | ||
865 | if (task->tk_rqstp) | 876 | if (task->tk_rqstp) |
866 | xprt_release(task); | 877 | xprt_release(task); |
867 | if (task->tk_msg.rpc_cred) | 878 | if (task->tk_msg.rpc_cred) |
868 | put_rpccred(task->tk_msg.rpc_cred); | 879 | put_rpccred(task->tk_msg.rpc_cred); |
869 | rpc_task_release_client(task); | 880 | rpc_task_release_client(task); |
870 | if (task->tk_workqueue != NULL) { | 881 | } |
882 | |||
883 | static void rpc_final_put_task(struct rpc_task *task, | ||
884 | struct workqueue_struct *q) | ||
885 | { | ||
886 | if (q != NULL) { | ||
871 | INIT_WORK(&task->u.tk_work, rpc_async_release); | 887 | INIT_WORK(&task->u.tk_work, rpc_async_release); |
872 | queue_work(task->tk_workqueue, &task->u.tk_work); | 888 | queue_work(q, &task->u.tk_work); |
873 | } else | 889 | } else |
874 | rpc_free_task(task); | 890 | rpc_free_task(task); |
875 | } | 891 | } |
892 | |||
893 | static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) | ||
894 | { | ||
895 | if (atomic_dec_and_test(&task->tk_count)) { | ||
896 | rpc_release_resources_task(task); | ||
897 | rpc_final_put_task(task, q); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | void rpc_put_task(struct rpc_task *task) | ||
902 | { | ||
903 | rpc_do_put_task(task, NULL); | ||
904 | } | ||
876 | EXPORT_SYMBOL_GPL(rpc_put_task); | 905 | EXPORT_SYMBOL_GPL(rpc_put_task); |
877 | 906 | ||
907 | void rpc_put_task_async(struct rpc_task *task) | ||
908 | { | ||
909 | rpc_do_put_task(task, task->tk_workqueue); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(rpc_put_task_async); | ||
912 | |||
878 | static void rpc_release_task(struct rpc_task *task) | 913 | static void rpc_release_task(struct rpc_task *task) |
879 | { | 914 | { |
880 | dprintk("RPC: %5u release task\n", task->tk_pid); | 915 | dprintk("RPC: %5u release task\n", task->tk_pid); |
881 | 916 | ||
882 | BUG_ON (RPC_IS_QUEUED(task)); | 917 | BUG_ON (RPC_IS_QUEUED(task)); |
883 | 918 | ||
884 | /* Wake up anyone who is waiting for task completion */ | 919 | rpc_release_resources_task(task); |
885 | rpc_mark_complete_task(task); | ||
886 | 920 | ||
887 | rpc_put_task(task); | 921 | /* |
922 | * Note: at this point we have been removed from rpc_clnt->cl_tasks, | ||
923 | * so it should be safe to use task->tk_count as a test for whether | ||
924 | * or not any other processes still hold references to our rpc_task. | ||
925 | */ | ||
926 | if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { | ||
927 | /* Wake up anyone who may be waiting for task completion */ | ||
928 | if (!rpc_complete_task(task)) | ||
929 | return; | ||
930 | } else { | ||
931 | if (!atomic_dec_and_test(&task->tk_count)) | ||
932 | return; | ||
933 | } | ||
934 | rpc_final_put_task(task, task->tk_workqueue); | ||
888 | } | 935 | } |
889 | 936 | ||
890 | int rpciod_up(void) | 937 | int rpciod_up(void) |
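This is the core of the series. Synchronous callers (sillyrename, the write.c commit path above) do "wait for completion, then rpc_put_task()" expecting theirs to be the last reference so that ->rpc_release() runs before they return; rpciod meanwhile does "mark complete, then drop its reference". The old rpc_mark_complete_task() gave no ordering between those two reference drops, so the caller's put could turn out not to be the last one. rpc_complete_task() fixes that by making clear-bit, the final atomic_dec_and_test(), and the wakeup a single critical section under the existing bit-waitqueue spinlock, and __rpc_wait_for_completion_task() switches to out_of_line_wait_on_bit(), which, unlike wait_on_bit()'s lock-free fast path, always takes that same wq->lock, as the added comment notes. The waker/waiter pairing, reduced to a sketch:

	/* Waker (rpciod) side, condensed from the hunk above: */
	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);	/* last reference? */
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);

	/* Waiter side: out_of_line_wait_on_bit() always enters
	 * prepare_to_wait() and hence takes the same wq->lock before
	 * testing RPC_TASK_ACTIVE, so the waiter's subsequent
	 * rpc_put_task() is ordered after the waker's reference drop. */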
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 9df1eadc912a..1a10dcd999ea 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
1335 | p, 0, length, DMA_FROM_DEVICE); | 1335 | p, 0, length, DMA_FROM_DEVICE); |
1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { | 1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { |
1337 | put_page(p); | 1337 | put_page(p); |
1338 | svc_rdma_put_context(ctxt, 1); | ||
1338 | return; | 1339 | return; |
1339 | } | 1340 | } |
1340 | atomic_inc(&xprt->sc_dma_used); | 1341 | atomic_inc(&xprt->sc_dma_used); |
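A straightforward leak: on a DMA mapping failure the page was dropped but the svc_rdma context taken earlier in the function was not. The usual way to keep such unwind paths honest is a goto ladder; a sketch of the idiom, using the helpers visible in the hunk:

	addr = ib_dma_map_page(dev, p, 0, length, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, addr))
		goto err_page;
	...
	return;
	err_page:
		put_page(p);
		svc_rdma_put_context(ctxt, 1);	/* the put this fix adds */
		return;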
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c431f5a57960..be96d429b475 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, | |||
1631 | } | 1631 | } |
1632 | xs_reclassify_socket(family, sock); | 1632 | xs_reclassify_socket(family, sock); |
1633 | 1633 | ||
1634 | if (xs_bind(transport, sock)) { | 1634 | err = xs_bind(transport, sock); |
1635 | if (err) { | ||
1635 | sock_release(sock); | 1636 | sock_release(sock); |
1636 | goto out; | 1637 | goto out; |
1637 | } | 1638 | } |
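The last one is a classic: the status from xs_bind() was tested but never captured, so on the error path err kept whatever stale value it held, and the caller, which presumably gets ERR_PTR(err) back at the out: label (not shown in the hunk), never saw the real bind failure. Before and after, side by side:

	/* Before: the bind status was tested and thrown away. */
	if (xs_bind(transport, sock)) {		/* err never updated */
		sock_release(sock);
		goto out;
	}

	/* After: capture it so the caller sees the real errno. */
	err = xs_bind(transport, sock);
	if (err) {
		sock_release(sock);
		goto out;
	}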