diff options
91 files changed, 1014 insertions, 1010 deletions
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting index 4e686a2ed91..76850295af8 100644 --- a/Documentation/arm/Booting +++ b/Documentation/arm/Booting | |||
@@ -65,19 +65,13 @@ looks at the connected hardware is beyond the scope of this document. | |||
65 | The boot loader must ultimately be able to provide a MACH_TYPE_xxx | 65 | The boot loader must ultimately be able to provide a MACH_TYPE_xxx |
66 | value to the kernel. (see linux/arch/arm/tools/mach-types). | 66 | value to the kernel. (see linux/arch/arm/tools/mach-types). |
67 | 67 | ||
68 | 4. Setup boot data | 68 | |
69 | ------------------ | 69 | 4. Setup the kernel tagged list |
70 | ------------------------------- | ||
70 | 71 | ||
71 | Existing boot loaders: OPTIONAL, HIGHLY RECOMMENDED | 72 | Existing boot loaders: OPTIONAL, HIGHLY RECOMMENDED |
72 | New boot loaders: MANDATORY | 73 | New boot loaders: MANDATORY |
73 | 74 | ||
74 | The boot loader must provide either a tagged list or a dtb image for | ||
75 | passing configuration data to the kernel. The physical address of the | ||
76 | boot data is passed to the kernel in register r2. | ||
77 | |||
78 | 4a. Setup the kernel tagged list | ||
79 | -------------------------------- | ||
80 | |||
81 | The boot loader must create and initialise the kernel tagged list. | 75 | The boot loader must create and initialise the kernel tagged list. |
82 | A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE. | 76 | A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE. |
83 | The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag | 77 | The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag |
@@ -107,24 +101,6 @@ The tagged list must be placed in a region of memory where neither | |||
107 | the kernel decompressor nor initrd 'bootp' program will overwrite | 101 | the kernel decompressor nor initrd 'bootp' program will overwrite |
108 | it. The recommended placement is in the first 16KiB of RAM. | 102 | it. The recommended placement is in the first 16KiB of RAM. |
109 | 103 | ||
110 | 4b. Setup the device tree | ||
111 | ------------------------- | ||
112 | |||
113 | The boot loader must load a device tree image (dtb) into system ram | ||
114 | at a 64bit aligned address and initialize it with the boot data. The | ||
115 | dtb format is documented in Documentation/devicetree/booting-without-of.txt. | ||
116 | The kernel will look for the dtb magic value of 0xd00dfeed at the dtb | ||
117 | physical address to determine if a dtb has been passed instead of a | ||
118 | tagged list. | ||
119 | |||
120 | The boot loader must pass at a minimum the size and location of the | ||
121 | system memory, and the root filesystem location. The dtb must be | ||
122 | placed in a region of memory where the kernel decompressor will not | ||
123 | overwrite it. The recommended placement is in the first 16KiB of RAM | ||
124 | with the caveat that it may not be located at physical address 0 since | ||
125 | the kernel interprets a value of 0 in r2 to mean neither a tagged list | ||
126 | nor a dtb were passed. | ||
127 | |||
128 | 5. Calling the kernel image | 104 | 5. Calling the kernel image |
129 | --------------------------- | 105 | --------------------------- |
130 | 106 | ||
@@ -149,8 +125,7 @@ In either case, the following conditions must be met: | |||
149 | - CPU register settings | 125 | - CPU register settings |
150 | r0 = 0, | 126 | r0 = 0, |
151 | r1 = machine type number discovered in (3) above. | 127 | r1 = machine type number discovered in (3) above. |
152 | r2 = physical address of tagged list in system RAM, or | 128 | r2 = physical address of tagged list in system RAM. |
153 | physical address of device tree block (dtb) in system RAM | ||
154 | 129 | ||
155 | - CPU mode | 130 | - CPU mode |
156 | All forms of interrupts must be disabled (IRQs and FIQs) | 131 | All forms of interrupts must be disabled (IRQs and FIQs) |
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt index 9381a148102..28b1c9d3d35 100644 --- a/Documentation/devicetree/booting-without-of.txt +++ b/Documentation/devicetree/booting-without-of.txt | |||
@@ -13,7 +13,6 @@ Table of Contents | |||
13 | 13 | ||
14 | I - Introduction | 14 | I - Introduction |
15 | 1) Entry point for arch/powerpc | 15 | 1) Entry point for arch/powerpc |
16 | 2) Entry point for arch/arm | ||
17 | 16 | ||
18 | II - The DT block format | 17 | II - The DT block format |
19 | 1) Header | 18 | 1) Header |
@@ -226,45 +225,6 @@ it with special cases. | |||
226 | cannot support both configurations with Book E and configurations | 225 | cannot support both configurations with Book E and configurations |
227 | with classic Powerpc architectures. | 226 | with classic Powerpc architectures. |
228 | 227 | ||
229 | 2) Entry point for arch/arm | ||
230 | --------------------------- | ||
231 | |||
232 | There is one single entry point to the kernel, at the start | ||
233 | of the kernel image. That entry point supports two calling | ||
234 | conventions. A summary of the interface is described here. A full | ||
235 | description of the boot requirements is documented in | ||
236 | Documentation/arm/Booting | ||
237 | |||
238 | a) ATAGS interface. Minimal information is passed from firmware | ||
239 | to the kernel with a tagged list of predefined parameters. | ||
240 | |||
241 | r0 : 0 | ||
242 | |||
243 | r1 : Machine type number | ||
244 | |||
245 | r2 : Physical address of tagged list in system RAM | ||
246 | |||
247 | b) Entry with a flattened device-tree block. Firmware loads the | ||
248 | physical address of the flattened device tree block (dtb) into r2, | ||
249 | r1 is not used, but it is considered good practise to use a valid | ||
250 | machine number as described in Documentation/arm/Booting. | ||
251 | |||
252 | r0 : 0 | ||
253 | |||
254 | r1 : Valid machine type number. When using a device tree, | ||
255 | a single machine type number will often be assigned to | ||
256 | represent a class or family of SoCs. | ||
257 | |||
258 | r2 : physical pointer to the device-tree block | ||
259 | (defined in chapter II) in RAM. Device tree can be located | ||
260 | anywhere in system RAM, but it should be aligned on a 32 bit | ||
261 | boundary. | ||
262 | |||
263 | The kernel will differentiate between ATAGS and device tree booting by | ||
264 | reading the memory pointed to by r1 and looking for either the flattened | ||
265 | device tree block magic value (0xd00dfeed) or the ATAG_CORE value at | ||
266 | offset 0x4 from r2 (0x54410001). | ||
267 | |||
268 | 228 | ||
269 | II - The DT block format | 229 | II - The DT block format |
270 | ======================== | 230 | ======================== |
diff --git a/MAINTAINERS b/MAINTAINERS index 4837907a4ed..5dd6c751e6a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2126,6 +2126,7 @@ S: Supported | |||
2126 | F: fs/dlm/ | 2126 | F: fs/dlm/ |
2127 | 2127 | ||
2128 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM | 2128 | DMA GENERIC OFFLOAD ENGINE SUBSYSTEM |
2129 | M: Vinod Koul <vinod.koul@intel.com> | ||
2129 | M: Dan Williams <dan.j.williams@intel.com> | 2130 | M: Dan Williams <dan.j.williams@intel.com> |
2130 | S: Supported | 2131 | S: Supported |
2131 | F: drivers/dma/ | 2132 | F: drivers/dma/ |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 5cff165b7eb..26d45e5b636 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1391,7 +1391,7 @@ config AEABI | |||
1391 | 1391 | ||
1392 | config OABI_COMPAT | 1392 | config OABI_COMPAT |
1393 | bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" | 1393 | bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" |
1394 | depends on AEABI && EXPERIMENTAL | 1394 | depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL |
1395 | default y | 1395 | default y |
1396 | help | 1396 | help |
1397 | This option preserves the old syscall interface along with the | 1397 | This option preserves the old syscall interface along with the |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index c0225da3fb2..f06ff9feb0d 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on) | |||
391 | 391 | ||
392 | 392 | ||
393 | #ifdef CONFIG_SMP_ON_UP | 393 | #ifdef CONFIG_SMP_ON_UP |
394 | __INIT | ||
394 | __fixup_smp: | 395 | __fixup_smp: |
395 | and r3, r9, #0x000f0000 @ architecture version | 396 | and r3, r9, #0x000f0000 @ architecture version |
396 | teq r3, #0x000f0000 @ CPU ID supported? | 397 | teq r3, #0x000f0000 @ CPU ID supported? |
@@ -415,18 +416,7 @@ __fixup_smp_on_up: | |||
415 | sub r3, r0, r3 | 416 | sub r3, r0, r3 |
416 | add r4, r4, r3 | 417 | add r4, r4, r3 |
417 | add r5, r5, r3 | 418 | add r5, r5, r3 |
418 | 2: cmp r4, r5 | 419 | b __do_fixup_smp_on_up |
419 | movhs pc, lr | ||
420 | ldmia r4!, {r0, r6} | ||
421 | ARM( str r6, [r0, r3] ) | ||
422 | THUMB( add r0, r0, r3 ) | ||
423 | #ifdef __ARMEB__ | ||
424 | THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. | ||
425 | #endif | ||
426 | THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords | ||
427 | THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. | ||
428 | THUMB( strh r6, [r0] ) | ||
429 | b 2b | ||
430 | ENDPROC(__fixup_smp) | 420 | ENDPROC(__fixup_smp) |
431 | 421 | ||
432 | .align | 422 | .align |
@@ -440,7 +430,31 @@ smp_on_up: | |||
440 | ALT_SMP(.long 1) | 430 | ALT_SMP(.long 1) |
441 | ALT_UP(.long 0) | 431 | ALT_UP(.long 0) |
442 | .popsection | 432 | .popsection |
433 | #endif | ||
443 | 434 | ||
435 | .text | ||
436 | __do_fixup_smp_on_up: | ||
437 | cmp r4, r5 | ||
438 | movhs pc, lr | ||
439 | ldmia r4!, {r0, r6} | ||
440 | ARM( str r6, [r0, r3] ) | ||
441 | THUMB( add r0, r0, r3 ) | ||
442 | #ifdef __ARMEB__ | ||
443 | THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. | ||
444 | #endif | 444 | #endif |
445 | THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords | ||
446 | THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. | ||
447 | THUMB( strh r6, [r0] ) | ||
448 | b __do_fixup_smp_on_up | ||
449 | ENDPROC(__do_fixup_smp_on_up) | ||
450 | |||
451 | ENTRY(fixup_smp) | ||
452 | stmfd sp!, {r4 - r6, lr} | ||
453 | mov r4, r0 | ||
454 | add r5, r0, r1 | ||
455 | mov r3, #0 | ||
456 | bl __do_fixup_smp_on_up | ||
457 | ldmfd sp!, {r4 - r6, pc} | ||
458 | ENDPROC(fixup_smp) | ||
445 | 459 | ||
446 | #include "head-common.S" | 460 | #include "head-common.S" |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index c9f3f046757..d600bd35070 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void) | |||
137 | u32 didr; | 137 | u32 didr; |
138 | 138 | ||
139 | /* Do we implement the extended CPUID interface? */ | 139 | /* Do we implement the extended CPUID interface? */ |
140 | if (((read_cpuid_id() >> 16) & 0xf) != 0xf) { | 140 | if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf), |
141 | pr_warning("CPUID feature registers not supported. " | 141 | "CPUID feature registers not supported. " |
142 | "Assuming v6 debug is present.\n"); | 142 | "Assuming v6 debug is present.\n")) |
143 | return ARM_DEBUG_ARCH_V6; | 143 | return ARM_DEBUG_ARCH_V6; |
144 | } | ||
145 | 144 | ||
146 | ARM_DBG_READ(c0, 0, didr); | 145 | ARM_DBG_READ(c0, 0, didr); |
147 | return (didr >> 16) & 0xf; | 146 | return (didr >> 16) & 0xf; |
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void) | |||
152 | return debug_arch; | 151 | return debug_arch; |
153 | } | 152 | } |
154 | 153 | ||
154 | static int debug_arch_supported(void) | ||
155 | { | ||
156 | u8 arch = get_debug_arch(); | ||
157 | return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14; | ||
158 | } | ||
159 | |||
155 | /* Determine number of BRP register available. */ | 160 | /* Determine number of BRP register available. */ |
156 | static int get_num_brp_resources(void) | 161 | static int get_num_brp_resources(void) |
157 | { | 162 | { |
@@ -268,6 +273,9 @@ out: | |||
268 | 273 | ||
269 | int hw_breakpoint_slots(int type) | 274 | int hw_breakpoint_slots(int type) |
270 | { | 275 | { |
276 | if (!debug_arch_supported()) | ||
277 | return 0; | ||
278 | |||
271 | /* | 279 | /* |
272 | * We can be called early, so don't rely on | 280 | * We can be called early, so don't rely on |
273 | * our static variables being initialised. | 281 | * our static variables being initialised. |
@@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused) | |||
834 | 842 | ||
835 | /* | 843 | /* |
836 | * v7 debug contains save and restore registers so that debug state | 844 | * v7 debug contains save and restore registers so that debug state |
837 | * can be maintained across low-power modes without leaving | 845 | * can be maintained across low-power modes without leaving the debug |
838 | * the debug logic powered up. It is IMPLEMENTATION DEFINED whether | 846 | * logic powered up. It is IMPLEMENTATION DEFINED whether we can access |
839 | * we can write to the debug registers out of reset, so we must | 847 | * the debug registers out of reset, so we must unlock the OS Lock |
840 | * unlock the OS Lock Access Register to avoid taking undefined | 848 | * Access Register to avoid taking undefined instruction exceptions |
841 | * instruction exceptions later on. | 849 | * later on. |
842 | */ | 850 | */ |
843 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { | 851 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { |
844 | /* | 852 | /* |
@@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void) | |||
882 | 890 | ||
883 | debug_arch = get_debug_arch(); | 891 | debug_arch = get_debug_arch(); |
884 | 892 | ||
885 | if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) { | 893 | if (!debug_arch_supported()) { |
886 | pr_info("debug architecture 0x%x unsupported.\n", debug_arch); | 894 | pr_info("debug architecture 0x%x unsupported.\n", debug_arch); |
887 | return 0; | 895 | return 0; |
888 | } | 896 | } |
@@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void) | |||
899 | pr_info("%d breakpoint(s) reserved for watchpoint " | 907 | pr_info("%d breakpoint(s) reserved for watchpoint " |
900 | "single-step.\n", core_num_reserved_brps); | 908 | "single-step.\n", core_num_reserved_brps); |
901 | 909 | ||
910 | /* | ||
911 | * Reset the breakpoint resources. We assume that a halting | ||
912 | * debugger will leave the world in a nice state for us. | ||
913 | */ | ||
914 | on_each_cpu(reset_ctrl_regs, NULL, 1); | ||
915 | |||
902 | ARM_DBG_READ(c1, 0, dscr); | 916 | ARM_DBG_READ(c1, 0, dscr); |
903 | if (dscr & ARM_DSCR_HDBGEN) { | 917 | if (dscr & ARM_DSCR_HDBGEN) { |
918 | max_watchpoint_len = 4; | ||
904 | pr_warning("halting debug mode enabled. Assuming maximum " | 919 | pr_warning("halting debug mode enabled. Assuming maximum " |
905 | "watchpoint size of 4 bytes."); | 920 | "watchpoint size of %u bytes.", max_watchpoint_len); |
906 | } else { | 921 | } else { |
907 | /* | ||
908 | * Reset the breakpoint resources. We assume that a halting | ||
909 | * debugger will leave the world in a nice state for us. | ||
910 | */ | ||
911 | smp_call_function(reset_ctrl_regs, NULL, 1); | ||
912 | reset_ctrl_regs(NULL); | ||
913 | |||
914 | /* Work out the maximum supported watchpoint length. */ | 922 | /* Work out the maximum supported watchpoint length. */ |
915 | max_watchpoint_len = get_max_wp_len(); | 923 | max_watchpoint_len = get_max_wp_len(); |
916 | pr_info("maximum watchpoint size is %u bytes.\n", | 924 | pr_info("maximum watchpoint size is %u bytes.\n", |
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 2cfe8161b47..6d4105e6872 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/sections.h> | 24 | #include <asm/sections.h> |
25 | #include <asm/smp_plat.h> | ||
25 | #include <asm/unwind.h> | 26 | #include <asm/unwind.h> |
26 | 27 | ||
27 | #ifdef CONFIG_XIP_KERNEL | 28 | #ifdef CONFIG_XIP_KERNEL |
@@ -268,12 +269,28 @@ struct mod_unwind_map { | |||
268 | const Elf_Shdr *txt_sec; | 269 | const Elf_Shdr *txt_sec; |
269 | }; | 270 | }; |
270 | 271 | ||
272 | static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, | ||
273 | const Elf_Shdr *sechdrs, const char *name) | ||
274 | { | ||
275 | const Elf_Shdr *s, *se; | ||
276 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
277 | |||
278 | for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) | ||
279 | if (strcmp(name, secstrs + s->sh_name) == 0) | ||
280 | return s; | ||
281 | |||
282 | return NULL; | ||
283 | } | ||
284 | |||
285 | extern void fixup_smp(const void *, unsigned long); | ||
286 | |||
271 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | 287 | int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, |
272 | struct module *mod) | 288 | struct module *mod) |
273 | { | 289 | { |
290 | const Elf_Shdr * __maybe_unused s = NULL; | ||
274 | #ifdef CONFIG_ARM_UNWIND | 291 | #ifdef CONFIG_ARM_UNWIND |
275 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | 292 | const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; |
276 | const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum; | 293 | const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; |
277 | struct mod_unwind_map maps[ARM_SEC_MAX]; | 294 | struct mod_unwind_map maps[ARM_SEC_MAX]; |
278 | int i; | 295 | int i; |
279 | 296 | ||
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, | |||
315 | maps[i].txt_sec->sh_addr, | 332 | maps[i].txt_sec->sh_addr, |
316 | maps[i].txt_sec->sh_size); | 333 | maps[i].txt_sec->sh_size); |
317 | #endif | 334 | #endif |
335 | s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); | ||
336 | if (s && !is_smp()) | ||
337 | fixup_smp((void *)s->sh_addr, s->sh_size); | ||
318 | return 0; | 338 | return 0; |
319 | } | 339 | } |
320 | 340 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 5efa2647a2f..d150ad1ccb5 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail, | |||
700 | * Frame pointers should strictly progress back up the stack | 700 | * Frame pointers should strictly progress back up the stack |
701 | * (towards higher addresses). | 701 | * (towards higher addresses). |
702 | */ | 702 | */ |
703 | if (tail >= buftail.fp) | 703 | if (tail + 1 >= buftail.fp) |
704 | return NULL; | 704 | return NULL; |
705 | 705 | ||
706 | return buftail.fp - 1; | 706 | return buftail.fp - 1; |
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c index 6b2c800a113..28f667e52ef 100644 --- a/arch/arm/mach-pxa/colibri-evalboard.c +++ b/arch/arm/mach-pxa/colibri-evalboard.c | |||
@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void) | |||
50 | GPIO0_COLIBRI_PXA270_SD_DETECT; | 50 | GPIO0_COLIBRI_PXA270_SD_DETECT; |
51 | if (machine_is_colibri300()) /* PXA300 Colibri */ | 51 | if (machine_is_colibri300()) /* PXA300 Colibri */ |
52 | colibri_mci_platform_data.gpio_card_detect = | 52 | colibri_mci_platform_data.gpio_card_detect = |
53 | GPIO39_COLIBRI_PXA300_SD_DETECT; | 53 | GPIO13_COLIBRI_PXA300_SD_DETECT; |
54 | else /* PXA320 Colibri */ | 54 | else /* PXA320 Colibri */ |
55 | colibri_mci_platform_data.gpio_card_detect = | 55 | colibri_mci_platform_data.gpio_card_detect = |
56 | GPIO28_COLIBRI_PXA320_SD_DETECT; | 56 | GPIO28_COLIBRI_PXA320_SD_DETECT; |
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c index fddb16d07eb..66dd81cbc8a 100644 --- a/arch/arm/mach-pxa/colibri-pxa300.c +++ b/arch/arm/mach-pxa/colibri-pxa300.c | |||
@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = { | |||
41 | GPIO4_MMC1_DAT1, | 41 | GPIO4_MMC1_DAT1, |
42 | GPIO5_MMC1_DAT2, | 42 | GPIO5_MMC1_DAT2, |
43 | GPIO6_MMC1_DAT3, | 43 | GPIO6_MMC1_DAT3, |
44 | GPIO39_GPIO, /* SD detect */ | 44 | GPIO13_GPIO, /* GPIO13_COLIBRI_PXA300_SD_DETECT */ |
45 | 45 | ||
46 | /* UHC */ | 46 | /* UHC */ |
47 | GPIO0_2_USBH_PEN, | 47 | GPIO0_2_USBH_PEN, |
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h index 388a96f1ef9..cb4236e98a0 100644 --- a/arch/arm/mach-pxa/include/mach/colibri.h +++ b/arch/arm/mach-pxa/include/mach/colibri.h | |||
@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {} | |||
60 | #define GPIO113_COLIBRI_PXA270_TS_IRQ 113 | 60 | #define GPIO113_COLIBRI_PXA270_TS_IRQ 113 |
61 | 61 | ||
62 | /* GPIO definitions for Colibri PXA300/310 */ | 62 | /* GPIO definitions for Colibri PXA300/310 */ |
63 | #define GPIO39_COLIBRI_PXA300_SD_DETECT 39 | 63 | #define GPIO13_COLIBRI_PXA300_SD_DETECT 13 |
64 | 64 | ||
65 | /* GPIO definitions for Colibri PXA320 */ | 65 | /* GPIO definitions for Colibri PXA320 */ |
66 | #define GPIO28_COLIBRI_PXA320_SD_DETECT 28 | 66 | #define GPIO28_COLIBRI_PXA320_SD_DETECT 28 |
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c index 405b92a2979..35572c427fa 100644 --- a/arch/arm/mach-pxa/palm27x.c +++ b/arch/arm/mach-pxa/palm27x.c | |||
@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = { | |||
323 | .pwm_id = 0, | 323 | .pwm_id = 0, |
324 | .max_brightness = 0xfe, | 324 | .max_brightness = 0xfe, |
325 | .dft_brightness = 0x7e, | 325 | .dft_brightness = 0x7e, |
326 | .pwm_period_ns = 3500, | 326 | .pwm_period_ns = 3500 * 1024, |
327 | .init = palm27x_backlight_init, | 327 | .init = palm27x_backlight_init, |
328 | .notify = palm27x_backlight_notify, | 328 | .notify = palm27x_backlight_notify, |
329 | .exit = palm27x_backlight_exit, | 329 | .exit = palm27x_backlight_exit, |
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 978e1b28954..1807c9abdde 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c | |||
@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state) | |||
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | /* skip registers saving for standby */ | 35 | /* skip registers saving for standby */ |
36 | if (state != PM_SUSPEND_STANDBY) { | 36 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) { |
37 | pxa_cpu_pm_fns->save(sleep_save); | 37 | pxa_cpu_pm_fns->save(sleep_save); |
38 | /* before sleeping, calculate and save a checksum */ | 38 | /* before sleeping, calculate and save a checksum */ |
39 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) | 39 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) |
@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state) | |||
44 | pxa_cpu_pm_fns->enter(state); | 44 | pxa_cpu_pm_fns->enter(state); |
45 | cpu_init(); | 45 | cpu_init(); |
46 | 46 | ||
47 | if (state != PM_SUSPEND_STANDBY) { | 47 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { |
48 | /* after sleeping, validate the checksum */ | 48 | /* after sleeping, validate the checksum */ |
49 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) | 49 | for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++) |
50 | checksum += sleep_save[i]; | 50 | checksum += sleep_save[i]; |
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c index d43c5ef58eb..bd3e1bfdd6a 100644 --- a/arch/arm/mach-sa1100/collie.c +++ b/arch/arm/mach-sa1100/collie.c | |||
@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = { | |||
241 | struct platform_device collie_locomo_device = { | 241 | struct platform_device collie_locomo_device = { |
242 | .name = "locomo", | 242 | .name = "locomo", |
243 | .id = 0, | 243 | .id = 0, |
244 | .dev = { | ||
245 | .platform_data = &locomo_info, | ||
246 | }, | ||
244 | .num_resources = ARRAY_SIZE(locomo_resources), | 247 | .num_resources = ARRAY_SIZE(locomo_resources), |
245 | .resource = locomo_resources, | 248 | .resource = locomo_resources, |
246 | }; | 249 | }; |
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9d30c6f804b..e4509bae8fc 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -405,7 +405,7 @@ config CPU_V6 | |||
405 | config CPU_32v6K | 405 | config CPU_32v6K |
406 | bool "Support ARM V6K processor extensions" if !SMP | 406 | bool "Support ARM V6K processor extensions" if !SMP |
407 | depends on CPU_V6 || CPU_V7 | 407 | depends on CPU_V6 || CPU_V7 |
408 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) | 408 | default y if SMP |
409 | help | 409 | help |
410 | Say Y here if your ARMv6 processor supports the 'K' extension. | 410 | Say Y here if your ARMv6 processor supports the 'K' extension. |
411 | This enables the kernel to use some instructions not present | 411 | This enables the kernel to use some instructions not present |
@@ -416,7 +416,7 @@ config CPU_32v6K | |||
416 | # ARMv7 | 416 | # ARMv7 |
417 | config CPU_V7 | 417 | config CPU_V7 |
418 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX | 418 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
419 | select CPU_32v6K if !ARCH_OMAP2 | 419 | select CPU_32v6K |
420 | select CPU_32v7 | 420 | select CPU_32v7 |
421 | select CPU_ABRT_EV7 | 421 | select CPU_ABRT_EV7 |
422 | select CPU_PABRT_V7 | 422 | select CPU_PABRT_V7 |
@@ -644,7 +644,7 @@ config ARM_THUMBEE | |||
644 | 644 | ||
645 | config SWP_EMULATE | 645 | config SWP_EMULATE |
646 | bool "Emulate SWP/SWPB instructions" | 646 | bool "Emulate SWP/SWPB instructions" |
647 | depends on CPU_V7 && !CPU_V6 | 647 | depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6 |
648 | select HAVE_PROC_CPU if PROC_FS | 648 | select HAVE_PROC_CPU if PROC_FS |
649 | default y if SMP | 649 | default y if SMP |
650 | help | 650 | help |
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 8aa974491df..c074e66ad22 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c | |||
@@ -10,8 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/err.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/init.h> | 13 | #include <linux/init.h> |
16 | #include <linux/mutex.h> | 14 | #include <linux/mutex.h> |
17 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void) | |||
46 | return NULL; | 44 | return NULL; |
47 | } | 45 | } |
48 | } | 46 | } |
47 | #endif | ||
49 | 48 | ||
50 | static int report_trace(struct stackframe *frame, void *d) | 49 | static int report_trace(struct stackframe *frame, void *d) |
51 | { | 50 | { |
@@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail) | |||
85 | 84 | ||
86 | /* frame pointers should strictly progress back up the stack | 85 | /* frame pointers should strictly progress back up the stack |
87 | * (towards higher addresses) */ | 86 | * (towards higher addresses) */ |
88 | if (tail >= buftail[0].fp) | 87 | if (tail + 1 >= buftail[0].fp) |
89 | return NULL; | 88 | return NULL; |
90 | 89 | ||
91 | return buftail[0].fp-1; | 90 | return buftail[0].fp-1; |
@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth) | |||
111 | 110 | ||
112 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 111 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
113 | { | 112 | { |
113 | /* provide backtrace support also in timer mode: */ | ||
114 | ops->backtrace = arm_backtrace; | 114 | ops->backtrace = arm_backtrace; |
115 | 115 | ||
116 | return oprofile_perf_init(ops); | 116 | return oprofile_perf_init(ops); |
@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void) | |||
120 | { | 120 | { |
121 | oprofile_perf_exit(); | 121 | oprofile_perf_exit(); |
122 | } | 122 | } |
123 | #else | ||
124 | int __init oprofile_arch_init(struct oprofile_operations *ops) | ||
125 | { | ||
126 | pr_info("oprofile: hardware counters not available\n"); | ||
127 | return -ENODEV; | ||
128 | } | ||
129 | void __exit oprofile_arch_exit(void) {} | ||
130 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index b77e018d36c..a9aa5ad3f4e 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c | |||
@@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = { | |||
139 | #define mfp_configured(p) ((p)->config != -1) | 139 | #define mfp_configured(p) ((p)->config != -1) |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * perform a read-back of any MFPR register to make sure the | 142 | * perform a read-back of any valid MFPR register to make sure the |
143 | * previous writings are finished | 143 | * previous writings are finished |
144 | */ | 144 | */ |
145 | #define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0) | 145 | static unsigned long mfpr_off_readback; |
146 | #define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback) | ||
146 | 147 | ||
147 | static inline void __mfp_config_run(struct mfp_pin *p) | 148 | static inline void __mfp_config_run(struct mfp_pin *p) |
148 | { | 149 | { |
@@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map) | |||
248 | 249 | ||
249 | spin_lock_irqsave(&mfp_spin_lock, flags); | 250 | spin_lock_irqsave(&mfp_spin_lock, flags); |
250 | 251 | ||
252 | /* mfp offset for readback */ | ||
253 | mfpr_off_readback = map[0].offset; | ||
254 | |||
251 | for (p = map; p->start != MFP_PIN_INVALID; p++) { | 255 | for (p = map; p->start != MFP_PIN_INVALID; p++) { |
252 | offset = p->offset; | 256 | offset = p->offset; |
253 | i = p->start; | 257 | i = p->start; |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index bf3de04170a..2c79b641627 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); | |||
148 | */ | 148 | */ |
149 | extern unsigned long thread_saved_pc(struct task_struct *t); | 149 | extern unsigned long thread_saved_pc(struct task_struct *t); |
150 | 150 | ||
151 | /* | ||
152 | * Print register of task into buffer. Used in fs/proc/array.c. | ||
153 | */ | ||
154 | extern void task_show_regs(struct seq_file *m, struct task_struct *task); | ||
155 | |||
156 | extern void show_code(struct pt_regs *regs); | 151 | extern void show_code(struct pt_regs *regs); |
157 | 152 | ||
158 | unsigned long get_wchan(struct task_struct *p); | 153 | unsigned long get_wchan(struct task_struct *p); |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5eb78dd584c..b5a4a739b47 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs) | |||
237 | show_last_breaking_event(regs); | 237 | show_last_breaking_event(regs); |
238 | } | 238 | } |
239 | 239 | ||
240 | /* This is called from fs/proc/array.c */ | ||
241 | void task_show_regs(struct seq_file *m, struct task_struct *task) | ||
242 | { | ||
243 | struct pt_regs *regs; | ||
244 | |||
245 | regs = task_pt_regs(task); | ||
246 | seq_printf(m, "task: %p, ksp: %p\n", | ||
247 | task, (void *)task->thread.ksp); | ||
248 | seq_printf(m, "User PSW : %p %p\n", | ||
249 | (void *) regs->psw.mask, (void *)regs->psw.addr); | ||
250 | |||
251 | seq_printf(m, "User GPRS: " FOURLONG, | ||
252 | regs->gprs[0], regs->gprs[1], | ||
253 | regs->gprs[2], regs->gprs[3]); | ||
254 | seq_printf(m, " " FOURLONG, | ||
255 | regs->gprs[4], regs->gprs[5], | ||
256 | regs->gprs[6], regs->gprs[7]); | ||
257 | seq_printf(m, " " FOURLONG, | ||
258 | regs->gprs[8], regs->gprs[9], | ||
259 | regs->gprs[10], regs->gprs[11]); | ||
260 | seq_printf(m, " " FOURLONG, | ||
261 | regs->gprs[12], regs->gprs[13], | ||
262 | regs->gprs[14], regs->gprs[15]); | ||
263 | seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", | ||
264 | task->thread.acrs[0], task->thread.acrs[1], | ||
265 | task->thread.acrs[2], task->thread.acrs[3]); | ||
266 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
267 | task->thread.acrs[4], task->thread.acrs[5], | ||
268 | task->thread.acrs[6], task->thread.acrs[7]); | ||
269 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
270 | task->thread.acrs[8], task->thread.acrs[9], | ||
271 | task->thread.acrs[10], task->thread.acrs[11]); | ||
272 | seq_printf(m, " %08x %08x %08x %08x\n", | ||
273 | task->thread.acrs[12], task->thread.acrs[13], | ||
274 | task->thread.acrs[14], task->thread.acrs[15]); | ||
275 | } | ||
276 | |||
277 | static DEFINE_SPINLOCK(die_lock); | 240 | static DEFINE_SPINLOCK(die_lock); |
278 | 241 | ||
279 | void die(const char * str, struct pt_regs * regs, long err) | 242 | void die(const char * str, struct pt_regs * regs, long err) |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5e3969c36d7..3c896946f4c 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void); | |||
233 | extern void init_bsp_APIC(void); | 233 | extern void init_bsp_APIC(void); |
234 | extern void setup_local_APIC(void); | 234 | extern void setup_local_APIC(void); |
235 | extern void end_local_APIC_setup(void); | 235 | extern void end_local_APIC_setup(void); |
236 | extern void bsp_end_local_APIC_setup(void); | ||
236 | extern void init_apic_mappings(void); | 237 | extern void init_apic_mappings(void); |
237 | void register_lapic_address(unsigned long address); | 238 | void register_lapic_address(unsigned long address); |
238 | extern void setup_boot_APIC_clock(void); | 239 | extern void setup_boot_APIC_clock(void); |
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 6e6e7558e70..4564c8e28a3 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h | |||
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int); | |||
32 | 32 | ||
33 | DECLARE_PER_CPU(int, cpu_state); | 33 | DECLARE_PER_CPU(int, cpu_state); |
34 | 34 | ||
35 | int __cpuinit mwait_usable(const struct cpuinfo_x86 *); | 35 | int mwait_usable(const struct cpuinfo_x86 *); |
36 | 36 | ||
37 | #endif /* _ASM_X86_CPU_H */ | 37 | #endif /* _ASM_X86_CPU_H */ |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 123608531c8..7038b95d363 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | |||
671 | 671 | ||
672 | atomic_set(&stop_machine_first, 1); | 672 | atomic_set(&stop_machine_first, 1); |
673 | wrote_text = 0; | 673 | wrote_text = 0; |
674 | stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); | 674 | __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); |
675 | } | 675 | } |
676 | 676 | ||
677 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) | 677 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 06c196d7e59..76b96d74978 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1381,12 +1381,17 @@ void __cpuinit end_local_APIC_setup(void) | |||
1381 | #endif | 1381 | #endif |
1382 | 1382 | ||
1383 | apic_pm_activate(); | 1383 | apic_pm_activate(); |
1384 | } | ||
1385 | |||
1386 | void __init bsp_end_local_APIC_setup(void) | ||
1387 | { | ||
1388 | end_local_APIC_setup(); | ||
1384 | 1389 | ||
1385 | /* | 1390 | /* |
1386 | * Now that local APIC setup is completed for BP, configure the fault | 1391 | * Now that local APIC setup is completed for BP, configure the fault |
1387 | * handling for interrupt remapping. | 1392 | * handling for interrupt remapping. |
1388 | */ | 1393 | */ |
1389 | if (!smp_processor_id() && intr_remapping_enabled) | 1394 | if (intr_remapping_enabled) |
1390 | enable_drhd_fault_handling(); | 1395 | enable_drhd_fault_handling(); |
1391 | 1396 | ||
1392 | } | 1397 | } |
@@ -1756,7 +1761,7 @@ int __init APIC_init_uniprocessor(void) | |||
1756 | enable_IO_APIC(); | 1761 | enable_IO_APIC(); |
1757 | #endif | 1762 | #endif |
1758 | 1763 | ||
1759 | end_local_APIC_setup(); | 1764 | bsp_end_local_APIC_setup(); |
1760 | 1765 | ||
1761 | #ifdef CONFIG_X86_IO_APIC | 1766 | #ifdef CONFIG_X86_IO_APIC |
1762 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) | 1767 | if (smp_found_config && !skip_ioapic_setup && nr_ioapics) |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 697dc34b7b8..ca9e2a3545a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi) | |||
4002 | { | 4002 | { |
4003 | int i = 0; | 4003 | int i = 0; |
4004 | 4004 | ||
4005 | if (nr_ioapics == 0) | ||
4006 | return -1; | ||
4007 | |||
4005 | /* Find the IOAPIC that manages this GSI. */ | 4008 | /* Find the IOAPIC that manages this GSI. */ |
4006 | for (i = 0; i < nr_ioapics; i++) { | 4009 | for (i = 0; i < nr_ioapics; i++) { |
4007 | if ((gsi >= mp_gsi_routing[i].gsi_base) | 4010 | if ((gsi >= mp_gsi_routing[i].gsi_base) |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 52945da52a9..387b6a0c9e8 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -367,7 +367,8 @@ void fixup_irqs(void) | |||
367 | if (irr & (1 << (vector % 32))) { | 367 | if (irr & (1 << (vector % 32))) { |
368 | irq = __this_cpu_read(vector_irq[vector]); | 368 | irq = __this_cpu_read(vector_irq[vector]); |
369 | 369 | ||
370 | data = irq_get_irq_data(irq); | 370 | desc = irq_to_desc(irq); |
371 | data = &desc->irq_data; | ||
371 | raw_spin_lock(&desc->lock); | 372 | raw_spin_lock(&desc->lock); |
372 | if (data->chip->irq_retrigger) | 373 | if (data->chip->irq_retrigger) |
373 | data->chip->irq_retrigger(data); | 374 | data->chip->irq_retrigger(data); |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e764fc05d70..ff455419898 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -92,21 +92,31 @@ void show_regs(struct pt_regs *regs) | |||
92 | 92 | ||
93 | void show_regs_common(void) | 93 | void show_regs_common(void) |
94 | { | 94 | { |
95 | const char *board, *product; | 95 | const char *vendor, *product, *board; |
96 | 96 | ||
97 | board = dmi_get_system_info(DMI_BOARD_NAME); | 97 | vendor = dmi_get_system_info(DMI_SYS_VENDOR); |
98 | if (!board) | 98 | if (!vendor) |
99 | board = ""; | 99 | vendor = ""; |
100 | product = dmi_get_system_info(DMI_PRODUCT_NAME); | 100 | product = dmi_get_system_info(DMI_PRODUCT_NAME); |
101 | if (!product) | 101 | if (!product) |
102 | product = ""; | 102 | product = ""; |
103 | 103 | ||
104 | /* Board Name is optional */ | ||
105 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
106 | |||
104 | printk(KERN_CONT "\n"); | 107 | printk(KERN_CONT "\n"); |
105 | printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", | 108 | printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s", |
106 | current->pid, current->comm, print_tainted(), | 109 | current->pid, current->comm, print_tainted(), |
107 | init_utsname()->release, | 110 | init_utsname()->release, |
108 | (int)strcspn(init_utsname()->version, " "), | 111 | (int)strcspn(init_utsname()->version, " "), |
109 | init_utsname()->version, board, product); | 112 | init_utsname()->version); |
113 | printk(KERN_CONT " "); | ||
114 | printk(KERN_CONT "%s %s", vendor, product); | ||
115 | if (board) { | ||
116 | printk(KERN_CONT "/"); | ||
117 | printk(KERN_CONT "%s", board); | ||
118 | } | ||
119 | printk(KERN_CONT "\n"); | ||
110 | } | 120 | } |
111 | 121 | ||
112 | void flush_thread(void) | 122 | void flush_thread(void) |
@@ -506,7 +516,7 @@ static void poll_idle(void) | |||
506 | #define MWAIT_ECX_EXTENDED_INFO 0x01 | 516 | #define MWAIT_ECX_EXTENDED_INFO 0x01 |
507 | #define MWAIT_EDX_C1 0xf0 | 517 | #define MWAIT_EDX_C1 0xf0 |
508 | 518 | ||
509 | int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | 519 | int mwait_usable(const struct cpuinfo_x86 *c) |
510 | { | 520 | { |
511 | u32 eax, ebx, ecx, edx; | 521 | u32 eax, ebx, ecx, edx; |
512 | 522 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 03273b6c272..08776a95348 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1060,7 +1060,7 @@ static int __init smp_sanity_check(unsigned max_cpus) | |||
1060 | 1060 | ||
1061 | connect_bsp_APIC(); | 1061 | connect_bsp_APIC(); |
1062 | setup_local_APIC(); | 1062 | setup_local_APIC(); |
1063 | end_local_APIC_setup(); | 1063 | bsp_end_local_APIC_setup(); |
1064 | return -1; | 1064 | return -1; |
1065 | } | 1065 | } |
1066 | 1066 | ||
@@ -1137,7 +1137,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) | |||
1137 | if (!skip_ioapic_setup && nr_ioapics) | 1137 | if (!skip_ioapic_setup && nr_ioapics) |
1138 | enable_IO_APIC(); | 1138 | enable_IO_APIC(); |
1139 | 1139 | ||
1140 | end_local_APIC_setup(); | 1140 | bsp_end_local_APIC_setup(); |
1141 | 1141 | ||
1142 | map_cpu_to_logical_apicid(); | 1142 | map_cpu_to_logical_apicid(); |
1143 | 1143 | ||
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index e9562a7cb2f..3b20a3401b6 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -212,37 +212,40 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
212 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 212 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Validate wake_device is of type Device */ | ||
216 | |||
217 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); | ||
218 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
219 | return_ACPI_STATUS(AE_BAD_PARAMETER); | ||
220 | } | ||
221 | |||
222 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 215 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
223 | 216 | ||
224 | /* Ensure that we have a valid GPE number */ | 217 | /* Ensure that we have a valid GPE number */ |
225 | 218 | ||
226 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); | 219 | gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); |
227 | if (gpe_event_info) { | 220 | if (!gpe_event_info) { |
228 | /* | 221 | goto unlock_and_exit; |
229 | * If there is no method or handler for this GPE, then the | 222 | } |
230 | * wake_device will be notified whenever this GPE fires (aka | 223 | |
231 | * "implicit notify") Note: The GPE is assumed to be | 224 | /* |
232 | * level-triggered (for windows compatibility). | 225 | * If there is no method or handler for this GPE, then the |
233 | */ | 226 | * wake_device will be notified whenever this GPE fires (aka |
234 | if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 227 | * "implicit notify") Note: The GPE is assumed to be |
235 | ACPI_GPE_DISPATCH_NONE) { | 228 | * level-triggered (for windows compatibility). |
236 | gpe_event_info->flags = | 229 | */ |
237 | (ACPI_GPE_DISPATCH_NOTIFY | | 230 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == |
238 | ACPI_GPE_LEVEL_TRIGGERED); | 231 | ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { |
239 | gpe_event_info->dispatch.device_node = device_node; | ||
240 | } | ||
241 | 232 | ||
242 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | 233 | /* Validate wake_device is of type Device */ |
243 | status = AE_OK; | 234 | |
235 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, | ||
236 | wake_device); | ||
237 | if (device_node->type != ACPI_TYPE_DEVICE) { | ||
238 | goto unlock_and_exit; | ||
239 | } | ||
240 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | | ||
241 | ACPI_GPE_LEVEL_TRIGGERED); | ||
242 | gpe_event_info->dispatch.device_node = device_node; | ||
244 | } | 243 | } |
245 | 244 | ||
245 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | ||
246 | status = AE_OK; | ||
247 | |||
248 | unlock_and_exit: | ||
246 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); | 249 | acpi_os_release_lock(acpi_gbl_gpe_lock, flags); |
247 | return_ACPI_STATUS(status); | 250 | return_ACPI_STATUS(status); |
248 | } | 251 | } |
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index b0931818cf9..c90c76aa7f8 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port); | |||
636 | acpi_status | 636 | acpi_status |
637 | acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | 637 | acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) |
638 | { | 638 | { |
639 | u32 dummy; | ||
640 | void __iomem *virt_addr; | 639 | void __iomem *virt_addr; |
641 | int size = width / 8, unmap = 0; | 640 | unsigned int size = width / 8; |
641 | bool unmap = false; | ||
642 | u32 dummy; | ||
642 | 643 | ||
643 | rcu_read_lock(); | 644 | rcu_read_lock(); |
644 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 645 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
645 | rcu_read_unlock(); | ||
646 | if (!virt_addr) { | 646 | if (!virt_addr) { |
647 | rcu_read_unlock(); | ||
647 | virt_addr = acpi_os_ioremap(phys_addr, size); | 648 | virt_addr = acpi_os_ioremap(phys_addr, size); |
648 | unmap = 1; | 649 | if (!virt_addr) |
650 | return AE_BAD_ADDRESS; | ||
651 | unmap = true; | ||
649 | } | 652 | } |
653 | |||
650 | if (!value) | 654 | if (!value) |
651 | value = &dummy; | 655 | value = &dummy; |
652 | 656 | ||
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
666 | 670 | ||
667 | if (unmap) | 671 | if (unmap) |
668 | iounmap(virt_addr); | 672 | iounmap(virt_addr); |
673 | else | ||
674 | rcu_read_unlock(); | ||
669 | 675 | ||
670 | return AE_OK; | 676 | return AE_OK; |
671 | } | 677 | } |
@@ -674,14 +680,17 @@ acpi_status | |||
674 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | 680 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) |
675 | { | 681 | { |
676 | void __iomem *virt_addr; | 682 | void __iomem *virt_addr; |
677 | int size = width / 8, unmap = 0; | 683 | unsigned int size = width / 8; |
684 | bool unmap = false; | ||
678 | 685 | ||
679 | rcu_read_lock(); | 686 | rcu_read_lock(); |
680 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); | 687 | virt_addr = acpi_map_vaddr_lookup(phys_addr, size); |
681 | rcu_read_unlock(); | ||
682 | if (!virt_addr) { | 688 | if (!virt_addr) { |
689 | rcu_read_unlock(); | ||
683 | virt_addr = acpi_os_ioremap(phys_addr, size); | 690 | virt_addr = acpi_os_ioremap(phys_addr, size); |
684 | unmap = 1; | 691 | if (!virt_addr) |
692 | return AE_BAD_ADDRESS; | ||
693 | unmap = true; | ||
685 | } | 694 | } |
686 | 695 | ||
687 | switch (width) { | 696 | switch (width) { |
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | |||
700 | 709 | ||
701 | if (unmap) | 710 | if (unmap) |
702 | iounmap(virt_addr); | 711 | iounmap(virt_addr); |
712 | else | ||
713 | rcu_read_unlock(); | ||
703 | 714 | ||
704 | return AE_OK; | 715 | return AE_OK; |
705 | } | 716 | } |
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 42d3d72dae8..5af3479714f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device) | |||
82 | if (!device) | 82 | if (!device) |
83 | return 0; | 83 | return 0; |
84 | 84 | ||
85 | /* Is this device able to support video switching ? */ | ||
86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || | ||
87 | ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) | ||
88 | video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; | ||
89 | |||
85 | /* Is this device able to retrieve a video ROM ? */ | 90 | /* Is this device able to retrieve a video ROM ? */ |
86 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) | 91 | if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) |
87 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; | 92 | video_caps |= ACPI_VIDEO_ROM_AVAILABLE; |
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index ed650145250..7bfbe40bc43 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c | |||
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void) | |||
86 | struct acpi_device *dev = container_of(node, | 86 | struct acpi_device *dev = container_of(node, |
87 | struct acpi_device, | 87 | struct acpi_device, |
88 | wakeup_list); | 88 | wakeup_list); |
89 | if (device_can_wakeup(&dev->dev)) | 89 | if (device_can_wakeup(&dev->dev)) { |
90 | /* Button GPEs are supposed to be always enabled. */ | ||
91 | acpi_enable_gpe(dev->wakeup.gpe_device, | ||
92 | dev->wakeup.gpe_number); | ||
90 | device_set_wakeup_enable(&dev->dev, true); | 93 | device_set_wakeup_enable(&dev->dev, true); |
94 | } | ||
91 | } | 95 | } |
92 | mutex_unlock(&acpi_device_lock); | 96 | mutex_unlock(&acpi_device_lock); |
93 | return 0; | 97 | return 0; |
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 297f48b0cba..07bca4970e5 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c | |||
@@ -79,6 +79,7 @@ | |||
79 | #include <linux/module.h> | 79 | #include <linux/module.h> |
80 | #include <linux/interrupt.h> | 80 | #include <linux/interrupt.h> |
81 | #include <linux/slab.h> | 81 | #include <linux/slab.h> |
82 | #include <linux/delay.h> | ||
82 | #include <linux/dmapool.h> | 83 | #include <linux/dmapool.h> |
83 | #include <linux/dmaengine.h> | 84 | #include <linux/dmaengine.h> |
84 | #include <linux/amba/bus.h> | 85 | #include <linux/amba/bus.h> |
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan, | |||
235 | } | 236 | } |
236 | 237 | ||
237 | /* | 238 | /* |
238 | * Overall DMAC remains enabled always. | 239 | * Pause the channel by setting the HALT bit. |
239 | * | 240 | * |
240 | * Disabling individual channels could lose data. | 241 | * For M->P transfers, pause the DMAC first and then stop the peripheral - |
242 | * the FIFO can only drain if the peripheral is still requesting data. | ||
243 | * (note: this can still timeout if the DMAC FIFO never drains of data.) | ||
241 | * | 244 | * |
242 | * Disable the peripheral DMA after disabling the DMAC in order to allow | 245 | * For P->M transfers, disable the peripheral first to stop it filling |
243 | * the DMAC FIFO to drain, and hence allow the channel to show inactive | 246 | * the DMAC FIFO, and then pause the DMAC. |
244 | */ | 247 | */ |
245 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | 248 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) |
246 | { | 249 | { |
247 | u32 val; | 250 | u32 val; |
251 | int timeout; | ||
248 | 252 | ||
249 | /* Set the HALT bit and wait for the FIFO to drain */ | 253 | /* Set the HALT bit and wait for the FIFO to drain */ |
250 | val = readl(ch->base + PL080_CH_CONFIG); | 254 | val = readl(ch->base + PL080_CH_CONFIG); |
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | |||
252 | writel(val, ch->base + PL080_CH_CONFIG); | 256 | writel(val, ch->base + PL080_CH_CONFIG); |
253 | 257 | ||
254 | /* Wait for channel inactive */ | 258 | /* Wait for channel inactive */ |
255 | while (pl08x_phy_channel_busy(ch)) | 259 | for (timeout = 1000; timeout; timeout--) { |
256 | cpu_relax(); | 260 | if (!pl08x_phy_channel_busy(ch)) |
261 | break; | ||
262 | udelay(1); | ||
263 | } | ||
264 | if (pl08x_phy_channel_busy(ch)) | ||
265 | pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id); | ||
257 | } | 266 | } |
258 | 267 | ||
259 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | 268 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) |
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | |||
267 | } | 276 | } |
268 | 277 | ||
269 | 278 | ||
270 | /* Stops the channel */ | 279 | /* |
271 | static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) | 280 | * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and |
281 | * clears any pending interrupt status. This should not be used for | ||
282 | * an on-going transfer, but as a method of shutting down a channel | ||
283 | * (eg, when it's no longer used) or terminating a transfer. | ||
284 | */ | ||
285 | static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x, | ||
286 | struct pl08x_phy_chan *ch) | ||
272 | { | 287 | { |
273 | u32 val; | 288 | u32 val = readl(ch->base + PL080_CH_CONFIG); |
274 | 289 | ||
275 | pl08x_pause_phy_chan(ch); | 290 | val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK | |
291 | PL080_CONFIG_TC_IRQ_MASK); | ||
276 | 292 | ||
277 | /* Disable channel */ | ||
278 | val = readl(ch->base + PL080_CH_CONFIG); | ||
279 | val &= ~PL080_CONFIG_ENABLE; | ||
280 | val &= ~PL080_CONFIG_ERR_IRQ_MASK; | ||
281 | val &= ~PL080_CONFIG_TC_IRQ_MASK; | ||
282 | writel(val, ch->base + PL080_CH_CONFIG); | 293 | writel(val, ch->base + PL080_CH_CONFIG); |
294 | |||
295 | writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR); | ||
296 | writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR); | ||
283 | } | 297 | } |
284 | 298 | ||
285 | static inline u32 get_bytes_in_cctl(u32 cctl) | 299 | static inline u32 get_bytes_in_cctl(u32 cctl) |
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | |||
404 | { | 418 | { |
405 | unsigned long flags; | 419 | unsigned long flags; |
406 | 420 | ||
421 | spin_lock_irqsave(&ch->lock, flags); | ||
422 | |||
407 | /* Stop the channel and clear its interrupts */ | 423 | /* Stop the channel and clear its interrupts */ |
408 | pl08x_stop_phy_chan(ch); | 424 | pl08x_terminate_phy_chan(pl08x, ch); |
409 | writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); | ||
410 | writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); | ||
411 | 425 | ||
412 | /* Mark it as free */ | 426 | /* Mark it as free */ |
413 | spin_lock_irqsave(&ch->lock, flags); | ||
414 | ch->serving = NULL; | 427 | ch->serving = NULL; |
415 | spin_unlock_irqrestore(&ch->lock, flags); | 428 | spin_unlock_irqrestore(&ch->lock, flags); |
416 | } | 429 | } |
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1449 | plchan->state = PL08X_CHAN_IDLE; | 1462 | plchan->state = PL08X_CHAN_IDLE; |
1450 | 1463 | ||
1451 | if (plchan->phychan) { | 1464 | if (plchan->phychan) { |
1452 | pl08x_stop_phy_chan(plchan->phychan); | 1465 | pl08x_terminate_phy_chan(pl08x, plchan->phychan); |
1453 | 1466 | ||
1454 | /* | 1467 | /* |
1455 | * Mark physical channel as free and free any slave | 1468 | * Mark physical channel as free and free any slave |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index e53d438142b..e18eaabe92b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -49,6 +49,7 @@ struct imxdma_channel { | |||
49 | 49 | ||
50 | struct imxdma_engine { | 50 | struct imxdma_engine { |
51 | struct device *dev; | 51 | struct device *dev; |
52 | struct device_dma_parameters dma_parms; | ||
52 | struct dma_device dma_device; | 53 | struct dma_device dma_device; |
53 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | 54 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; |
54 | }; | 55 | }; |
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
242 | else | 243 | else |
243 | dmamode = DMA_MODE_WRITE; | 244 | dmamode = DMA_MODE_WRITE; |
244 | 245 | ||
246 | switch (imxdmac->word_size) { | ||
247 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
248 | if (sgl->length & 3 || sgl->dma_address & 3) | ||
249 | return NULL; | ||
250 | break; | ||
251 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
252 | if (sgl->length & 1 || sgl->dma_address & 1) | ||
253 | return NULL; | ||
254 | break; | ||
255 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
256 | break; | ||
257 | default: | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
245 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | 261 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, |
246 | dma_length, imxdmac->per_address, dmamode); | 262 | dma_length, imxdmac->per_address, dmamode); |
247 | if (ret) | 263 | if (ret) |
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
329 | 345 | ||
330 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | 346 | INIT_LIST_HEAD(&imxdma->dma_device.channels); |
331 | 347 | ||
348 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
349 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
350 | |||
332 | /* Initialize channel parameters */ | 351 | /* Initialize channel parameters */ |
333 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 352 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
334 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | 353 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
346 | imxdmac->imxdma = imxdma; | 365 | imxdmac->imxdma = imxdma; |
347 | spin_lock_init(&imxdmac->lock); | 366 | spin_lock_init(&imxdmac->lock); |
348 | 367 | ||
349 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
350 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
351 | |||
352 | imxdmac->chan.device = &imxdma->dma_device; | 368 | imxdmac->chan.device = &imxdma->dma_device; |
353 | imxdmac->chan.chan_id = i; | ||
354 | imxdmac->channel = i; | 369 | imxdmac->channel = i; |
355 | 370 | ||
356 | /* Add the channel to the DMAC list */ | 371 | /* Add the channel to the DMAC list */ |
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev) | |||
370 | 385 | ||
371 | platform_set_drvdata(pdev, imxdma); | 386 | platform_set_drvdata(pdev, imxdma); |
372 | 387 | ||
388 | imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; | ||
389 | dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); | ||
390 | |||
373 | ret = dma_async_device_register(&imxdma->dma_device); | 391 | ret = dma_async_device_register(&imxdma->dma_device); |
374 | if (ret) { | 392 | if (ret) { |
375 | dev_err(&pdev->dev, "unable to register\n"); | 393 | dev_err(&pdev->dev, "unable to register\n"); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d5a5d4d9c19..b6d1455fa93 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -230,7 +230,7 @@ struct sdma_engine; | |||
230 | * struct sdma_channel - housekeeping for a SDMA channel | 230 | * struct sdma_channel - housekeeping for a SDMA channel |
231 | * | 231 | * |
232 | * @sdma pointer to the SDMA engine for this channel | 232 | * @sdma pointer to the SDMA engine for this channel |
233 | * @channel the channel number, matches dmaengine chan_id | 233 | * @channel the channel number, matches dmaengine chan_id + 1 |
234 | * @direction transfer type. Needed for setting SDMA script | 234 | * @direction transfer type. Needed for setting SDMA script |
235 | * @peripheral_type Peripheral type. Needed for setting SDMA script | 235 | * @peripheral_type Peripheral type. Needed for setting SDMA script |
236 | * @event_id0 aka dma request line | 236 | * @event_id0 aka dma request line |
@@ -301,6 +301,7 @@ struct sdma_firmware_header { | |||
301 | 301 | ||
302 | struct sdma_engine { | 302 | struct sdma_engine { |
303 | struct device *dev; | 303 | struct device *dev; |
304 | struct device_dma_parameters dma_parms; | ||
304 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | 305 | struct sdma_channel channel[MAX_DMA_CHANNELS]; |
305 | struct sdma_channel_control *channel_control; | 306 | struct sdma_channel_control *channel_control; |
306 | void __iomem *regs; | 307 | void __iomem *regs; |
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | |||
449 | if (bd->mode.status & BD_RROR) | 450 | if (bd->mode.status & BD_RROR) |
450 | sdmac->status = DMA_ERROR; | 451 | sdmac->status = DMA_ERROR; |
451 | else | 452 | else |
452 | sdmac->status = DMA_SUCCESS; | 453 | sdmac->status = DMA_IN_PROGRESS; |
453 | 454 | ||
454 | bd->mode.status |= BD_DONE; | 455 | bd->mode.status |= BD_DONE; |
455 | sdmac->buf_tail++; | 456 | sdmac->buf_tail++; |
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | |||
770 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | 771 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); |
771 | } | 772 | } |
772 | 773 | ||
773 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) | 774 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac) |
774 | { | 775 | { |
775 | dma_cookie_t cookie = sdma->chan.cookie; | 776 | dma_cookie_t cookie = sdmac->chan.cookie; |
776 | 777 | ||
777 | if (++cookie < 0) | 778 | if (++cookie < 0) |
778 | cookie = 1; | 779 | cookie = 1; |
779 | 780 | ||
780 | sdma->chan.cookie = cookie; | 781 | sdmac->chan.cookie = cookie; |
781 | sdma->desc.cookie = cookie; | 782 | sdmac->desc.cookie = cookie; |
782 | 783 | ||
783 | return cookie; | 784 | return cookie; |
784 | } | 785 | } |
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
798 | 799 | ||
799 | cookie = sdma_assign_cookie(sdmac); | 800 | cookie = sdma_assign_cookie(sdmac); |
800 | 801 | ||
801 | sdma_enable_channel(sdma, tx->chan->chan_id); | 802 | sdma_enable_channel(sdma, sdmac->channel); |
802 | 803 | ||
803 | spin_unlock_irq(&sdmac->lock); | 804 | spin_unlock_irq(&sdmac->lock); |
804 | 805 | ||
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan) | |||
811 | struct imx_dma_data *data = chan->private; | 812 | struct imx_dma_data *data = chan->private; |
812 | int prio, ret; | 813 | int prio, ret; |
813 | 814 | ||
814 | /* No need to execute this for internal channel 0 */ | ||
815 | if (chan->chan_id == 0) | ||
816 | return 0; | ||
817 | |||
818 | if (!data) | 815 | if (!data) |
819 | return -EINVAL; | 816 | return -EINVAL; |
820 | 817 | ||
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
879 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 876 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
880 | struct sdma_engine *sdma = sdmac->sdma; | 877 | struct sdma_engine *sdma = sdmac->sdma; |
881 | int ret, i, count; | 878 | int ret, i, count; |
882 | int channel = chan->chan_id; | 879 | int channel = sdmac->channel; |
883 | struct scatterlist *sg; | 880 | struct scatterlist *sg; |
884 | 881 | ||
885 | if (sdmac->status == DMA_IN_PROGRESS) | 882 | if (sdmac->status == DMA_IN_PROGRESS) |
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
924 | ret = -EINVAL; | 921 | ret = -EINVAL; |
925 | goto err_out; | 922 | goto err_out; |
926 | } | 923 | } |
927 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | 924 | |
925 | switch (sdmac->word_size) { | ||
926 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
928 | bd->mode.command = 0; | 927 | bd->mode.command = 0; |
929 | else | 928 | if (count & 3 || sg->dma_address & 3) |
930 | bd->mode.command = sdmac->word_size; | 929 | return NULL; |
930 | break; | ||
931 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
932 | bd->mode.command = 2; | ||
933 | if (count & 1 || sg->dma_address & 1) | ||
934 | return NULL; | ||
935 | break; | ||
936 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
937 | bd->mode.command = 1; | ||
938 | break; | ||
939 | default: | ||
940 | return NULL; | ||
941 | } | ||
931 | 942 | ||
932 | param = BD_DONE | BD_EXTD | BD_CONT; | 943 | param = BD_DONE | BD_EXTD | BD_CONT; |
933 | 944 | ||
934 | if (sdmac->flags & IMX_DMA_SG_LOOP) { | 945 | if (i + 1 == sg_len) { |
935 | param |= BD_INTR; | 946 | param |= BD_INTR; |
936 | if (i + 1 == sg_len) | 947 | param |= BD_LAST; |
937 | param |= BD_WRAP; | 948 | param &= ~BD_CONT; |
938 | } | 949 | } |
939 | 950 | ||
940 | if (i + 1 == sg_len) | ||
941 | param |= BD_INTR; | ||
942 | |||
943 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | 951 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", |
944 | i, count, sg->dma_address, | 952 | i, count, sg->dma_address, |
945 | param & BD_WRAP ? "wrap" : "", | 953 | param & BD_WRAP ? "wrap" : "", |
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
953 | 961 | ||
954 | return &sdmac->desc; | 962 | return &sdmac->desc; |
955 | err_out: | 963 | err_out: |
964 | sdmac->status = DMA_ERROR; | ||
956 | return NULL; | 965 | return NULL; |
957 | } | 966 | } |
958 | 967 | ||
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | |||
963 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 972 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
964 | struct sdma_engine *sdma = sdmac->sdma; | 973 | struct sdma_engine *sdma = sdmac->sdma; |
965 | int num_periods = buf_len / period_len; | 974 | int num_periods = buf_len / period_len; |
966 | int channel = chan->chan_id; | 975 | int channel = sdmac->channel; |
967 | int ret, i = 0, buf = 0; | 976 | int ret, i = 0, buf = 0; |
968 | 977 | ||
969 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); | 978 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); |
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1066 | { | 1075 | { |
1067 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1076 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1068 | dma_cookie_t last_used; | 1077 | dma_cookie_t last_used; |
1069 | enum dma_status ret; | ||
1070 | 1078 | ||
1071 | last_used = chan->cookie; | 1079 | last_used = chan->cookie; |
1072 | 1080 | ||
1073 | ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); | ||
1074 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | 1081 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); |
1075 | 1082 | ||
1076 | return ret; | 1083 | return sdmac->status; |
1077 | } | 1084 | } |
1078 | 1085 | ||
1079 | static void sdma_issue_pending(struct dma_chan *chan) | 1086 | static void sdma_issue_pending(struct dma_chan *chan) |
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma, | |||
1135 | /* download the RAM image for SDMA */ | 1142 | /* download the RAM image for SDMA */ |
1136 | sdma_load_script(sdma, ram_code, | 1143 | sdma_load_script(sdma, ram_code, |
1137 | header->ram_code_size, | 1144 | header->ram_code_size, |
1138 | sdma->script_addrs->ram_code_start_addr); | 1145 | addr->ram_code_start_addr); |
1139 | clk_disable(sdma->clk); | 1146 | clk_disable(sdma->clk); |
1140 | 1147 | ||
1141 | sdma_add_scripts(sdma, addr); | 1148 | sdma_add_scripts(sdma, addr); |
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1237 | struct resource *iores; | 1244 | struct resource *iores; |
1238 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | 1245 | struct sdma_platform_data *pdata = pdev->dev.platform_data; |
1239 | int i; | 1246 | int i; |
1240 | dma_cap_mask_t mask; | ||
1241 | struct sdma_engine *sdma; | 1247 | struct sdma_engine *sdma; |
1242 | 1248 | ||
1243 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | 1249 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); |
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1280 | 1286 | ||
1281 | sdma->version = pdata->sdma_version; | 1287 | sdma->version = pdata->sdma_version; |
1282 | 1288 | ||
1289 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1290 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1291 | |||
1283 | INIT_LIST_HEAD(&sdma->dma_device.channels); | 1292 | INIT_LIST_HEAD(&sdma->dma_device.channels); |
1284 | /* Initialize channel parameters */ | 1293 | /* Initialize channel parameters */ |
1285 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | 1294 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1288 | sdmac->sdma = sdma; | 1297 | sdmac->sdma = sdma; |
1289 | spin_lock_init(&sdmac->lock); | 1298 | spin_lock_init(&sdmac->lock); |
1290 | 1299 | ||
1291 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
1292 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
1293 | |||
1294 | sdmac->chan.device = &sdma->dma_device; | 1300 | sdmac->chan.device = &sdma->dma_device; |
1295 | sdmac->chan.chan_id = i; | ||
1296 | sdmac->channel = i; | 1301 | sdmac->channel = i; |
1297 | 1302 | ||
1298 | /* Add the channel to the DMAC list */ | 1303 | /* |
1299 | list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); | 1304 | * Add the channel to the DMAC list. Do not add channel 0 though |
1305 | * because we need it internally in the SDMA driver. This also means | ||
1306 | * that channel 0 in dmaengine counting matches sdma channel 1. | ||
1307 | */ | ||
1308 | if (i) | ||
1309 | list_add_tail(&sdmac->chan.device_node, | ||
1310 | &sdma->dma_device.channels); | ||
1300 | } | 1311 | } |
1301 | 1312 | ||
1302 | ret = sdma_init(sdma); | 1313 | ret = sdma_init(sdma); |
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1317 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | 1328 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; |
1318 | sdma->dma_device.device_control = sdma_control; | 1329 | sdma->dma_device.device_control = sdma_control; |
1319 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | 1330 | sdma->dma_device.device_issue_pending = sdma_issue_pending; |
1331 | sdma->dma_device.dev->dma_parms = &sdma->dma_parms; | ||
1332 | dma_set_max_seg_size(sdma->dma_device.dev, 65535); | ||
1320 | 1333 | ||
1321 | ret = dma_async_device_register(&sdma->dma_device); | 1334 | ret = dma_async_device_register(&sdma->dma_device); |
1322 | if (ret) { | 1335 | if (ret) { |
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev) | |||
1324 | goto err_init; | 1337 | goto err_init; |
1325 | } | 1338 | } |
1326 | 1339 | ||
1327 | /* request channel 0. This is an internal control channel | ||
1328 | * to the SDMA engine and not available to clients. | ||
1329 | */ | ||
1330 | dma_cap_zero(mask); | ||
1331 | dma_cap_set(DMA_SLAVE, mask); | ||
1332 | dma_request_channel(mask, NULL, NULL); | ||
1333 | |||
1334 | dev_info(sdma->dev, "initialized\n"); | 1340 | dev_info(sdma->dev, "initialized\n"); |
1335 | 1341 | ||
1336 | return 0; | 1342 | return 0; |
@@ -1348,7 +1354,7 @@ err_clk: | |||
1348 | err_request_region: | 1354 | err_request_region: |
1349 | err_irq: | 1355 | err_irq: |
1350 | kfree(sdma); | 1356 | kfree(sdma); |
1351 | return 0; | 1357 | return ret; |
1352 | } | 1358 | } |
1353 | 1359 | ||
1354 | static int __exit sdma_remove(struct platform_device *pdev) | 1360 | static int __exit sdma_remove(struct platform_device *pdev) |
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index cb26ee9773d..c1a125e7d1d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | |||
1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | 1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); |
1147 | 1147 | ||
1148 | /* | ||
1149 | * Problem (observed with channel DMAIC_7): after enabling the channel | ||
1150 | * and initialising buffers, there comes an interrupt with current still | ||
1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only | ||
1152 | * generate an interrupt when it is done, then current should already | ||
1153 | * point to buffer 1. This spurious interrupt also comes on channel | ||
1154 | * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the | ||
1155 | * first interrupt, there comes the second with current correctly | ||
1156 | * pointing to buffer 1 this time. But sometimes this second interrupt | ||
1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | ||
1158 | * the channel seems to prevent the channel from hanging, but it doesn't | ||
1159 | * prevent the spurious interrupt. This might also be unsafe. Think | ||
1160 | * about the IDMAC controller trying to switch to a buffer, when we | ||
1161 | * clear the ready bit, and re-enable it a moment later. | ||
1162 | */ | ||
1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | ||
1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | ||
1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | ||
1166 | |||
1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | ||
1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | ||
1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | ||
1170 | |||
1171 | spin_unlock_irqrestore(&ipu->lock, flags); | 1148 | spin_unlock_irqrestore(&ipu->lock, flags); |
1172 | 1149 | ||
1173 | return 0; | 1150 | return 0; |
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id) | |||
1246 | 1223 | ||
1247 | /* Other interrupts do not interfere with this channel */ | 1224 | /* Other interrupts do not interfere with this channel */ |
1248 | spin_lock(&ichan->lock); | 1225 | spin_lock(&ichan->lock); |
1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | ||
1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && | ||
1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { | ||
1252 | int i = 100; | ||
1253 | |||
1254 | /* This doesn't help. See comment in ipu_disable_channel() */ | ||
1255 | while (--i) { | ||
1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | ||
1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | ||
1258 | break; | ||
1259 | cpu_relax(); | ||
1260 | } | ||
1261 | |||
1262 | if (!i) { | ||
1263 | spin_unlock(&ichan->lock); | ||
1264 | dev_dbg(dev, | ||
1265 | "IRQ on active buffer on channel %x, active " | ||
1266 | "%d, ready %x, %x, current %x!\n", chan_id, | ||
1267 | ichan->active_buffer, ready0, ready1, curbuf); | ||
1268 | return IRQ_NONE; | ||
1269 | } else | ||
1270 | dev_dbg(dev, | ||
1271 | "Buffer deactivated on channel %x, active " | ||
1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, | ||
1273 | ichan->active_buffer, ready0, ready1, curbuf, i); | ||
1274 | } | ||
1275 | |||
1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | 1226 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || |
1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | 1227 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) |
1278 | )) { | 1228 | )) { |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index e28e4166817..bcb1126e3d0 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info) | |||
378 | 378 | ||
379 | static void __init dmi_dump_ids(void) | 379 | static void __init dmi_dump_ids(void) |
380 | { | 380 | { |
381 | const char *board; /* Board Name is optional */ | ||
382 | |||
381 | printk(KERN_DEBUG "DMI: "); | 383 | printk(KERN_DEBUG "DMI: "); |
382 | print_filtered(dmi_get_system_info(DMI_BOARD_NAME)); | 384 | print_filtered(dmi_get_system_info(DMI_SYS_VENDOR)); |
383 | printk(KERN_CONT "/"); | 385 | printk(KERN_CONT " "); |
384 | print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); | 386 | print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); |
387 | board = dmi_get_system_info(DMI_BOARD_NAME); | ||
388 | if (board) { | ||
389 | printk(KERN_CONT "/"); | ||
390 | print_filtered(board); | ||
391 | } | ||
385 | printk(KERN_CONT ", BIOS "); | 392 | printk(KERN_CONT ", BIOS "); |
386 | print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); | 393 | print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); |
387 | printk(KERN_CONT " "); | 394 | printk(KERN_CONT " "); |
diff --git a/drivers/input/input.c b/drivers/input/input.c index 7985114beac..11905b6a302 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) | |||
75 | * dev->event_lock held and interrupts disabled. | 75 | * dev->event_lock held and interrupts disabled. |
76 | */ | 76 | */ |
77 | static void input_pass_event(struct input_dev *dev, | 77 | static void input_pass_event(struct input_dev *dev, |
78 | struct input_handler *src_handler, | ||
79 | unsigned int type, unsigned int code, int value) | 78 | unsigned int type, unsigned int code, int value) |
80 | { | 79 | { |
81 | struct input_handler *handler; | 80 | struct input_handler *handler; |
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev, | |||
94 | continue; | 93 | continue; |
95 | 94 | ||
96 | handler = handle->handler; | 95 | handler = handle->handler; |
97 | |||
98 | /* | ||
99 | * If this is the handler that injected this | ||
100 | * particular event we want to skip it to avoid | ||
101 | * filters firing again and again. | ||
102 | */ | ||
103 | if (handler == src_handler) | ||
104 | continue; | ||
105 | |||
106 | if (!handler->filter) { | 96 | if (!handler->filter) { |
107 | if (filtered) | 97 | if (filtered) |
108 | break; | 98 | break; |
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data) | |||
132 | if (test_bit(dev->repeat_key, dev->key) && | 122 | if (test_bit(dev->repeat_key, dev->key) && |
133 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { | 123 | is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { |
134 | 124 | ||
135 | input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); | 125 | input_pass_event(dev, EV_KEY, dev->repeat_key, 2); |
136 | 126 | ||
137 | if (dev->sync) { | 127 | if (dev->sync) { |
138 | /* | 128 | /* |
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data) | |||
141 | * Otherwise assume that the driver will send | 131 | * Otherwise assume that the driver will send |
142 | * SYN_REPORT once it's done. | 132 | * SYN_REPORT once it's done. |
143 | */ | 133 | */ |
144 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 134 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
145 | } | 135 | } |
146 | 136 | ||
147 | if (dev->rep[REP_PERIOD]) | 137 | if (dev->rep[REP_PERIOD]) |
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev) | |||
174 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) | 164 | #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) |
175 | 165 | ||
176 | static int input_handle_abs_event(struct input_dev *dev, | 166 | static int input_handle_abs_event(struct input_dev *dev, |
177 | struct input_handler *src_handler, | ||
178 | unsigned int code, int *pval) | 167 | unsigned int code, int *pval) |
179 | { | 168 | { |
180 | bool is_mt_event; | 169 | bool is_mt_event; |
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev, | |||
218 | /* Flush pending "slot" event */ | 207 | /* Flush pending "slot" event */ |
219 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { | 208 | if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { |
220 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); | 209 | input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); |
221 | input_pass_event(dev, src_handler, | 210 | input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); |
222 | EV_ABS, ABS_MT_SLOT, dev->slot); | ||
223 | } | 211 | } |
224 | 212 | ||
225 | return INPUT_PASS_TO_HANDLERS; | 213 | return INPUT_PASS_TO_HANDLERS; |
226 | } | 214 | } |
227 | 215 | ||
228 | static void input_handle_event(struct input_dev *dev, | 216 | static void input_handle_event(struct input_dev *dev, |
229 | struct input_handler *src_handler, | ||
230 | unsigned int type, unsigned int code, int value) | 217 | unsigned int type, unsigned int code, int value) |
231 | { | 218 | { |
232 | int disposition = INPUT_IGNORE_EVENT; | 219 | int disposition = INPUT_IGNORE_EVENT; |
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev, | |||
279 | 266 | ||
280 | case EV_ABS: | 267 | case EV_ABS: |
281 | if (is_event_supported(code, dev->absbit, ABS_MAX)) | 268 | if (is_event_supported(code, dev->absbit, ABS_MAX)) |
282 | disposition = input_handle_abs_event(dev, src_handler, | 269 | disposition = input_handle_abs_event(dev, code, &value); |
283 | code, &value); | ||
284 | 270 | ||
285 | break; | 271 | break; |
286 | 272 | ||
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev, | |||
338 | dev->event(dev, type, code, value); | 324 | dev->event(dev, type, code, value); |
339 | 325 | ||
340 | if (disposition & INPUT_PASS_TO_HANDLERS) | 326 | if (disposition & INPUT_PASS_TO_HANDLERS) |
341 | input_pass_event(dev, src_handler, type, code, value); | 327 | input_pass_event(dev, type, code, value); |
342 | } | 328 | } |
343 | 329 | ||
344 | /** | 330 | /** |
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev, | |||
367 | 353 | ||
368 | spin_lock_irqsave(&dev->event_lock, flags); | 354 | spin_lock_irqsave(&dev->event_lock, flags); |
369 | add_input_randomness(type, code, value); | 355 | add_input_randomness(type, code, value); |
370 | input_handle_event(dev, NULL, type, code, value); | 356 | input_handle_event(dev, type, code, value); |
371 | spin_unlock_irqrestore(&dev->event_lock, flags); | 357 | spin_unlock_irqrestore(&dev->event_lock, flags); |
372 | } | 358 | } |
373 | } | 359 | } |
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle, | |||
397 | rcu_read_lock(); | 383 | rcu_read_lock(); |
398 | grab = rcu_dereference(dev->grab); | 384 | grab = rcu_dereference(dev->grab); |
399 | if (!grab || grab == handle) | 385 | if (!grab || grab == handle) |
400 | input_handle_event(dev, handle->handler, | 386 | input_handle_event(dev, type, code, value); |
401 | type, code, value); | ||
402 | rcu_read_unlock(); | 387 | rcu_read_unlock(); |
403 | 388 | ||
404 | spin_unlock_irqrestore(&dev->event_lock, flags); | 389 | spin_unlock_irqrestore(&dev->event_lock, flags); |
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev) | |||
611 | for (code = 0; code <= KEY_MAX; code++) { | 596 | for (code = 0; code <= KEY_MAX; code++) { |
612 | if (is_event_supported(code, dev->keybit, KEY_MAX) && | 597 | if (is_event_supported(code, dev->keybit, KEY_MAX) && |
613 | __test_and_clear_bit(code, dev->key)) { | 598 | __test_and_clear_bit(code, dev->key)) { |
614 | input_pass_event(dev, NULL, EV_KEY, code, 0); | 599 | input_pass_event(dev, EV_KEY, code, 0); |
615 | } | 600 | } |
616 | } | 601 | } |
617 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 602 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
618 | } | 603 | } |
619 | } | 604 | } |
620 | 605 | ||
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev, | |||
889 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && | 874 | !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && |
890 | __test_and_clear_bit(old_keycode, dev->key)) { | 875 | __test_and_clear_bit(old_keycode, dev->key)) { |
891 | 876 | ||
892 | input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); | 877 | input_pass_event(dev, EV_KEY, old_keycode, 0); |
893 | if (dev->sync) | 878 | if (dev->sync) |
894 | input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); | 879 | input_pass_event(dev, EV_SYN, SYN_REPORT, 1); |
895 | } | 880 | } |
896 | 881 | ||
897 | out: | 882 | out: |
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c index 1f8e0108962..7e64d01da2b 100644 --- a/drivers/input/misc/rotary_encoder.c +++ b/drivers/input/misc/rotary_encoder.c | |||
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) | |||
176 | 176 | ||
177 | /* request the IRQs */ | 177 | /* request the IRQs */ |
178 | err = request_irq(encoder->irq_a, &rotary_encoder_irq, | 178 | err = request_irq(encoder->irq_a, &rotary_encoder_irq, |
179 | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, | 179 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
180 | DRV_NAME, encoder); | 180 | DRV_NAME, encoder); |
181 | if (err) { | 181 | if (err) { |
182 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | 182 | dev_err(&pdev->dev, "unable to request IRQ %d\n", |
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev) | |||
185 | } | 185 | } |
186 | 186 | ||
187 | err = request_irq(encoder->irq_b, &rotary_encoder_irq, | 187 | err = request_irq(encoder->irq_b, &rotary_encoder_irq, |
188 | IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, | 188 | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, |
189 | DRV_NAME, encoder); | 189 | DRV_NAME, encoder); |
190 | if (err) { | 190 | if (err) { |
191 | dev_err(&pdev->dev, "unable to request IRQ %d\n", | 191 | dev_err(&pdev->dev, "unable to request IRQ %d\n", |
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index db5b0bca1a1..7c38d1fbabf 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event) | |||
188 | kfree(event); | 188 | kfree(event); |
189 | } | 189 | } |
190 | 190 | ||
191 | static void serio_remove_duplicate_events(struct serio_event *event) | 191 | static void serio_remove_duplicate_events(void *object, |
192 | enum serio_event_type type) | ||
192 | { | 193 | { |
193 | struct serio_event *e, *next; | 194 | struct serio_event *e, *next; |
194 | unsigned long flags; | 195 | unsigned long flags; |
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event) | |||
196 | spin_lock_irqsave(&serio_event_lock, flags); | 197 | spin_lock_irqsave(&serio_event_lock, flags); |
197 | 198 | ||
198 | list_for_each_entry_safe(e, next, &serio_event_list, node) { | 199 | list_for_each_entry_safe(e, next, &serio_event_list, node) { |
199 | if (event->object == e->object) { | 200 | if (object == e->object) { |
200 | /* | 201 | /* |
201 | * If this event is of different type we should not | 202 | * If this event is of different type we should not |
202 | * look further - we only suppress duplicate events | 203 | * look further - we only suppress duplicate events |
203 | * that were sent back-to-back. | 204 | * that were sent back-to-back. |
204 | */ | 205 | */ |
205 | if (event->type != e->type) | 206 | if (type != e->type) |
206 | break; | 207 | break; |
207 | 208 | ||
208 | list_del_init(&e->node); | 209 | list_del_init(&e->node); |
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work) | |||
245 | break; | 246 | break; |
246 | } | 247 | } |
247 | 248 | ||
248 | serio_remove_duplicate_events(event); | 249 | serio_remove_duplicate_events(event->object, event->type); |
249 | serio_free_event(event); | 250 | serio_free_event(event); |
250 | } | 251 | } |
251 | 252 | ||
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute * | |||
436 | } else if (!strncmp(buf, "rescan", count)) { | 437 | } else if (!strncmp(buf, "rescan", count)) { |
437 | serio_disconnect_port(serio); | 438 | serio_disconnect_port(serio); |
438 | serio_find_driver(serio); | 439 | serio_find_driver(serio); |
440 | serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); | ||
439 | } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { | 441 | } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { |
440 | serio_disconnect_port(serio); | 442 | serio_disconnect_port(serio); |
441 | error = serio_bind_driver(serio, to_serio_driver(drv)); | 443 | error = serio_bind_driver(serio, to_serio_driver(drv)); |
442 | put_driver(drv); | 444 | put_driver(drv); |
445 | serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); | ||
443 | } else { | 446 | } else { |
444 | error = -EINVAL; | 447 | error = -EINVAL; |
445 | } | 448 | } |
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index fc381498b79..cf8fb9f5d4a 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c | |||
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i | |||
519 | /* Retrieve the physical and logical size for OEM devices */ | 519 | /* Retrieve the physical and logical size for OEM devices */ |
520 | error = wacom_retrieve_hid_descriptor(intf, features); | 520 | error = wacom_retrieve_hid_descriptor(intf, features); |
521 | if (error) | 521 | if (error) |
522 | goto fail2; | 522 | goto fail3; |
523 | 523 | ||
524 | wacom_setup_device_quirks(features); | 524 | wacom_setup_device_quirks(features); |
525 | 525 | ||
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 14ea54b78e4..4bf2316e328 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784 | |||
941 | struct ads7846_platform_data *pdata = spi->dev.platform_data; | 941 | struct ads7846_platform_data *pdata = spi->dev.platform_data; |
942 | int err; | 942 | int err; |
943 | 943 | ||
944 | /* REVISIT when the irq can be triggered active-low, or if for some | 944 | /* |
945 | * REVISIT when the irq can be triggered active-low, or if for some | ||
945 | * reason the touchscreen isn't hooked up, we don't need to access | 946 | * reason the touchscreen isn't hooked up, we don't need to access |
946 | * the pendown state. | 947 | * the pendown state. |
947 | */ | 948 | */ |
948 | if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) { | ||
949 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); | ||
950 | return -EINVAL; | ||
951 | } | ||
952 | 949 | ||
953 | if (pdata->get_pendown_state) { | 950 | if (pdata->get_pendown_state) { |
954 | ts->get_pendown_state = pdata->get_pendown_state; | 951 | ts->get_pendown_state = pdata->get_pendown_state; |
955 | return 0; | 952 | } else if (gpio_is_valid(pdata->gpio_pendown)) { |
956 | } | ||
957 | 953 | ||
958 | err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); | 954 | err = gpio_request(pdata->gpio_pendown, "ads7846_pendown"); |
959 | if (err) { | 955 | if (err) { |
960 | dev_err(&spi->dev, "failed to request pendown GPIO%d\n", | 956 | dev_err(&spi->dev, "failed to request pendown GPIO%d\n", |
961 | pdata->gpio_pendown); | 957 | pdata->gpio_pendown); |
962 | return err; | 958 | return err; |
963 | } | 959 | } |
964 | 960 | ||
965 | ts->gpio_pendown = pdata->gpio_pendown; | 961 | ts->gpio_pendown = pdata->gpio_pendown; |
962 | |||
963 | } else { | ||
964 | dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); | ||
965 | return -EINVAL; | ||
966 | } | ||
966 | 967 | ||
967 | return 0; | 968 | return 0; |
968 | } | 969 | } |
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi) | |||
1353 | err_put_regulator: | 1354 | err_put_regulator: |
1354 | regulator_put(ts->reg); | 1355 | regulator_put(ts->reg); |
1355 | err_free_gpio: | 1356 | err_free_gpio: |
1356 | if (ts->gpio_pendown != -1) | 1357 | if (!ts->get_pendown_state) |
1357 | gpio_free(ts->gpio_pendown); | 1358 | gpio_free(ts->gpio_pendown); |
1358 | err_cleanup_filter: | 1359 | err_cleanup_filter: |
1359 | if (ts->filter_cleanup) | 1360 | if (ts->filter_cleanup) |
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi) | |||
1383 | regulator_disable(ts->reg); | 1384 | regulator_disable(ts->reg); |
1384 | regulator_put(ts->reg); | 1385 | regulator_put(ts->reg); |
1385 | 1386 | ||
1386 | if (ts->gpio_pendown != -1) | 1387 | if (!ts->get_pendown_state) { |
1388 | /* | ||
1389 | * If we are not using specialized pendown method we must | ||
1390 | * have been relying on gpio we set up ourselves. | ||
1391 | */ | ||
1387 | gpio_free(ts->gpio_pendown); | 1392 | gpio_free(ts->gpio_pendown); |
1393 | } | ||
1388 | 1394 | ||
1389 | if (ts->filter_cleanup) | 1395 | if (ts->filter_cleanup) |
1390 | ts->filter_cleanup(ts->filter_data); | 1396 | ts->filter_cleanup(ts->filter_data); |
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c index 5cb8449c909..c14412ef464 100644 --- a/drivers/input/touchscreen/wacom_w8001.c +++ b/drivers/input/touchscreen/wacom_w8001.c | |||
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL"); | |||
51 | #define W8001_PKTLEN_TPCCTL 11 /* control packet */ | 51 | #define W8001_PKTLEN_TPCCTL 11 /* control packet */ |
52 | #define W8001_PKTLEN_TOUCH2FG 13 | 52 | #define W8001_PKTLEN_TOUCH2FG 13 |
53 | 53 | ||
54 | /* resolution in points/mm */ | ||
55 | #define W8001_PEN_RESOLUTION 100 | ||
56 | #define W8001_TOUCH_RESOLUTION 10 | ||
57 | |||
54 | struct w8001_coord { | 58 | struct w8001_coord { |
55 | u8 rdy; | 59 | u8 rdy; |
56 | u8 tsw; | 60 | u8 tsw; |
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query) | |||
198 | query->y = 1024; | 202 | query->y = 1024; |
199 | if (query->panel_res) | 203 | if (query->panel_res) |
200 | query->x = query->y = (1 << query->panel_res); | 204 | query->x = query->y = (1 << query->panel_res); |
201 | query->panel_res = 10; | 205 | query->panel_res = W8001_TOUCH_RESOLUTION; |
202 | } | 206 | } |
203 | } | 207 | } |
204 | 208 | ||
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001) | |||
394 | 398 | ||
395 | input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); | 399 | input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); |
396 | input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); | 400 | input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); |
401 | input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION); | ||
402 | input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION); | ||
397 | input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); | 403 | input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); |
398 | if (coord.tilt_x && coord.tilt_y) { | 404 | if (coord.tilt_x && coord.tilt_y) { |
399 | input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); | 405 | input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); |
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001) | |||
418 | w8001->max_touch_x = touch.x; | 424 | w8001->max_touch_x = touch.x; |
419 | w8001->max_touch_y = touch.y; | 425 | w8001->max_touch_y = touch.y; |
420 | 426 | ||
421 | /* scale to pen maximum */ | ||
422 | if (w8001->max_pen_x && w8001->max_pen_y) { | 427 | if (w8001->max_pen_x && w8001->max_pen_y) { |
428 | /* if pen is supported scale to pen maximum */ | ||
423 | touch.x = w8001->max_pen_x; | 429 | touch.x = w8001->max_pen_x; |
424 | touch.y = w8001->max_pen_y; | 430 | touch.y = w8001->max_pen_y; |
431 | touch.panel_res = W8001_PEN_RESOLUTION; | ||
425 | } | 432 | } |
426 | 433 | ||
427 | input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); | 434 | input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); |
428 | input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); | 435 | input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); |
436 | input_abs_set_res(dev, ABS_X, touch.panel_res); | ||
437 | input_abs_set_res(dev, ABS_Y, touch.panel_res); | ||
429 | 438 | ||
430 | switch (touch.sensor_id) { | 439 | switch (touch.sensor_id) { |
431 | case 0: | 440 | case 0: |
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c index b2752b6e7a2..e725d51e773 100644 --- a/drivers/rtc/rtc-at32ap700x.c +++ b/drivers/rtc/rtc-at32ap700x.c | |||
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
134 | return ret; | 134 | return ret; |
135 | } | 135 | } |
136 | 136 | ||
137 | static int at32_rtc_ioctl(struct device *dev, unsigned int cmd, | 137 | static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
138 | unsigned long arg) | ||
139 | { | 138 | { |
140 | struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); | 139 | struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); |
141 | int ret = 0; | 140 | int ret = 0; |
142 | 141 | ||
143 | spin_lock_irq(&rtc->lock); | 142 | spin_lock_irq(&rtc->lock); |
144 | 143 | ||
145 | switch (cmd) { | 144 | if(enabled) { |
146 | case RTC_AIE_ON: | ||
147 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { | 145 | if (rtc_readl(rtc, VAL) > rtc->alarm_time) { |
148 | ret = -EINVAL; | 146 | ret = -EINVAL; |
149 | break; | 147 | goto out; |
150 | } | 148 | } |
151 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | 149 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) |
152 | | RTC_BIT(CTRL_TOPEN)); | 150 | | RTC_BIT(CTRL_TOPEN)); |
153 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); | 151 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); |
154 | rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); | 152 | rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); |
155 | break; | 153 | } else { |
156 | case RTC_AIE_OFF: | ||
157 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) | 154 | rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) |
158 | & ~RTC_BIT(CTRL_TOPEN)); | 155 | & ~RTC_BIT(CTRL_TOPEN)); |
159 | rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); | 156 | rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); |
160 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); | 157 | rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); |
161 | break; | ||
162 | default: | ||
163 | ret = -ENOIOCTLCMD; | ||
164 | break; | ||
165 | } | 158 | } |
166 | 159 | out: | |
167 | spin_unlock_irq(&rtc->lock); | 160 | spin_unlock_irq(&rtc->lock); |
168 | 161 | ||
169 | return ret; | 162 | return ret; |
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id) | |||
195 | } | 188 | } |
196 | 189 | ||
197 | static struct rtc_class_ops at32_rtc_ops = { | 190 | static struct rtc_class_ops at32_rtc_ops = { |
198 | .ioctl = at32_rtc_ioctl, | ||
199 | .read_time = at32_rtc_readtime, | 191 | .read_time = at32_rtc_readtime, |
200 | .set_time = at32_rtc_settime, | 192 | .set_time = at32_rtc_settime, |
201 | .read_alarm = at32_rtc_readalarm, | 193 | .read_alarm = at32_rtc_readalarm, |
202 | .set_alarm = at32_rtc_setalarm, | 194 | .set_alarm = at32_rtc_setalarm, |
195 | .alarm_irq_enable = at32_rtc_alarm_irq_enable, | ||
203 | }; | 196 | }; |
204 | 197 | ||
205 | static int __init at32_rtc_probe(struct platform_device *pdev) | 198 | static int __init at32_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index bc8bbca9a2e..26d1cf5d19a 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
195 | 195 | ||
196 | /* important: scrub old status before enabling IRQs */ | 196 | /* important: scrub old status before enabling IRQs */ |
197 | switch (cmd) { | 197 | switch (cmd) { |
198 | case RTC_AIE_OFF: /* alarm off */ | ||
199 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); | ||
200 | break; | ||
201 | case RTC_AIE_ON: /* alarm on */ | ||
202 | at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | ||
203 | at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); | ||
204 | break; | ||
205 | case RTC_UIE_OFF: /* update off */ | 198 | case RTC_UIE_OFF: /* update off */ |
206 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); | 199 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); |
207 | break; | 200 | break; |
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
217 | return ret; | 210 | return ret; |
218 | } | 211 | } |
219 | 212 | ||
213 | static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
214 | { | ||
215 | pr_debug("%s(): cmd=%08x\n", __func__, enabled); | ||
216 | |||
217 | if (enabled) { | ||
218 | at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | ||
219 | at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM); | ||
220 | } else | ||
221 | at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
220 | /* | 225 | /* |
221 | * Provide additional RTC information in /proc/driver/rtc | 226 | * Provide additional RTC information in /proc/driver/rtc |
222 | */ | 227 | */ |
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = { | |||
270 | .read_alarm = at91_rtc_readalarm, | 275 | .read_alarm = at91_rtc_readalarm, |
271 | .set_alarm = at91_rtc_setalarm, | 276 | .set_alarm = at91_rtc_setalarm, |
272 | .proc = at91_rtc_proc, | 277 | .proc = at91_rtc_proc, |
278 | .alarm_irq_enable = at91_rtc_alarm_irq_enable, | ||
273 | }; | 279 | }; |
274 | 280 | ||
275 | /* | 281 | /* |
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index f677e0710ca..c36749e4c92 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
229 | dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); | 229 | dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); |
230 | 230 | ||
231 | switch (cmd) { | 231 | switch (cmd) { |
232 | case RTC_AIE_OFF: /* alarm off */ | ||
233 | rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); | ||
234 | break; | ||
235 | case RTC_AIE_ON: /* alarm on */ | ||
236 | rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); | ||
237 | break; | ||
238 | case RTC_UIE_OFF: /* update off */ | 232 | case RTC_UIE_OFF: /* update off */ |
239 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); | 233 | rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); |
240 | break; | 234 | break; |
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
249 | return ret; | 243 | return ret; |
250 | } | 244 | } |
251 | 245 | ||
246 | static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
247 | { | ||
248 | struct sam9_rtc *rtc = dev_get_drvdata(dev); | ||
249 | u32 mr = rtt_readl(rtc, MR); | ||
250 | |||
251 | dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr); | ||
252 | if (enabled) | ||
253 | rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN); | ||
254 | else | ||
255 | rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
252 | /* | 259 | /* |
253 | * Provide additional RTC information in /proc/driver/rtc | 260 | * Provide additional RTC information in /proc/driver/rtc |
254 | */ | 261 | */ |
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = { | |||
302 | .read_alarm = at91_rtc_readalarm, | 309 | .read_alarm = at91_rtc_readalarm, |
303 | .set_alarm = at91_rtc_setalarm, | 310 | .set_alarm = at91_rtc_setalarm, |
304 | .proc = at91_rtc_proc, | 311 | .proc = at91_rtc_proc, |
312 | .alarm_irq_enable = at91_rtc_alarm_irq_enable, | ||
305 | }; | 313 | }; |
306 | 314 | ||
307 | /* | 315 | /* |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index b4b6087f223..17971d93354 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar | |||
259 | bfin_rtc_int_clear(~RTC_ISTAT_SEC); | 259 | bfin_rtc_int_clear(~RTC_ISTAT_SEC); |
260 | break; | 260 | break; |
261 | 261 | ||
262 | case RTC_AIE_ON: | ||
263 | dev_dbg_stamp(dev); | ||
264 | bfin_rtc_int_set_alarm(rtc); | ||
265 | break; | ||
266 | case RTC_AIE_OFF: | ||
267 | dev_dbg_stamp(dev); | ||
268 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | ||
269 | break; | ||
270 | |||
271 | default: | 262 | default: |
272 | dev_dbg_stamp(dev); | 263 | dev_dbg_stamp(dev); |
273 | ret = -ENOIOCTLCMD; | 264 | ret = -ENOIOCTLCMD; |
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar | |||
276 | return ret; | 267 | return ret; |
277 | } | 268 | } |
278 | 269 | ||
270 | static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
271 | { | ||
272 | struct bfin_rtc *rtc = dev_get_drvdata(dev); | ||
273 | |||
274 | dev_dbg_stamp(dev); | ||
275 | if (enabled) | ||
276 | bfin_rtc_int_set_alarm(rtc); | ||
277 | else | ||
278 | bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)); | ||
279 | return 0; | ||
280 | } | ||
280 | |||
279 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) | 281 | static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) |
280 | { | 282 | { |
281 | struct bfin_rtc *rtc = dev_get_drvdata(dev); | 283 | struct bfin_rtc *rtc = dev_get_drvdata(dev); |
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = { | |||
362 | .read_alarm = bfin_rtc_read_alarm, | 364 | .read_alarm = bfin_rtc_read_alarm, |
363 | .set_alarm = bfin_rtc_set_alarm, | 365 | .set_alarm = bfin_rtc_set_alarm, |
364 | .proc = bfin_rtc_proc, | 366 | .proc = bfin_rtc_proc, |
367 | .alarm_irq_enable = bfin_rtc_alarm_irq_enable, | ||
365 | }; | 368 | }; |
366 | 369 | ||
367 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) | 370 | static int __devinit bfin_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 212b16edafc..37c3cc1b3dd 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -154,19 +154,7 @@ static long rtc_dev_ioctl(struct file *file, | |||
154 | if (err) | 154 | if (err) |
155 | goto done; | 155 | goto done; |
156 | 156 | ||
157 | /* try the driver's ioctl interface */ | 157 | /* |
158 | if (ops->ioctl) { | ||
159 | err = ops->ioctl(rtc->dev.parent, cmd, arg); | ||
160 | if (err != -ENOIOCTLCMD) { | ||
161 | mutex_unlock(&rtc->ops_lock); | ||
162 | return err; | ||
163 | } | ||
164 | } | ||
165 | |||
166 | /* if the driver does not provide the ioctl interface | ||
167 | * or if that particular ioctl was not implemented | ||
168 | * (-ENOIOCTLCMD), we will try to emulate here. | ||
169 | * | ||
170 | * Drivers *SHOULD NOT* provide ioctl implementations | 158 | * Drivers *SHOULD NOT* provide ioctl implementations |
171 | * for these requests. Instead, provide methods to | 159 | * for these requests. Instead, provide methods to |
172 | * support the following code, so that the RTC's main | 160 | * support the following code, so that the RTC's main |
@@ -329,7 +317,12 @@ static long rtc_dev_ioctl(struct file *file, | |||
329 | return err; | 317 | return err; |
330 | 318 | ||
331 | default: | 319 | default: |
332 | err = -ENOTTY; | 320 | /* Finally try the driver's ioctl interface */ |
321 | if (ops->ioctl) { | ||
322 | err = ops->ioctl(rtc->dev.parent, cmd, arg); | ||
323 | if (err == -ENOIOCTLCMD) | ||
324 | err = -ENOTTY; | ||
325 | } | ||
333 | break; | 326 | break; |
334 | } | 327 | } |
335 | 328 | ||
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index bf430f9091e..60ce6960082 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c | |||
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg) | |||
40 | __raw_writel(data, &priv->rtcregs[reg]); | 40 | __raw_writel(data, &priv->rtcregs[reg]); |
41 | } | 41 | } |
42 | 42 | ||
43 | |||
44 | static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
45 | { | ||
46 | struct ds1286_priv *priv = dev_get_drvdata(dev); | ||
47 | unsigned long flags; | ||
48 | unsigned char val; | ||
49 | |||
50 | /* Allow or mask alarm interrupts */ | ||
51 | spin_lock_irqsave(&priv->lock, flags); | ||
52 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
53 | if (enabled) | ||
54 | val &= ~RTC_TDM; | ||
55 | else | ||
56 | val |= RTC_TDM; | ||
57 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
58 | spin_unlock_irqrestore(&priv->lock, flags); | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | |||
43 | #ifdef CONFIG_RTC_INTF_DEV | 63 | #ifdef CONFIG_RTC_INTF_DEV |
44 | 64 | ||
45 | static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 65 | static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
49 | unsigned char val; | 69 | unsigned char val; |
50 | 70 | ||
51 | switch (cmd) { | 71 | switch (cmd) { |
52 | case RTC_AIE_OFF: | ||
53 | /* Mask alarm int. enab. bit */ | ||
54 | spin_lock_irqsave(&priv->lock, flags); | ||
55 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
56 | val |= RTC_TDM; | ||
57 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
58 | spin_unlock_irqrestore(&priv->lock, flags); | ||
59 | break; | ||
60 | case RTC_AIE_ON: | ||
61 | /* Allow alarm interrupts. */ | ||
62 | spin_lock_irqsave(&priv->lock, flags); | ||
63 | val = ds1286_rtc_read(priv, RTC_CMD); | ||
64 | val &= ~RTC_TDM; | ||
65 | ds1286_rtc_write(priv, val, RTC_CMD); | ||
66 | spin_unlock_irqrestore(&priv->lock, flags); | ||
67 | break; | ||
68 | case RTC_WIE_OFF: | 72 | case RTC_WIE_OFF: |
69 | /* Mask watchdog int. enab. bit */ | 73 | /* Mask watchdog int. enab. bit */ |
70 | spin_lock_irqsave(&priv->lock, flags); | 74 | spin_lock_irqsave(&priv->lock, flags); |
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
316 | } | 320 | } |
317 | 321 | ||
318 | static const struct rtc_class_ops ds1286_ops = { | 322 | static const struct rtc_class_ops ds1286_ops = { |
319 | .ioctl = ds1286_ioctl, | 323 | .ioctl = ds1286_ioctl, |
320 | .proc = ds1286_proc, | 324 | .proc = ds1286_proc, |
321 | .read_time = ds1286_read_time, | 325 | .read_time = ds1286_read_time, |
322 | .set_time = ds1286_set_time, | 326 | .set_time = ds1286_set_time, |
323 | .read_alarm = ds1286_read_alarm, | 327 | .read_alarm = ds1286_read_alarm, |
324 | .set_alarm = ds1286_set_alarm, | 328 | .set_alarm = ds1286_set_alarm, |
329 | .alarm_irq_enable = ds1286_alarm_irq_enable, | ||
325 | }; | 330 | }; |
326 | 331 | ||
327 | static int __devinit ds1286_probe(struct platform_device *pdev) | 332 | static int __devinit ds1286_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 077af1d7b9e..57fbcc149ba 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c | |||
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour) | |||
139 | * Interface to RTC framework | 139 | * Interface to RTC framework |
140 | */ | 140 | */ |
141 | 141 | ||
142 | #ifdef CONFIG_RTC_INTF_DEV | 142 | static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled) |
143 | |||
144 | /* | ||
145 | * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl) | ||
146 | */ | ||
147 | static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg) | ||
148 | { | 143 | { |
149 | struct ds1305 *ds1305 = dev_get_drvdata(dev); | 144 | struct ds1305 *ds1305 = dev_get_drvdata(dev); |
150 | u8 buf[2]; | 145 | u8 buf[2]; |
151 | int status = -ENOIOCTLCMD; | 146 | long err = -EINVAL; |
152 | 147 | ||
153 | buf[0] = DS1305_WRITE | DS1305_CONTROL; | 148 | buf[0] = DS1305_WRITE | DS1305_CONTROL; |
154 | buf[1] = ds1305->ctrl[0]; | 149 | buf[1] = ds1305->ctrl[0]; |
155 | 150 | ||
156 | switch (cmd) { | 151 | if (enabled) { |
157 | case RTC_AIE_OFF: | ||
158 | status = 0; | ||
159 | if (!(buf[1] & DS1305_AEI0)) | ||
160 | goto done; | ||
161 | buf[1] &= ~DS1305_AEI0; | ||
162 | break; | ||
163 | |||
164 | case RTC_AIE_ON: | ||
165 | status = 0; | ||
166 | if (ds1305->ctrl[0] & DS1305_AEI0) | 152 | if (ds1305->ctrl[0] & DS1305_AEI0) |
167 | goto done; | 153 | goto done; |
168 | buf[1] |= DS1305_AEI0; | 154 | buf[1] |= DS1305_AEI0; |
169 | break; | 155 | } else { |
170 | } | 156 | if (!(buf[1] & DS1305_AEI0)) |
171 | if (status == 0) { | 157 | goto done; |
172 | status = spi_write_then_read(ds1305->spi, buf, sizeof buf, | 158 | buf[1] &= ~DS1305_AEI0; |
173 | NULL, 0); | ||
174 | if (status >= 0) | ||
175 | ds1305->ctrl[0] = buf[1]; | ||
176 | } | 159 | } |
177 | 160 | err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0); | |
161 | if (err >= 0) | ||
162 | ds1305->ctrl[0] = buf[1]; | ||
178 | done: | 163 | done: |
179 | return status; | 164 | return err; |
165 | |||
180 | } | 166 | } |
181 | 167 | ||
182 | #else | ||
183 | #define ds1305_ioctl NULL | ||
184 | #endif | ||
185 | 168 | ||
186 | /* | 169 | /* |
187 | * Get/set of date and time is pretty normal. | 170 | * Get/set of date and time is pretty normal. |
@@ -460,12 +443,12 @@ done: | |||
460 | #endif | 443 | #endif |
461 | 444 | ||
462 | static const struct rtc_class_ops ds1305_ops = { | 445 | static const struct rtc_class_ops ds1305_ops = { |
463 | .ioctl = ds1305_ioctl, | ||
464 | .read_time = ds1305_get_time, | 446 | .read_time = ds1305_get_time, |
465 | .set_time = ds1305_set_time, | 447 | .set_time = ds1305_set_time, |
466 | .read_alarm = ds1305_get_alarm, | 448 | .read_alarm = ds1305_get_alarm, |
467 | .set_alarm = ds1305_set_alarm, | 449 | .set_alarm = ds1305_set_alarm, |
468 | .proc = ds1305_proc, | 450 | .proc = ds1305_proc, |
451 | .alarm_irq_enable = ds1305_alarm_irq_enable, | ||
469 | }; | 452 | }; |
470 | 453 | ||
471 | static void ds1305_work(struct work_struct *work) | 454 | static void ds1305_work(struct work_struct *work) |
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 0d559b6416d..4724ba3acf1 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c | |||
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) | |||
495 | return 0; | 495 | return 0; |
496 | } | 496 | } |
497 | 497 | ||
498 | static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 498 | static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled) |
499 | { | 499 | { |
500 | struct i2c_client *client = to_i2c_client(dev); | 500 | struct i2c_client *client = to_i2c_client(dev); |
501 | struct ds1307 *ds1307 = i2c_get_clientdata(client); | 501 | struct ds1307 *ds1307 = i2c_get_clientdata(client); |
502 | int ret; | 502 | int ret; |
503 | 503 | ||
504 | switch (cmd) { | 504 | if (!test_bit(HAS_ALARM, &ds1307->flags)) |
505 | case RTC_AIE_OFF: | 505 | return -ENOTTY; |
506 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
507 | return -ENOTTY; | ||
508 | |||
509 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); | ||
510 | if (ret < 0) | ||
511 | return ret; | ||
512 | |||
513 | ret &= ~DS1337_BIT_A1IE; | ||
514 | |||
515 | ret = i2c_smbus_write_byte_data(client, | ||
516 | DS1337_REG_CONTROL, ret); | ||
517 | if (ret < 0) | ||
518 | return ret; | ||
519 | |||
520 | break; | ||
521 | |||
522 | case RTC_AIE_ON: | ||
523 | if (!test_bit(HAS_ALARM, &ds1307->flags)) | ||
524 | return -ENOTTY; | ||
525 | 506 | ||
526 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); | 507 | ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); |
527 | if (ret < 0) | 508 | if (ret < 0) |
528 | return ret; | 509 | return ret; |
529 | 510 | ||
511 | if (enabled) | ||
530 | ret |= DS1337_BIT_A1IE; | 512 | ret |= DS1337_BIT_A1IE; |
513 | else | ||
514 | ret &= ~DS1337_BIT_A1IE; | ||
531 | 515 | ||
532 | ret = i2c_smbus_write_byte_data(client, | 516 | ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret); |
533 | DS1337_REG_CONTROL, ret); | 517 | if (ret < 0) |
534 | if (ret < 0) | 518 | return ret; |
535 | return ret; | ||
536 | |||
537 | break; | ||
538 | |||
539 | default: | ||
540 | return -ENOIOCTLCMD; | ||
541 | } | ||
542 | 519 | ||
543 | return 0; | 520 | return 0; |
544 | } | 521 | } |
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { | |||
548 | .set_time = ds1307_set_time, | 525 | .set_time = ds1307_set_time, |
549 | .read_alarm = ds1337_read_alarm, | 526 | .read_alarm = ds1337_read_alarm, |
550 | .set_alarm = ds1337_set_alarm, | 527 | .set_alarm = ds1337_set_alarm, |
551 | .ioctl = ds1307_ioctl, | 528 | .alarm_irq_enable = ds1307_alarm_irq_enable, |
552 | }; | 529 | }; |
553 | 530 | ||
554 | /*----------------------------------------------------------------------*/ | 531 | /*----------------------------------------------------------------------*/ |
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 47fb6357c34..d834a63ec4b 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c | |||
@@ -307,42 +307,25 @@ unlock: | |||
307 | mutex_unlock(&ds1374->mutex); | 307 | mutex_unlock(&ds1374->mutex); |
308 | } | 308 | } |
309 | 309 | ||
310 | static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 310 | static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled) |
311 | { | 311 | { |
312 | struct i2c_client *client = to_i2c_client(dev); | 312 | struct i2c_client *client = to_i2c_client(dev); |
313 | struct ds1374 *ds1374 = i2c_get_clientdata(client); | 313 | struct ds1374 *ds1374 = i2c_get_clientdata(client); |
314 | int ret = -ENOIOCTLCMD; | 314 | int ret; |
315 | 315 | ||
316 | mutex_lock(&ds1374->mutex); | 316 | mutex_lock(&ds1374->mutex); |
317 | 317 | ||
318 | switch (cmd) { | 318 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); |
319 | case RTC_AIE_OFF: | 319 | if (ret < 0) |
320 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); | 320 | goto out; |
321 | if (ret < 0) | ||
322 | goto out; | ||
323 | |||
324 | ret &= ~DS1374_REG_CR_WACE; | ||
325 | |||
326 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | ||
327 | if (ret < 0) | ||
328 | goto out; | ||
329 | |||
330 | break; | ||
331 | |||
332 | case RTC_AIE_ON: | ||
333 | ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); | ||
334 | if (ret < 0) | ||
335 | goto out; | ||
336 | 321 | ||
322 | if (enabled) { | ||
337 | ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; | 323 | ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; |
338 | ret &= ~DS1374_REG_CR_WDALM; | 324 | ret &= ~DS1374_REG_CR_WDALM; |
339 | 325 | } else { | |
340 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | 326 | ret &= ~DS1374_REG_CR_WACE; |
341 | if (ret < 0) | ||
342 | goto out; | ||
343 | |||
344 | break; | ||
345 | } | 327 | } |
328 | ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); | ||
346 | 329 | ||
347 | out: | 330 | out: |
348 | mutex_unlock(&ds1374->mutex); | 331 | mutex_unlock(&ds1374->mutex); |
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = { | |||
354 | .set_time = ds1374_set_time, | 337 | .set_time = ds1374_set_time, |
355 | .read_alarm = ds1374_read_alarm, | 338 | .read_alarm = ds1374_read_alarm, |
356 | .set_alarm = ds1374_set_alarm, | 339 | .set_alarm = ds1374_set_alarm, |
357 | .ioctl = ds1374_ioctl, | 340 | .alarm_irq_enable = ds1374_alarm_irq_enable, |
358 | }; | 341 | }; |
359 | 342 | ||
360 | static int ds1374_probe(struct i2c_client *client, | 343 | static int ds1374_probe(struct i2c_client *client, |
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 5a8daa35806..69fe664a222 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c | |||
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
213 | return m41t80_set_datetime(to_i2c_client(dev), tm); | 213 | return m41t80_set_datetime(to_i2c_client(dev), tm); |
214 | } | 214 | } |
215 | 215 | ||
216 | #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) | 216 | static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
217 | static int | ||
218 | m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | ||
219 | { | 217 | { |
220 | struct i2c_client *client = to_i2c_client(dev); | 218 | struct i2c_client *client = to_i2c_client(dev); |
221 | int rc; | 219 | int rc; |
222 | 220 | ||
223 | switch (cmd) { | ||
224 | case RTC_AIE_OFF: | ||
225 | case RTC_AIE_ON: | ||
226 | break; | ||
227 | default: | ||
228 | return -ENOIOCTLCMD; | ||
229 | } | ||
230 | |||
231 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); | 221 | rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); |
232 | if (rc < 0) | 222 | if (rc < 0) |
233 | goto err; | 223 | goto err; |
234 | switch (cmd) { | 224 | |
235 | case RTC_AIE_OFF: | 225 | if (enabled) |
236 | rc &= ~M41T80_ALMON_AFE; | ||
237 | break; | ||
238 | case RTC_AIE_ON: | ||
239 | rc |= M41T80_ALMON_AFE; | 226 | rc |= M41T80_ALMON_AFE; |
240 | break; | 227 | else |
241 | } | 228 | rc &= ~M41T80_ALMON_AFE; |
229 | |||
242 | if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) | 230 | if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) |
243 | goto err; | 231 | goto err; |
232 | |||
244 | return 0; | 233 | return 0; |
245 | err: | 234 | err: |
246 | return -EIO; | 235 | return -EIO; |
247 | } | 236 | } |
248 | #else | ||
249 | #define m41t80_rtc_ioctl NULL | ||
250 | #endif | ||
251 | 237 | ||
252 | static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) | 238 | static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) |
253 | { | 239 | { |
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = { | |||
374 | .read_alarm = m41t80_rtc_read_alarm, | 360 | .read_alarm = m41t80_rtc_read_alarm, |
375 | .set_alarm = m41t80_rtc_set_alarm, | 361 | .set_alarm = m41t80_rtc_set_alarm, |
376 | .proc = m41t80_rtc_proc, | 362 | .proc = m41t80_rtc_proc, |
377 | .ioctl = m41t80_rtc_ioctl, | 363 | .alarm_irq_enable = m41t80_rtc_alarm_irq_enable, |
378 | }; | 364 | }; |
379 | 365 | ||
380 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | 366 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) |
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index a99a0b554eb..3978f4caf72 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c | |||
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
263 | /* | 263 | /* |
264 | * Handle commands from user-space | 264 | * Handle commands from user-space |
265 | */ | 265 | */ |
266 | static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd, | 266 | static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
267 | unsigned long arg) | ||
268 | { | 267 | { |
269 | struct platform_device *pdev = to_platform_device(dev); | 268 | struct platform_device *pdev = to_platform_device(dev); |
270 | struct m48t59_plat_data *pdata = pdev->dev.platform_data; | 269 | struct m48t59_plat_data *pdata = pdev->dev.platform_data; |
271 | struct m48t59_private *m48t59 = platform_get_drvdata(pdev); | 270 | struct m48t59_private *m48t59 = platform_get_drvdata(pdev); |
272 | unsigned long flags; | 271 | unsigned long flags; |
273 | int ret = 0; | ||
274 | 272 | ||
275 | spin_lock_irqsave(&m48t59->lock, flags); | 273 | spin_lock_irqsave(&m48t59->lock, flags); |
276 | switch (cmd) { | 274 | if (enabled) |
277 | case RTC_AIE_OFF: /* alarm interrupt off */ | ||
278 | M48T59_WRITE(0x00, M48T59_INTR); | ||
279 | break; | ||
280 | case RTC_AIE_ON: /* alarm interrupt on */ | ||
281 | M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); | 275 | M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); |
282 | break; | 276 | else |
283 | default: | 277 | M48T59_WRITE(0x00, M48T59_INTR); |
284 | ret = -ENOIOCTLCMD; | ||
285 | break; | ||
286 | } | ||
287 | spin_unlock_irqrestore(&m48t59->lock, flags); | 278 | spin_unlock_irqrestore(&m48t59->lock, flags); |
288 | 279 | ||
289 | return ret; | 280 | return 0; |
290 | } | 281 | } |
291 | 282 | ||
292 | static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) | 283 | static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) |
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id) | |||
330 | } | 321 | } |
331 | 322 | ||
332 | static const struct rtc_class_ops m48t59_rtc_ops = { | 323 | static const struct rtc_class_ops m48t59_rtc_ops = { |
333 | .ioctl = m48t59_rtc_ioctl, | ||
334 | .read_time = m48t59_rtc_read_time, | 324 | .read_time = m48t59_rtc_read_time, |
335 | .set_time = m48t59_rtc_set_time, | 325 | .set_time = m48t59_rtc_set_time, |
336 | .read_alarm = m48t59_rtc_readalarm, | 326 | .read_alarm = m48t59_rtc_readalarm, |
337 | .set_alarm = m48t59_rtc_setalarm, | 327 | .set_alarm = m48t59_rtc_setalarm, |
338 | .proc = m48t59_rtc_proc, | 328 | .proc = m48t59_rtc_proc, |
329 | .alarm_irq_enable = m48t59_rtc_alarm_irq_enable, | ||
339 | }; | 330 | }; |
340 | 331 | ||
341 | static const struct rtc_class_ops m48t02_rtc_ops = { | 332 | static const struct rtc_class_ops m48t02_rtc_ops = { |
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index bcd0cf63eb1..1db62db8469 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled) | |||
255 | return 0; | 255 | return 0; |
256 | } | 256 | } |
257 | 257 | ||
258 | #if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) | ||
259 | |||
260 | /* Currently, the vRTC doesn't support UIE ON/OFF */ | 258 | /* Currently, the vRTC doesn't support UIE ON/OFF */ |
261 | static int | 259 | static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
262 | mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | ||
263 | { | 260 | { |
264 | struct mrst_rtc *mrst = dev_get_drvdata(dev); | 261 | struct mrst_rtc *mrst = dev_get_drvdata(dev); |
265 | unsigned long flags; | 262 | unsigned long flags; |
266 | 263 | ||
267 | switch (cmd) { | ||
268 | case RTC_AIE_OFF: | ||
269 | case RTC_AIE_ON: | ||
270 | if (!mrst->irq) | ||
271 | return -EINVAL; | ||
272 | break; | ||
273 | default: | ||
274 | /* PIE ON/OFF is handled by mrst_irq_set_state() */ | ||
275 | return -ENOIOCTLCMD; | ||
276 | } | ||
277 | |||
278 | spin_lock_irqsave(&rtc_lock, flags); | 264 | spin_lock_irqsave(&rtc_lock, flags); |
279 | switch (cmd) { | 265 | if (enabled) |
280 | case RTC_AIE_OFF: /* alarm off */ | ||
281 | mrst_irq_disable(mrst, RTC_AIE); | ||
282 | break; | ||
283 | case RTC_AIE_ON: /* alarm on */ | ||
284 | mrst_irq_enable(mrst, RTC_AIE); | 266 | mrst_irq_enable(mrst, RTC_AIE); |
285 | break; | 267 | else |
286 | } | 268 | mrst_irq_disable(mrst, RTC_AIE); |
287 | spin_unlock_irqrestore(&rtc_lock, flags); | 269 | spin_unlock_irqrestore(&rtc_lock, flags); |
288 | return 0; | 270 | return 0; |
289 | } | 271 | } |
290 | 272 | ||
291 | #else | ||
292 | #define mrst_rtc_ioctl NULL | ||
293 | #endif | ||
294 | 273 | ||
295 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) | 274 | #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) |
296 | 275 | ||
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq) | |||
317 | #endif | 296 | #endif |
318 | 297 | ||
319 | static const struct rtc_class_ops mrst_rtc_ops = { | 298 | static const struct rtc_class_ops mrst_rtc_ops = { |
320 | .ioctl = mrst_rtc_ioctl, | ||
321 | .read_time = mrst_read_time, | 299 | .read_time = mrst_read_time, |
322 | .set_time = mrst_set_time, | 300 | .set_time = mrst_set_time, |
323 | .read_alarm = mrst_read_alarm, | 301 | .read_alarm = mrst_read_alarm, |
324 | .set_alarm = mrst_set_alarm, | 302 | .set_alarm = mrst_set_alarm, |
325 | .proc = mrst_procfs, | 303 | .proc = mrst_procfs, |
326 | .irq_set_state = mrst_irq_set_state, | 304 | .irq_set_state = mrst_irq_set_state, |
305 | .alarm_irq_enable = mrst_rtc_alarm_irq_enable, | ||
327 | }; | 306 | }; |
328 | 307 | ||
329 | static struct mrst_rtc mrst_rtc; | 308 | static struct mrst_rtc mrst_rtc; |
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index b2fff0ca49f..67820626e18 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c | |||
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv, | |||
82 | static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, | 82 | static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, |
83 | unsigned int reg) | 83 | unsigned int reg) |
84 | { | 84 | { |
85 | return __raw_writel(val, &priv->regs[reg]); | 85 | __raw_writel(val, &priv->regs[reg]); |
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, | 88 | static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, |
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c index bcca4729855..60627a76451 100644 --- a/drivers/rtc/rtc-mv.c +++ b/drivers/rtc/rtc-mv.c | |||
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | |||
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int mv_rtc_ioctl(struct device *dev, unsigned int cmd, | 172 | static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) |
173 | unsigned long arg) | ||
174 | { | 173 | { |
175 | struct platform_device *pdev = to_platform_device(dev); | 174 | struct platform_device *pdev = to_platform_device(dev); |
176 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); | 175 | struct rtc_plat_data *pdata = platform_get_drvdata(pdev); |
177 | void __iomem *ioaddr = pdata->ioaddr; | 176 | void __iomem *ioaddr = pdata->ioaddr; |
178 | 177 | ||
179 | if (pdata->irq < 0) | 178 | if (pdata->irq < 0) |
180 | return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ | 179 | return -EINVAL; /* fall back into rtc-dev's emulation */ |
181 | switch (cmd) { | 180 | |
182 | case RTC_AIE_OFF: | 181 | if (enabled) |
183 | writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); | ||
184 | break; | ||
185 | case RTC_AIE_ON: | ||
186 | writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); | 182 | writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); |
187 | break; | 183 | else |
188 | default: | 184 | writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); |
189 | return -ENOIOCTLCMD; | ||
190 | } | ||
191 | return 0; | 185 | return 0; |
192 | } | 186 | } |
193 | 187 | ||
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = { | |||
216 | .set_time = mv_rtc_set_time, | 210 | .set_time = mv_rtc_set_time, |
217 | .read_alarm = mv_rtc_read_alarm, | 211 | .read_alarm = mv_rtc_read_alarm, |
218 | .set_alarm = mv_rtc_set_alarm, | 212 | .set_alarm = mv_rtc_set_alarm, |
219 | .ioctl = mv_rtc_ioctl, | 213 | .alarm_irq_enable = mv_rtc_alarm_irq_enable, |
220 | }; | 214 | }; |
221 | 215 | ||
222 | static int __devinit mv_rtc_probe(struct platform_device *pdev) | 216 | static int __devinit mv_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index e72b523c79a..b4dbf3a319b 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
143 | u8 reg; | 143 | u8 reg; |
144 | 144 | ||
145 | switch (cmd) { | 145 | switch (cmd) { |
146 | case RTC_AIE_OFF: | ||
147 | case RTC_AIE_ON: | ||
148 | case RTC_UIE_OFF: | 146 | case RTC_UIE_OFF: |
149 | case RTC_UIE_ON: | 147 | case RTC_UIE_ON: |
150 | break; | 148 | break; |
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
156 | rtc_wait_not_busy(); | 154 | rtc_wait_not_busy(); |
157 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); | 155 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); |
158 | switch (cmd) { | 156 | switch (cmd) { |
159 | /* AIE = Alarm Interrupt Enable */ | ||
160 | case RTC_AIE_OFF: | ||
161 | reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
162 | break; | ||
163 | case RTC_AIE_ON: | ||
164 | reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
165 | break; | ||
166 | /* UIE = Update Interrupt Enable (1/second) */ | 157 | /* UIE = Update Interrupt Enable (1/second) */ |
167 | case RTC_UIE_OFF: | 158 | case RTC_UIE_OFF: |
168 | reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; | 159 | reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; |
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
182 | #define omap_rtc_ioctl NULL | 173 | #define omap_rtc_ioctl NULL |
183 | #endif | 174 | #endif |
184 | 175 | ||
176 | static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
177 | { | ||
178 | u8 reg; | ||
179 | |||
180 | local_irq_disable(); | ||
181 | rtc_wait_not_busy(); | ||
182 | reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); | ||
183 | if (enabled) | ||
184 | reg |= OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
185 | else | ||
186 | reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM; | ||
187 | rtc_wait_not_busy(); | ||
188 | rtc_write(reg, OMAP_RTC_INTERRUPTS_REG); | ||
189 | local_irq_enable(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
185 | /* this hardware doesn't support "don't care" alarm fields */ | 194 | /* this hardware doesn't support "don't care" alarm fields */ |
186 | static int tm2bcd(struct rtc_time *tm) | 195 | static int tm2bcd(struct rtc_time *tm) |
187 | { | 196 | { |
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = { | |||
309 | .set_time = omap_rtc_set_time, | 318 | .set_time = omap_rtc_set_time, |
310 | .read_alarm = omap_rtc_read_alarm, | 319 | .read_alarm = omap_rtc_read_alarm, |
311 | .set_alarm = omap_rtc_set_alarm, | 320 | .set_alarm = omap_rtc_set_alarm, |
321 | .alarm_irq_enable = omap_rtc_alarm_irq_enable, | ||
312 | }; | 322 | }; |
313 | 323 | ||
314 | static int omap_rtc_alarm; | 324 | static int omap_rtc_alarm; |
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 36eb6618446..694da39b6dd 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c | |||
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv, | |||
76 | static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, | 76 | static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, |
77 | unsigned int reg) | 77 | unsigned int reg) |
78 | { | 78 | { |
79 | return __raw_writel(val, &priv->regs[reg]); | 79 | __raw_writel(val, &priv->regs[reg]); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void rp5c01_lock(struct rp5c01_priv *priv) | 82 | static void rp5c01_lock(struct rp5c01_priv *priv) |
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index dd14e202c2c..6aaa1550e3b 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c | |||
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
299 | if (rs5c->type == rtc_rs5c372a | 299 | if (rs5c->type == rtc_rs5c372a |
300 | && (buf & RS5C372A_CTRL1_SL1)) | 300 | && (buf & RS5C372A_CTRL1_SL1)) |
301 | return -ENOIOCTLCMD; | 301 | return -ENOIOCTLCMD; |
302 | case RTC_AIE_OFF: | ||
303 | case RTC_AIE_ON: | ||
304 | /* these irq management calls only make sense for chips | ||
305 | * which are wired up to an IRQ. | ||
306 | */ | ||
307 | if (!rs5c->has_irq) | ||
308 | return -ENOIOCTLCMD; | ||
309 | break; | ||
310 | default: | 302 | default: |
311 | return -ENOIOCTLCMD; | 303 | return -ENOIOCTLCMD; |
312 | } | 304 | } |
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
317 | 309 | ||
318 | addr = RS5C_ADDR(RS5C_REG_CTRL1); | 310 | addr = RS5C_ADDR(RS5C_REG_CTRL1); |
319 | switch (cmd) { | 311 | switch (cmd) { |
320 | case RTC_AIE_OFF: /* alarm off */ | ||
321 | buf &= ~RS5C_CTRL1_AALE; | ||
322 | break; | ||
323 | case RTC_AIE_ON: /* alarm on */ | ||
324 | buf |= RS5C_CTRL1_AALE; | ||
325 | break; | ||
326 | case RTC_UIE_OFF: /* update off */ | 312 | case RTC_UIE_OFF: /* update off */ |
327 | buf &= ~RS5C_CTRL1_CT_MASK; | 313 | buf &= ~RS5C_CTRL1_CT_MASK; |
328 | break; | 314 | break; |
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
347 | #endif | 333 | #endif |
348 | 334 | ||
349 | 335 | ||
336 | static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
337 | { | ||
338 | struct i2c_client *client = to_i2c_client(dev); | ||
339 | struct rs5c372 *rs5c = i2c_get_clientdata(client); | ||
340 | unsigned char buf; | ||
341 | int status, addr; | ||
342 | |||
343 | buf = rs5c->regs[RS5C_REG_CTRL1]; | ||
344 | |||
345 | if (!rs5c->has_irq) | ||
346 | return -EINVAL; | ||
347 | |||
348 | status = rs5c_get_regs(rs5c); | ||
349 | if (status < 0) | ||
350 | return status; | ||
351 | |||
352 | addr = RS5C_ADDR(RS5C_REG_CTRL1); | ||
353 | if (enabled) | ||
354 | buf |= RS5C_CTRL1_AALE; | ||
355 | else | ||
356 | buf &= ~RS5C_CTRL1_AALE; | ||
357 | |||
358 | if (i2c_smbus_write_byte_data(client, addr, buf) < 0) { | ||
359 | printk(KERN_WARNING "%s: can't update alarm\n", | ||
360 | rs5c->rtc->name); | ||
361 | status = -EIO; | ||
362 | } else | ||
363 | rs5c->regs[RS5C_REG_CTRL1] = buf; | ||
364 | |||
365 | return status; | ||
366 | } | ||
367 | |||
368 | |||
350 | /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, | 369 | /* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, |
351 | * which only exposes a polled programming interface; and since | 370 | * which only exposes a polled programming interface; and since |
352 | * these calls map directly to those EFI requests; we don't demand | 371 | * these calls map directly to those EFI requests; we don't demand |
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = { | |||
466 | .set_time = rs5c372_rtc_set_time, | 485 | .set_time = rs5c372_rtc_set_time, |
467 | .read_alarm = rs5c_read_alarm, | 486 | .read_alarm = rs5c_read_alarm, |
468 | .set_alarm = rs5c_set_alarm, | 487 | .set_alarm = rs5c_set_alarm, |
488 | .alarm_irq_enable = rs5c_rtc_alarm_irq_enable, | ||
469 | }; | 489 | }; |
470 | 490 | ||
471 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) | 491 | #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) |
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 88ea52b8647..5dfe5ffcb0d 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
314 | unsigned long arg) | 314 | unsigned long arg) |
315 | { | 315 | { |
316 | switch (cmd) { | 316 | switch (cmd) { |
317 | case RTC_AIE_OFF: | ||
318 | spin_lock_irq(&sa1100_rtc_lock); | ||
319 | RTSR &= ~RTSR_ALE; | ||
320 | spin_unlock_irq(&sa1100_rtc_lock); | ||
321 | return 0; | ||
322 | case RTC_AIE_ON: | ||
323 | spin_lock_irq(&sa1100_rtc_lock); | ||
324 | RTSR |= RTSR_ALE; | ||
325 | spin_unlock_irq(&sa1100_rtc_lock); | ||
326 | return 0; | ||
327 | case RTC_UIE_OFF: | 317 | case RTC_UIE_OFF: |
328 | spin_lock_irq(&sa1100_rtc_lock); | 318 | spin_lock_irq(&sa1100_rtc_lock); |
329 | RTSR &= ~RTSR_HZE; | 319 | RTSR &= ~RTSR_HZE; |
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
338 | return -ENOIOCTLCMD; | 328 | return -ENOIOCTLCMD; |
339 | } | 329 | } |
340 | 330 | ||
331 | static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
332 | { | ||
333 | spin_lock_irq(&sa1100_rtc_lock); | ||
334 | if (enabled) | ||
335 | RTSR |= RTSR_ALE; | ||
336 | else | ||
337 | RTSR &= ~RTSR_ALE; | ||
338 | spin_unlock_irq(&sa1100_rtc_lock); | ||
339 | return 0; | ||
340 | } | ||
341 | |||
341 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) | 342 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) |
342 | { | 343 | { |
343 | rtc_time_to_tm(RCNR, tm); | 344 | rtc_time_to_tm(RCNR, tm); |
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = { | |||
410 | .proc = sa1100_rtc_proc, | 411 | .proc = sa1100_rtc_proc, |
411 | .irq_set_freq = sa1100_irq_set_freq, | 412 | .irq_set_freq = sa1100_irq_set_freq, |
412 | .irq_set_state = sa1100_irq_set_state, | 413 | .irq_set_state = sa1100_irq_set_state, |
414 | .alarm_irq_enable = sa1100_rtc_alarm_irq_enable, | ||
413 | }; | 415 | }; |
414 | 416 | ||
415 | static int sa1100_rtc_probe(struct platform_device *pdev) | 417 | static int sa1100_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 06e41ed9323..93314a9e7fa 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c | |||
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
350 | unsigned int ret = 0; | 350 | unsigned int ret = 0; |
351 | 351 | ||
352 | switch (cmd) { | 352 | switch (cmd) { |
353 | case RTC_AIE_OFF: | ||
354 | case RTC_AIE_ON: | ||
355 | sh_rtc_setaie(dev, cmd == RTC_AIE_ON); | ||
356 | break; | ||
357 | case RTC_UIE_OFF: | 353 | case RTC_UIE_OFF: |
358 | rtc->periodic_freq &= ~PF_OXS; | 354 | rtc->periodic_freq &= ~PF_OXS; |
359 | sh_rtc_setcie(dev, 0); | 355 | sh_rtc_setcie(dev, 0); |
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | |||
369 | return ret; | 365 | return ret; |
370 | } | 366 | } |
371 | 367 | ||
368 | static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
369 | { | ||
370 | sh_rtc_setaie(dev, enabled); | ||
371 | return 0; | ||
372 | } | ||
373 | |||
372 | static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) | 374 | static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) |
373 | { | 375 | { |
374 | struct platform_device *pdev = to_platform_device(dev); | 376 | struct platform_device *pdev = to_platform_device(dev); |
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = { | |||
604 | .irq_set_state = sh_rtc_irq_set_state, | 606 | .irq_set_state = sh_rtc_irq_set_state, |
605 | .irq_set_freq = sh_rtc_irq_set_freq, | 607 | .irq_set_freq = sh_rtc_irq_set_freq, |
606 | .proc = sh_rtc_proc, | 608 | .proc = sh_rtc_proc, |
609 | .alarm_irq_enable = sh_rtc_alarm_irq_enable, | ||
607 | }; | 610 | }; |
608 | 611 | ||
609 | static int __init sh_rtc_probe(struct platform_device *pdev) | 612 | static int __init sh_rtc_probe(struct platform_device *pdev) |
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index 51725f7755b..a82d6fe9707 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c | |||
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq) | |||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | 52 | ||
53 | static int test_rtc_ioctl(struct device *dev, unsigned int cmd, | 53 | static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) |
54 | unsigned long arg) | ||
55 | { | 54 | { |
56 | /* We do support interrupts, they're generated | 55 | return 0; |
57 | * using the sysfs interface. | ||
58 | */ | ||
59 | switch (cmd) { | ||
60 | case RTC_PIE_ON: | ||
61 | case RTC_PIE_OFF: | ||
62 | case RTC_UIE_ON: | ||
63 | case RTC_UIE_OFF: | ||
64 | case RTC_AIE_ON: | ||
65 | case RTC_AIE_OFF: | ||
66 | return 0; | ||
67 | |||
68 | default: | ||
69 | return -ENOIOCTLCMD; | ||
70 | } | ||
71 | } | 56 | } |
72 | 57 | ||
73 | static const struct rtc_class_ops test_rtc_ops = { | 58 | static const struct rtc_class_ops test_rtc_ops = { |
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = { | |||
76 | .read_alarm = test_rtc_read_alarm, | 61 | .read_alarm = test_rtc_read_alarm, |
77 | .set_alarm = test_rtc_set_alarm, | 62 | .set_alarm = test_rtc_set_alarm, |
78 | .set_mmss = test_rtc_set_mmss, | 63 | .set_mmss = test_rtc_set_mmss, |
79 | .ioctl = test_rtc_ioctl, | 64 | .alarm_irq_enable = test_rtc_alarm_irq_enable, |
80 | }; | 65 | }; |
81 | 66 | ||
82 | static ssize_t test_irq_show(struct device *dev, | 67 | static ssize_t test_irq_show(struct device *dev, |
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index c3244244e8c..769190ac6d1 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled) | |||
240 | static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) | 240 | static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) |
241 | { | 241 | { |
242 | switch (cmd) { | 242 | switch (cmd) { |
243 | case RTC_AIE_ON: | ||
244 | spin_lock_irq(&rtc_lock); | ||
245 | |||
246 | if (!alarm_enabled) { | ||
247 | enable_irq(aie_irq); | ||
248 | alarm_enabled = 1; | ||
249 | } | ||
250 | |||
251 | spin_unlock_irq(&rtc_lock); | ||
252 | break; | ||
253 | case RTC_AIE_OFF: | ||
254 | spin_lock_irq(&rtc_lock); | ||
255 | |||
256 | if (alarm_enabled) { | ||
257 | disable_irq(aie_irq); | ||
258 | alarm_enabled = 0; | ||
259 | } | ||
260 | |||
261 | spin_unlock_irq(&rtc_lock); | ||
262 | break; | ||
263 | case RTC_EPOCH_READ: | 243 | case RTC_EPOCH_READ: |
264 | return put_user(epoch, (unsigned long __user *)arg); | 244 | return put_user(epoch, (unsigned long __user *)arg); |
265 | case RTC_EPOCH_SET: | 245 | case RTC_EPOCH_SET: |
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long | |||
275 | return 0; | 255 | return 0; |
276 | } | 256 | } |
277 | 257 | ||
258 | static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
259 | { | ||
260 | spin_lock_irq(&rtc_lock); | ||
261 | if (enabled) { | ||
262 | if (!alarm_enabled) { | ||
263 | enable_irq(aie_irq); | ||
264 | alarm_enabled = 1; | ||
265 | } | ||
266 | } else { | ||
267 | if (alarm_enabled) { | ||
268 | disable_irq(aie_irq); | ||
269 | alarm_enabled = 0; | ||
270 | } | ||
271 | } | ||
272 | spin_unlock_irq(&rtc_lock); | ||
273 | return 0; | ||
274 | } | ||
275 | |||
278 | static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) | 276 | static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) |
279 | { | 277 | { |
280 | struct platform_device *pdev = (struct platform_device *)dev_id; | 278 | struct platform_device *pdev = (struct platform_device *)dev_id; |
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 8e0dd254eb1..81f13958e75 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c | |||
@@ -571,6 +571,7 @@ struct sysrq_state { | |||
571 | unsigned int alt_use; | 571 | unsigned int alt_use; |
572 | bool active; | 572 | bool active; |
573 | bool need_reinject; | 573 | bool need_reinject; |
574 | bool reinjecting; | ||
574 | }; | 575 | }; |
575 | 576 | ||
576 | static void sysrq_reinject_alt_sysrq(struct work_struct *work) | 577 | static void sysrq_reinject_alt_sysrq(struct work_struct *work) |
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) | |||
581 | unsigned int alt_code = sysrq->alt_use; | 582 | unsigned int alt_code = sysrq->alt_use; |
582 | 583 | ||
583 | if (sysrq->need_reinject) { | 584 | if (sysrq->need_reinject) { |
585 | /* we do not want the assignment to be reordered */ | ||
586 | sysrq->reinjecting = true; | ||
587 | mb(); | ||
588 | |||
584 | /* Simulate press and release of Alt + SysRq */ | 589 | /* Simulate press and release of Alt + SysRq */ |
585 | input_inject_event(handle, EV_KEY, alt_code, 1); | 590 | input_inject_event(handle, EV_KEY, alt_code, 1); |
586 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); | 591 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); |
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work) | |||
589 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); | 594 | input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); |
590 | input_inject_event(handle, EV_KEY, alt_code, 0); | 595 | input_inject_event(handle, EV_KEY, alt_code, 0); |
591 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); | 596 | input_inject_event(handle, EV_SYN, SYN_REPORT, 1); |
597 | |||
598 | mb(); | ||
599 | sysrq->reinjecting = false; | ||
592 | } | 600 | } |
593 | } | 601 | } |
594 | 602 | ||
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle, | |||
599 | bool was_active = sysrq->active; | 607 | bool was_active = sysrq->active; |
600 | bool suppress; | 608 | bool suppress; |
601 | 609 | ||
610 | /* | ||
611 | * Do not filter anything if we are in the process of re-injecting | ||
612 | * Alt+SysRq combination. | ||
613 | */ | ||
614 | if (sysrq->reinjecting) | ||
615 | return false; | ||
616 | |||
602 | switch (type) { | 617 | switch (type) { |
603 | 618 | ||
604 | case EV_SYN: | 619 | case EV_SYN: |
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle, | |||
629 | sysrq->alt_use = sysrq->alt; | 644 | sysrq->alt_use = sysrq->alt; |
630 | /* | 645 | /* |
631 | * If nothing else will be pressed we'll need | 646 | * If nothing else will be pressed we'll need |
632 | * to * re-inject Alt-SysRq keysroke. | 647 | * to re-inject Alt-SysRq keysroke. |
633 | */ | 648 | */ |
634 | sysrq->need_reinject = true; | 649 | sysrq->need_reinject = true; |
635 | } | 650 | } |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index fdce8799b98..e1aa8d607bc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) | |||
359 | 359 | ||
360 | tree = &BTRFS_I(page->mapping->host)->io_tree; | 360 | tree = &BTRFS_I(page->mapping->host)->io_tree; |
361 | 361 | ||
362 | if (page->private == EXTENT_PAGE_PRIVATE) | 362 | if (page->private == EXTENT_PAGE_PRIVATE) { |
363 | WARN_ON(1); | ||
363 | goto out; | 364 | goto out; |
364 | if (!page->private) | 365 | } |
366 | if (!page->private) { | ||
367 | WARN_ON(1); | ||
365 | goto out; | 368 | goto out; |
369 | } | ||
366 | len = page->private >> 2; | 370 | len = page->private >> 2; |
367 | WARN_ON(len == 0); | 371 | WARN_ON(len == 0); |
368 | 372 | ||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4e7e012ad66..f3c96fc0143 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -6583,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, | |||
6583 | u64 end = start + extent_key->offset - 1; | 6583 | u64 end = start + extent_key->offset - 1; |
6584 | 6584 | ||
6585 | em = alloc_extent_map(GFP_NOFS); | 6585 | em = alloc_extent_map(GFP_NOFS); |
6586 | BUG_ON(!em || IS_ERR(em)); | 6586 | BUG_ON(!em); |
6587 | 6587 | ||
6588 | em->start = start; | 6588 | em->start = start; |
6589 | em->len = extent_key->offset; | 6589 | em->len = extent_key->offset; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5e76a474cb7..92ac5192c51 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page) | |||
1946 | 1946 | ||
1947 | static void set_page_extent_head(struct page *page, unsigned long len) | 1947 | static void set_page_extent_head(struct page *page, unsigned long len) |
1948 | { | 1948 | { |
1949 | WARN_ON(!PagePrivate(page)); | ||
1949 | set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); | 1950 | set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); |
1950 | } | 1951 | } |
1951 | 1952 | ||
@@ -2821,9 +2822,17 @@ int try_release_extent_state(struct extent_map_tree *map, | |||
2821 | * at this point we can safely clear everything except the | 2822 | * at this point we can safely clear everything except the |
2822 | * locked bit and the nodatasum bit | 2823 | * locked bit and the nodatasum bit |
2823 | */ | 2824 | */ |
2824 | clear_extent_bit(tree, start, end, | 2825 | ret = clear_extent_bit(tree, start, end, |
2825 | ~(EXTENT_LOCKED | EXTENT_NODATASUM), | 2826 | ~(EXTENT_LOCKED | EXTENT_NODATASUM), |
2826 | 0, 0, NULL, mask); | 2827 | 0, 0, NULL, mask); |
2828 | |||
2829 | /* if clear_extent_bit failed for enomem reasons, | ||
2830 | * we can't allow the release to continue. | ||
2831 | */ | ||
2832 | if (ret < 0) | ||
2833 | ret = 0; | ||
2834 | else | ||
2835 | ret = 1; | ||
2827 | } | 2836 | } |
2828 | return ret; | 2837 | return ret; |
2829 | } | 2838 | } |
@@ -3194,7 +3203,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3194 | } | 3203 | } |
3195 | if (!PageUptodate(p)) | 3204 | if (!PageUptodate(p)) |
3196 | uptodate = 0; | 3205 | uptodate = 0; |
3197 | unlock_page(p); | 3206 | |
3207 | /* | ||
3208 | * see below about how we avoid a nasty race with release page | ||
3209 | * and why we unlock later | ||
3210 | */ | ||
3211 | if (i != 0) | ||
3212 | unlock_page(p); | ||
3198 | } | 3213 | } |
3199 | if (uptodate) | 3214 | if (uptodate) |
3200 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); | 3215 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); |
@@ -3218,9 +3233,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3218 | atomic_inc(&eb->refs); | 3233 | atomic_inc(&eb->refs); |
3219 | spin_unlock(&tree->buffer_lock); | 3234 | spin_unlock(&tree->buffer_lock); |
3220 | radix_tree_preload_end(); | 3235 | radix_tree_preload_end(); |
3236 | |||
3237 | /* | ||
3238 | * there is a race where release page may have | ||
3239 | * tried to find this extent buffer in the radix | ||
3240 | * but failed. It will tell the VM it is safe to | ||
3241 | * reclaim the, and it will clear the page private bit. | ||
3242 | * We must make sure to set the page private bit properly | ||
3243 | * after the extent buffer is in the radix tree so | ||
3244 | * it doesn't get lost | ||
3245 | */ | ||
3246 | set_page_extent_mapped(eb->first_page); | ||
3247 | set_page_extent_head(eb->first_page, eb->len); | ||
3248 | if (!page0) | ||
3249 | unlock_page(eb->first_page); | ||
3221 | return eb; | 3250 | return eb; |
3222 | 3251 | ||
3223 | free_eb: | 3252 | free_eb: |
3253 | if (eb->first_page && !page0) | ||
3254 | unlock_page(eb->first_page); | ||
3255 | |||
3224 | if (!atomic_dec_and_test(&eb->refs)) | 3256 | if (!atomic_dec_and_test(&eb->refs)) |
3225 | return exists; | 3257 | return exists; |
3226 | btrfs_release_extent_buffer(eb); | 3258 | btrfs_release_extent_buffer(eb); |
@@ -3271,10 +3303,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, | |||
3271 | continue; | 3303 | continue; |
3272 | 3304 | ||
3273 | lock_page(page); | 3305 | lock_page(page); |
3306 | WARN_ON(!PagePrivate(page)); | ||
3307 | |||
3308 | set_page_extent_mapped(page); | ||
3274 | if (i == 0) | 3309 | if (i == 0) |
3275 | set_page_extent_head(page, eb->len); | 3310 | set_page_extent_head(page, eb->len); |
3276 | else | ||
3277 | set_page_private(page, EXTENT_PAGE_PRIVATE); | ||
3278 | 3311 | ||
3279 | clear_page_dirty_for_io(page); | 3312 | clear_page_dirty_for_io(page); |
3280 | spin_lock_irq(&page->mapping->tree_lock); | 3313 | spin_lock_irq(&page->mapping->tree_lock); |
@@ -3464,6 +3497,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, | |||
3464 | 3497 | ||
3465 | for (i = start_i; i < num_pages; i++) { | 3498 | for (i = start_i; i < num_pages; i++) { |
3466 | page = extent_buffer_page(eb, i); | 3499 | page = extent_buffer_page(eb, i); |
3500 | |||
3501 | WARN_ON(!PagePrivate(page)); | ||
3502 | |||
3503 | set_page_extent_mapped(page); | ||
3504 | if (i == 0) | ||
3505 | set_page_extent_head(page, eb->len); | ||
3506 | |||
3467 | if (inc_all_pages) | 3507 | if (inc_all_pages) |
3468 | page_cache_get(page); | 3508 | page_cache_get(page); |
3469 | if (!PageUptodate(page)) { | 3509 | if (!PageUptodate(page)) { |
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index b0e1fce1253..2b6c12e983b 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
@@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask) | |||
51 | { | 51 | { |
52 | struct extent_map *em; | 52 | struct extent_map *em; |
53 | em = kmem_cache_alloc(extent_map_cache, mask); | 53 | em = kmem_cache_alloc(extent_map_cache, mask); |
54 | if (!em || IS_ERR(em)) | 54 | if (!em) |
55 | return em; | 55 | return NULL; |
56 | em->in_tree = 0; | 56 | em->in_tree = 0; |
57 | em->flags = 0; | 57 | em->flags = 0; |
58 | em->compress_type = BTRFS_COMPRESS_NONE; | 58 | em->compress_type = BTRFS_COMPRESS_NONE; |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c1d3a818731..7084140d594 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -186,6 +186,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
186 | split = alloc_extent_map(GFP_NOFS); | 186 | split = alloc_extent_map(GFP_NOFS); |
187 | if (!split2) | 187 | if (!split2) |
188 | split2 = alloc_extent_map(GFP_NOFS); | 188 | split2 = alloc_extent_map(GFP_NOFS); |
189 | BUG_ON(!split || !split2); | ||
189 | 190 | ||
190 | write_lock(&em_tree->lock); | 191 | write_lock(&em_tree->lock); |
191 | em = lookup_extent_mapping(em_tree, start, len); | 192 | em = lookup_extent_mapping(em_tree, start, len); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bcc461a9695..fb9bd7832b6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -644,6 +644,7 @@ retry: | |||
644 | async_extent->ram_size - 1, 0); | 644 | async_extent->ram_size - 1, 0); |
645 | 645 | ||
646 | em = alloc_extent_map(GFP_NOFS); | 646 | em = alloc_extent_map(GFP_NOFS); |
647 | BUG_ON(!em); | ||
647 | em->start = async_extent->start; | 648 | em->start = async_extent->start; |
648 | em->len = async_extent->ram_size; | 649 | em->len = async_extent->ram_size; |
649 | em->orig_start = em->start; | 650 | em->orig_start = em->start; |
@@ -820,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
820 | BUG_ON(ret); | 821 | BUG_ON(ret); |
821 | 822 | ||
822 | em = alloc_extent_map(GFP_NOFS); | 823 | em = alloc_extent_map(GFP_NOFS); |
824 | BUG_ON(!em); | ||
823 | em->start = start; | 825 | em->start = start; |
824 | em->orig_start = em->start; | 826 | em->orig_start = em->start; |
825 | ram_size = ins.offset; | 827 | ram_size = ins.offset; |
@@ -1169,6 +1171,7 @@ out_check: | |||
1169 | struct extent_map_tree *em_tree; | 1171 | struct extent_map_tree *em_tree; |
1170 | em_tree = &BTRFS_I(inode)->extent_tree; | 1172 | em_tree = &BTRFS_I(inode)->extent_tree; |
1171 | em = alloc_extent_map(GFP_NOFS); | 1173 | em = alloc_extent_map(GFP_NOFS); |
1174 | BUG_ON(!em); | ||
1172 | em->start = cur_offset; | 1175 | em->start = cur_offset; |
1173 | em->orig_start = em->start; | 1176 | em->orig_start = em->start; |
1174 | em->len = num_bytes; | 1177 | em->len = num_bytes; |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 02d224e8c83..be2d4f6aaa5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -2208,7 +2208,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2208 | int num_types = 4; | 2208 | int num_types = 4; |
2209 | int alloc_size; | 2209 | int alloc_size; |
2210 | int ret = 0; | 2210 | int ret = 0; |
2211 | int slot_count = 0; | 2211 | u64 slot_count = 0; |
2212 | int i, c; | 2212 | int i, c; |
2213 | 2213 | ||
2214 | if (copy_from_user(&space_args, | 2214 | if (copy_from_user(&space_args, |
@@ -2247,7 +2247,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2247 | goto out; | 2247 | goto out; |
2248 | } | 2248 | } |
2249 | 2249 | ||
2250 | slot_count = min_t(int, space_args.space_slots, slot_count); | 2250 | slot_count = min_t(u64, space_args.space_slots, slot_count); |
2251 | 2251 | ||
2252 | alloc_size = sizeof(*dest) * slot_count; | 2252 | alloc_size = sizeof(*dest) * slot_count; |
2253 | 2253 | ||
@@ -2267,6 +2267,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2267 | for (i = 0; i < num_types; i++) { | 2267 | for (i = 0; i < num_types; i++) { |
2268 | struct btrfs_space_info *tmp; | 2268 | struct btrfs_space_info *tmp; |
2269 | 2269 | ||
2270 | if (!slot_count) | ||
2271 | break; | ||
2272 | |||
2270 | info = NULL; | 2273 | info = NULL; |
2271 | rcu_read_lock(); | 2274 | rcu_read_lock(); |
2272 | list_for_each_entry_rcu(tmp, &root->fs_info->space_info, | 2275 | list_for_each_entry_rcu(tmp, &root->fs_info->space_info, |
@@ -2288,7 +2291,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) | |||
2288 | memcpy(dest, &space, sizeof(space)); | 2291 | memcpy(dest, &space, sizeof(space)); |
2289 | dest++; | 2292 | dest++; |
2290 | space_args.total_spaces++; | 2293 | space_args.total_spaces++; |
2294 | slot_count--; | ||
2291 | } | 2295 | } |
2296 | if (!slot_count) | ||
2297 | break; | ||
2292 | } | 2298 | } |
2293 | up_read(&info->groups_sem); | 2299 | up_read(&info->groups_sem); |
2294 | } | 2300 | } |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1f5556acb53..0825e4ed944 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, | |||
1157 | new_node->bytenr = dest->node->start; | 1157 | new_node->bytenr = dest->node->start; |
1158 | new_node->level = node->level; | 1158 | new_node->level = node->level; |
1159 | new_node->lowest = node->lowest; | 1159 | new_node->lowest = node->lowest; |
1160 | new_node->checked = 1; | ||
1160 | new_node->root = dest; | 1161 | new_node->root = dest; |
1161 | 1162 | ||
1162 | if (!node->lowest) { | 1163 | if (!node->lowest) { |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2636a051e4b..af7dbca1527 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1605,12 +1605,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1605 | 1605 | ||
1606 | ret = find_next_devid(root, &device->devid); | 1606 | ret = find_next_devid(root, &device->devid); |
1607 | if (ret) { | 1607 | if (ret) { |
1608 | kfree(device->name); | ||
1608 | kfree(device); | 1609 | kfree(device); |
1609 | goto error; | 1610 | goto error; |
1610 | } | 1611 | } |
1611 | 1612 | ||
1612 | trans = btrfs_start_transaction(root, 0); | 1613 | trans = btrfs_start_transaction(root, 0); |
1613 | if (IS_ERR(trans)) { | 1614 | if (IS_ERR(trans)) { |
1615 | kfree(device->name); | ||
1614 | kfree(device); | 1616 | kfree(device); |
1615 | ret = PTR_ERR(trans); | 1617 | ret = PTR_ERR(trans); |
1616 | goto error; | 1618 | goto error; |
diff --git a/fs/namei.c b/fs/namei.c index ec4b2d0190a..9e701e28a32 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry | |||
455 | struct fs_struct *fs = current->fs; | 455 | struct fs_struct *fs = current->fs; |
456 | struct dentry *parent = nd->path.dentry; | 456 | struct dentry *parent = nd->path.dentry; |
457 | 457 | ||
458 | /* | ||
459 | * It can be possible to revalidate the dentry that we started | ||
460 | * the path walk with. force_reval_path may also revalidate the | ||
461 | * dentry already committed to the nameidata. | ||
462 | */ | ||
463 | if (unlikely(parent == dentry)) | ||
464 | return nameidata_drop_rcu(nd); | ||
465 | |||
466 | BUG_ON(!(nd->flags & LOOKUP_RCU)); | 458 | BUG_ON(!(nd->flags & LOOKUP_RCU)); |
467 | if (nd->root.mnt) { | 459 | if (nd->root.mnt) { |
468 | spin_lock(&fs->lock); | 460 | spin_lock(&fs->lock); |
@@ -571,33 +563,15 @@ void release_open_intent(struct nameidata *nd) | |||
571 | } | 563 | } |
572 | } | 564 | } |
573 | 565 | ||
574 | /* | 566 | static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) |
575 | * Call d_revalidate and handle filesystems that request rcu-walk | ||
576 | * to be dropped. This may be called and return in rcu-walk mode, | ||
577 | * regardless of success or error. If -ECHILD is returned, the caller | ||
578 | * must return -ECHILD back up the path walk stack so path walk may | ||
579 | * be restarted in ref-walk mode. | ||
580 | */ | ||
581 | static int d_revalidate(struct dentry *dentry, struct nameidata *nd) | ||
582 | { | 567 | { |
583 | int status; | 568 | return dentry->d_op->d_revalidate(dentry, nd); |
584 | |||
585 | status = dentry->d_op->d_revalidate(dentry, nd); | ||
586 | if (status == -ECHILD) { | ||
587 | if (nameidata_dentry_drop_rcu(nd, dentry)) | ||
588 | return status; | ||
589 | status = dentry->d_op->d_revalidate(dentry, nd); | ||
590 | } | ||
591 | |||
592 | return status; | ||
593 | } | 569 | } |
594 | 570 | ||
595 | static inline struct dentry * | 571 | static struct dentry * |
596 | do_revalidate(struct dentry *dentry, struct nameidata *nd) | 572 | do_revalidate(struct dentry *dentry, struct nameidata *nd) |
597 | { | 573 | { |
598 | int status; | 574 | int status = d_revalidate(dentry, nd); |
599 | |||
600 | status = d_revalidate(dentry, nd); | ||
601 | if (unlikely(status <= 0)) { | 575 | if (unlikely(status <= 0)) { |
602 | /* | 576 | /* |
603 | * The dentry failed validation. | 577 | * The dentry failed validation. |
@@ -606,24 +580,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
606 | * to return a fail status. | 580 | * to return a fail status. |
607 | */ | 581 | */ |
608 | if (status < 0) { | 582 | if (status < 0) { |
609 | /* If we're in rcu-walk, we don't have a ref */ | 583 | dput(dentry); |
610 | if (!(nd->flags & LOOKUP_RCU)) | ||
611 | dput(dentry); | ||
612 | dentry = ERR_PTR(status); | 584 | dentry = ERR_PTR(status); |
613 | 585 | } else if (!d_invalidate(dentry)) { | |
614 | } else { | 586 | dput(dentry); |
615 | /* Don't d_invalidate in rcu-walk mode */ | 587 | dentry = NULL; |
616 | if (nameidata_dentry_drop_rcu_maybe(nd, dentry)) | ||
617 | return ERR_PTR(-ECHILD); | ||
618 | if (!d_invalidate(dentry)) { | ||
619 | dput(dentry); | ||
620 | dentry = NULL; | ||
621 | } | ||
622 | } | 588 | } |
623 | } | 589 | } |
624 | return dentry; | 590 | return dentry; |
625 | } | 591 | } |
626 | 592 | ||
593 | static inline struct dentry * | ||
594 | do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd) | ||
595 | { | ||
596 | int status = d_revalidate(dentry, nd); | ||
597 | if (likely(status > 0)) | ||
598 | return dentry; | ||
599 | if (status == -ECHILD) { | ||
600 | if (nameidata_dentry_drop_rcu(nd, dentry)) | ||
601 | return ERR_PTR(-ECHILD); | ||
602 | return do_revalidate(dentry, nd); | ||
603 | } | ||
604 | if (status < 0) | ||
605 | return ERR_PTR(status); | ||
606 | /* Don't d_invalidate in rcu-walk mode */ | ||
607 | if (nameidata_dentry_drop_rcu(nd, dentry)) | ||
608 | return ERR_PTR(-ECHILD); | ||
609 | if (!d_invalidate(dentry)) { | ||
610 | dput(dentry); | ||
611 | dentry = NULL; | ||
612 | } | ||
613 | return dentry; | ||
614 | } | ||
615 | |||
627 | static inline int need_reval_dot(struct dentry *dentry) | 616 | static inline int need_reval_dot(struct dentry *dentry) |
628 | { | 617 | { |
629 | if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) | 618 | if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) |
@@ -668,9 +657,6 @@ force_reval_path(struct path *path, struct nameidata *nd) | |||
668 | return 0; | 657 | return 0; |
669 | 658 | ||
670 | if (!status) { | 659 | if (!status) { |
671 | /* Don't d_invalidate in rcu-walk mode */ | ||
672 | if (nameidata_drop_rcu(nd)) | ||
673 | return -ECHILD; | ||
674 | d_invalidate(dentry); | 660 | d_invalidate(dentry); |
675 | status = -ESTALE; | 661 | status = -ESTALE; |
676 | } | 662 | } |
@@ -777,6 +763,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) | |||
777 | int error; | 763 | int error; |
778 | struct dentry *dentry = link->dentry; | 764 | struct dentry *dentry = link->dentry; |
779 | 765 | ||
766 | BUG_ON(nd->flags & LOOKUP_RCU); | ||
767 | |||
780 | touch_atime(link->mnt, dentry); | 768 | touch_atime(link->mnt, dentry); |
781 | nd_set_link(nd, NULL); | 769 | nd_set_link(nd, NULL); |
782 | 770 | ||
@@ -811,6 +799,11 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) | |||
811 | { | 799 | { |
812 | void *cookie; | 800 | void *cookie; |
813 | int err = -ELOOP; | 801 | int err = -ELOOP; |
802 | |||
803 | /* We drop rcu-walk here */ | ||
804 | if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) | ||
805 | return -ECHILD; | ||
806 | |||
814 | if (current->link_count >= MAX_NESTED_LINKS) | 807 | if (current->link_count >= MAX_NESTED_LINKS) |
815 | goto loop; | 808 | goto loop; |
816 | if (current->total_link_count >= 40) | 809 | if (current->total_link_count >= 40) |
@@ -1255,9 +1248,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, | |||
1255 | return -ECHILD; | 1248 | return -ECHILD; |
1256 | 1249 | ||
1257 | nd->seq = seq; | 1250 | nd->seq = seq; |
1258 | if (dentry->d_flags & DCACHE_OP_REVALIDATE) | 1251 | if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { |
1259 | goto need_revalidate; | 1252 | dentry = do_revalidate_rcu(dentry, nd); |
1260 | done2: | 1253 | if (!dentry) |
1254 | goto need_lookup; | ||
1255 | if (IS_ERR(dentry)) | ||
1256 | goto fail; | ||
1257 | if (!(nd->flags & LOOKUP_RCU)) | ||
1258 | goto done; | ||
1259 | } | ||
1261 | path->mnt = mnt; | 1260 | path->mnt = mnt; |
1262 | path->dentry = dentry; | 1261 | path->dentry = dentry; |
1263 | if (likely(__follow_mount_rcu(nd, path, inode, false))) | 1262 | if (likely(__follow_mount_rcu(nd, path, inode, false))) |
@@ -1270,8 +1269,13 @@ done2: | |||
1270 | if (!dentry) | 1269 | if (!dentry) |
1271 | goto need_lookup; | 1270 | goto need_lookup; |
1272 | found: | 1271 | found: |
1273 | if (dentry->d_flags & DCACHE_OP_REVALIDATE) | 1272 | if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { |
1274 | goto need_revalidate; | 1273 | dentry = do_revalidate(dentry, nd); |
1274 | if (!dentry) | ||
1275 | goto need_lookup; | ||
1276 | if (IS_ERR(dentry)) | ||
1277 | goto fail; | ||
1278 | } | ||
1275 | done: | 1279 | done: |
1276 | path->mnt = mnt; | 1280 | path->mnt = mnt; |
1277 | path->dentry = dentry; | 1281 | path->dentry = dentry; |
@@ -1313,16 +1317,6 @@ need_lookup: | |||
1313 | mutex_unlock(&dir->i_mutex); | 1317 | mutex_unlock(&dir->i_mutex); |
1314 | goto found; | 1318 | goto found; |
1315 | 1319 | ||
1316 | need_revalidate: | ||
1317 | dentry = do_revalidate(dentry, nd); | ||
1318 | if (!dentry) | ||
1319 | goto need_lookup; | ||
1320 | if (IS_ERR(dentry)) | ||
1321 | goto fail; | ||
1322 | if (nd->flags & LOOKUP_RCU) | ||
1323 | goto done2; | ||
1324 | goto done; | ||
1325 | |||
1326 | fail: | 1320 | fail: |
1327 | return PTR_ERR(dentry); | 1321 | return PTR_ERR(dentry); |
1328 | } | 1322 | } |
@@ -1419,9 +1413,6 @@ exec_again: | |||
1419 | goto out_dput; | 1413 | goto out_dput; |
1420 | 1414 | ||
1421 | if (inode->i_op->follow_link) { | 1415 | if (inode->i_op->follow_link) { |
1422 | /* We commonly drop rcu-walk here */ | ||
1423 | if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) | ||
1424 | return -ECHILD; | ||
1425 | BUG_ON(inode != next.dentry->d_inode); | 1416 | BUG_ON(inode != next.dentry->d_inode); |
1426 | err = do_follow_link(&next, nd); | 1417 | err = do_follow_link(&next, nd); |
1427 | if (err) | 1418 | if (err) |
@@ -1467,8 +1458,6 @@ last_component: | |||
1467 | break; | 1458 | break; |
1468 | if (inode && unlikely(inode->i_op->follow_link) && | 1459 | if (inode && unlikely(inode->i_op->follow_link) && |
1469 | (lookup_flags & LOOKUP_FOLLOW)) { | 1460 | (lookup_flags & LOOKUP_FOLLOW)) { |
1470 | if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) | ||
1471 | return -ECHILD; | ||
1472 | BUG_ON(inode != next.dentry->d_inode); | 1461 | BUG_ON(inode != next.dentry->d_inode); |
1473 | err = do_follow_link(&next, nd); | 1462 | err = do_follow_link(&next, nd); |
1474 | if (err) | 1463 | if (err) |
@@ -1504,12 +1493,15 @@ return_reval: | |||
1504 | * We may need to check the cached dentry for staleness. | 1493 | * We may need to check the cached dentry for staleness. |
1505 | */ | 1494 | */ |
1506 | if (need_reval_dot(nd->path.dentry)) { | 1495 | if (need_reval_dot(nd->path.dentry)) { |
1496 | if (nameidata_drop_rcu_last_maybe(nd)) | ||
1497 | return -ECHILD; | ||
1507 | /* Note: we do not d_invalidate() */ | 1498 | /* Note: we do not d_invalidate() */ |
1508 | err = d_revalidate(nd->path.dentry, nd); | 1499 | err = d_revalidate(nd->path.dentry, nd); |
1509 | if (!err) | 1500 | if (!err) |
1510 | err = -ESTALE; | 1501 | err = -ESTALE; |
1511 | if (err < 0) | 1502 | if (err < 0) |
1512 | break; | 1503 | break; |
1504 | return 0; | ||
1513 | } | 1505 | } |
1514 | return_base: | 1506 | return_base: |
1515 | if (nameidata_drop_rcu_last_maybe(nd)) | 1507 | if (nameidata_drop_rcu_last_maybe(nd)) |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3be975e1891..cde36cb0f34 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr, | |||
484 | out: | 484 | out: |
485 | return status; | 485 | return status; |
486 | out_default: | 486 | out_default: |
487 | return nfs_cb_stat_to_errno(status); | 487 | return nfs_cb_stat_to_errno(nfserr); |
488 | } | 488 | } |
489 | 489 | ||
490 | /* | 490 | /* |
@@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, | |||
564 | if (unlikely(status)) | 564 | if (unlikely(status)) |
565 | goto out; | 565 | goto out; |
566 | if (unlikely(nfserr != NFS4_OK)) | 566 | if (unlikely(nfserr != NFS4_OK)) |
567 | goto out_default; | 567 | status = nfs_cb_stat_to_errno(nfserr); |
568 | out: | 568 | out: |
569 | return status; | 569 | return status; |
570 | out_default: | ||
571 | return nfs_cb_stat_to_errno(status); | ||
572 | } | 570 | } |
573 | 571 | ||
574 | /* | 572 | /* |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d98d0213285..54b60bfceb8 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
230 | dp->dl_client = clp; | 230 | dp->dl_client = clp; |
231 | get_nfs4_file(fp); | 231 | get_nfs4_file(fp); |
232 | dp->dl_file = fp; | 232 | dp->dl_file = fp; |
233 | dp->dl_vfs_file = find_readable_file(fp); | ||
234 | get_file(dp->dl_vfs_file); | ||
235 | dp->dl_flock = NULL; | ||
236 | dp->dl_type = type; | 233 | dp->dl_type = type; |
237 | dp->dl_stateid.si_boot = boot_time; | 234 | dp->dl_stateid.si_boot = boot_time; |
238 | dp->dl_stateid.si_stateownerid = current_delegid++; | 235 | dp->dl_stateid.si_stateownerid = current_delegid++; |
@@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f | |||
241 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); | 238 | fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); |
242 | dp->dl_time = 0; | 239 | dp->dl_time = 0; |
243 | atomic_set(&dp->dl_count, 1); | 240 | atomic_set(&dp->dl_count, 1); |
244 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
245 | list_add(&dp->dl_perclnt, &clp->cl_delegations); | ||
246 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); | 241 | INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); |
247 | return dp; | 242 | return dp; |
248 | } | 243 | } |
@@ -253,36 +248,30 @@ nfs4_put_delegation(struct nfs4_delegation *dp) | |||
253 | if (atomic_dec_and_test(&dp->dl_count)) { | 248 | if (atomic_dec_and_test(&dp->dl_count)) { |
254 | dprintk("NFSD: freeing dp %p\n",dp); | 249 | dprintk("NFSD: freeing dp %p\n",dp); |
255 | put_nfs4_file(dp->dl_file); | 250 | put_nfs4_file(dp->dl_file); |
256 | fput(dp->dl_vfs_file); | ||
257 | kmem_cache_free(deleg_slab, dp); | 251 | kmem_cache_free(deleg_slab, dp); |
258 | num_delegations--; | 252 | num_delegations--; |
259 | } | 253 | } |
260 | } | 254 | } |
261 | 255 | ||
262 | /* Remove the associated file_lock first, then remove the delegation. | 256 | static void nfs4_put_deleg_lease(struct nfs4_file *fp) |
263 | * lease_modify() is called to remove the FS_LEASE file_lock from | ||
264 | * the i_flock list, eventually calling nfsd's lock_manager | ||
265 | * fl_release_callback. | ||
266 | */ | ||
267 | static void | ||
268 | nfs4_close_delegation(struct nfs4_delegation *dp) | ||
269 | { | 257 | { |
270 | dprintk("NFSD: close_delegation dp %p\n",dp); | 258 | if (atomic_dec_and_test(&fp->fi_delegees)) { |
271 | /* XXX: do we even need this check?: */ | 259 | vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); |
272 | if (dp->dl_flock) | 260 | fp->fi_lease = NULL; |
273 | vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); | 261 | fp->fi_deleg_file = NULL; |
262 | } | ||
274 | } | 263 | } |
275 | 264 | ||
276 | /* Called under the state lock. */ | 265 | /* Called under the state lock. */ |
277 | static void | 266 | static void |
278 | unhash_delegation(struct nfs4_delegation *dp) | 267 | unhash_delegation(struct nfs4_delegation *dp) |
279 | { | 268 | { |
280 | list_del_init(&dp->dl_perfile); | ||
281 | list_del_init(&dp->dl_perclnt); | 269 | list_del_init(&dp->dl_perclnt); |
282 | spin_lock(&recall_lock); | 270 | spin_lock(&recall_lock); |
271 | list_del_init(&dp->dl_perfile); | ||
283 | list_del_init(&dp->dl_recall_lru); | 272 | list_del_init(&dp->dl_recall_lru); |
284 | spin_unlock(&recall_lock); | 273 | spin_unlock(&recall_lock); |
285 | nfs4_close_delegation(dp); | 274 | nfs4_put_deleg_lease(dp->dl_file); |
286 | nfs4_put_delegation(dp); | 275 | nfs4_put_delegation(dp); |
287 | } | 276 | } |
288 | 277 | ||
@@ -958,8 +947,6 @@ expire_client(struct nfs4_client *clp) | |||
958 | spin_lock(&recall_lock); | 947 | spin_lock(&recall_lock); |
959 | while (!list_empty(&clp->cl_delegations)) { | 948 | while (!list_empty(&clp->cl_delegations)) { |
960 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); | 949 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); |
961 | dprintk("NFSD: expire client. dp %p, fp %p\n", dp, | ||
962 | dp->dl_flock); | ||
963 | list_del_init(&dp->dl_perclnt); | 950 | list_del_init(&dp->dl_perclnt); |
964 | list_move(&dp->dl_recall_lru, &reaplist); | 951 | list_move(&dp->dl_recall_lru, &reaplist); |
965 | } | 952 | } |
@@ -2078,6 +2065,7 @@ alloc_init_file(struct inode *ino) | |||
2078 | fp->fi_inode = igrab(ino); | 2065 | fp->fi_inode = igrab(ino); |
2079 | fp->fi_id = current_fileid++; | 2066 | fp->fi_id = current_fileid++; |
2080 | fp->fi_had_conflict = false; | 2067 | fp->fi_had_conflict = false; |
2068 | fp->fi_lease = NULL; | ||
2081 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); | 2069 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); |
2082 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); | 2070 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); |
2083 | spin_lock(&recall_lock); | 2071 | spin_lock(&recall_lock); |
@@ -2329,23 +2317,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) | |||
2329 | nfs4_file_put_access(fp, O_RDONLY); | 2317 | nfs4_file_put_access(fp, O_RDONLY); |
2330 | } | 2318 | } |
2331 | 2319 | ||
2332 | /* | 2320 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) |
2333 | * Spawn a thread to perform a recall on the delegation represented | ||
2334 | * by the lease (file_lock) | ||
2335 | * | ||
2336 | * Called from break_lease() with lock_flocks() held. | ||
2337 | * Note: we assume break_lease will only call this *once* for any given | ||
2338 | * lease. | ||
2339 | */ | ||
2340 | static | ||
2341 | void nfsd_break_deleg_cb(struct file_lock *fl) | ||
2342 | { | 2321 | { |
2343 | struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; | ||
2344 | |||
2345 | dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl); | ||
2346 | if (!dp) | ||
2347 | return; | ||
2348 | |||
2349 | /* We're assuming the state code never drops its reference | 2322 | /* We're assuming the state code never drops its reference |
2350 | * without first removing the lease. Since we're in this lease | 2323 | * without first removing the lease. Since we're in this lease |
2351 | * callback (and since the lease code is serialized by the kernel | 2324 | * callback (and since the lease code is serialized by the kernel |
@@ -2353,22 +2326,35 @@ void nfsd_break_deleg_cb(struct file_lock *fl) | |||
2353 | * it's safe to take a reference: */ | 2326 | * it's safe to take a reference: */ |
2354 | atomic_inc(&dp->dl_count); | 2327 | atomic_inc(&dp->dl_count); |
2355 | 2328 | ||
2356 | spin_lock(&recall_lock); | ||
2357 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); | 2329 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); |
2358 | spin_unlock(&recall_lock); | ||
2359 | 2330 | ||
2360 | /* only place dl_time is set. protected by lock_flocks*/ | 2331 | /* only place dl_time is set. protected by lock_flocks*/ |
2361 | dp->dl_time = get_seconds(); | 2332 | dp->dl_time = get_seconds(); |
2362 | 2333 | ||
2334 | nfsd4_cb_recall(dp); | ||
2335 | } | ||
2336 | |||
2337 | /* Called from break_lease() with lock_flocks() held. */ | ||
2338 | static void nfsd_break_deleg_cb(struct file_lock *fl) | ||
2339 | { | ||
2340 | struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; | ||
2341 | struct nfs4_delegation *dp; | ||
2342 | |||
2343 | BUG_ON(!fp); | ||
2344 | /* We assume break_lease is only called once per lease: */ | ||
2345 | BUG_ON(fp->fi_had_conflict); | ||
2363 | /* | 2346 | /* |
2364 | * We don't want the locks code to timeout the lease for us; | 2347 | * We don't want the locks code to timeout the lease for us; |
2365 | * we'll remove it ourself if the delegation isn't returned | 2348 | * we'll remove it ourself if a delegation isn't returned |
2366 | * in time. | 2349 | * in time: |
2367 | */ | 2350 | */ |
2368 | fl->fl_break_time = 0; | 2351 | fl->fl_break_time = 0; |
2369 | 2352 | ||
2370 | dp->dl_file->fi_had_conflict = true; | 2353 | spin_lock(&recall_lock); |
2371 | nfsd4_cb_recall(dp); | 2354 | fp->fi_had_conflict = true; |
2355 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) | ||
2356 | nfsd_break_one_deleg(dp); | ||
2357 | spin_unlock(&recall_lock); | ||
2372 | } | 2358 | } |
2373 | 2359 | ||
2374 | static | 2360 | static |
@@ -2459,13 +2445,15 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) | |||
2459 | static struct nfs4_delegation * | 2445 | static struct nfs4_delegation * |
2460 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) | 2446 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) |
2461 | { | 2447 | { |
2462 | struct nfs4_delegation *dp; | 2448 | struct nfs4_delegation *dp = NULL; |
2463 | 2449 | ||
2450 | spin_lock(&recall_lock); | ||
2464 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { | 2451 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { |
2465 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) | 2452 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) |
2466 | return dp; | 2453 | break; |
2467 | } | 2454 | } |
2468 | return NULL; | 2455 | spin_unlock(&recall_lock); |
2456 | return dp; | ||
2469 | } | 2457 | } |
2470 | 2458 | ||
2471 | int share_access_to_flags(u32 share_access) | 2459 | int share_access_to_flags(u32 share_access) |
@@ -2641,6 +2629,66 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) | |||
2641 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; | 2629 | return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; |
2642 | } | 2630 | } |
2643 | 2631 | ||
2632 | static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) | ||
2633 | { | ||
2634 | struct file_lock *fl; | ||
2635 | |||
2636 | fl = locks_alloc_lock(); | ||
2637 | if (!fl) | ||
2638 | return NULL; | ||
2639 | locks_init_lock(fl); | ||
2640 | fl->fl_lmops = &nfsd_lease_mng_ops; | ||
2641 | fl->fl_flags = FL_LEASE; | ||
2642 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2643 | fl->fl_end = OFFSET_MAX; | ||
2644 | fl->fl_owner = (fl_owner_t)(dp->dl_file); | ||
2645 | fl->fl_pid = current->tgid; | ||
2646 | return fl; | ||
2647 | } | ||
2648 | |||
2649 | static int nfs4_setlease(struct nfs4_delegation *dp, int flag) | ||
2650 | { | ||
2651 | struct nfs4_file *fp = dp->dl_file; | ||
2652 | struct file_lock *fl; | ||
2653 | int status; | ||
2654 | |||
2655 | fl = nfs4_alloc_init_lease(dp, flag); | ||
2656 | if (!fl) | ||
2657 | return -ENOMEM; | ||
2658 | fl->fl_file = find_readable_file(fp); | ||
2659 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2660 | status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); | ||
2661 | if (status) { | ||
2662 | list_del_init(&dp->dl_perclnt); | ||
2663 | locks_free_lock(fl); | ||
2664 | return -ENOMEM; | ||
2665 | } | ||
2666 | fp->fi_lease = fl; | ||
2667 | fp->fi_deleg_file = fl->fl_file; | ||
2668 | get_file(fp->fi_deleg_file); | ||
2669 | atomic_set(&fp->fi_delegees, 1); | ||
2670 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2671 | return 0; | ||
2672 | } | ||
2673 | |||
2674 | static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) | ||
2675 | { | ||
2676 | struct nfs4_file *fp = dp->dl_file; | ||
2677 | |||
2678 | if (!fp->fi_lease) | ||
2679 | return nfs4_setlease(dp, flag); | ||
2680 | spin_lock(&recall_lock); | ||
2681 | if (fp->fi_had_conflict) { | ||
2682 | spin_unlock(&recall_lock); | ||
2683 | return -EAGAIN; | ||
2684 | } | ||
2685 | atomic_inc(&fp->fi_delegees); | ||
2686 | list_add(&dp->dl_perfile, &fp->fi_delegations); | ||
2687 | spin_unlock(&recall_lock); | ||
2688 | list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); | ||
2689 | return 0; | ||
2690 | } | ||
2691 | |||
2644 | /* | 2692 | /* |
2645 | * Attempt to hand out a delegation. | 2693 | * Attempt to hand out a delegation. |
2646 | */ | 2694 | */ |
@@ -2650,7 +2698,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2650 | struct nfs4_delegation *dp; | 2698 | struct nfs4_delegation *dp; |
2651 | struct nfs4_stateowner *sop = stp->st_stateowner; | 2699 | struct nfs4_stateowner *sop = stp->st_stateowner; |
2652 | int cb_up; | 2700 | int cb_up; |
2653 | struct file_lock *fl; | ||
2654 | int status, flag = 0; | 2701 | int status, flag = 0; |
2655 | 2702 | ||
2656 | cb_up = nfsd4_cb_channel_good(sop->so_client); | 2703 | cb_up = nfsd4_cb_channel_good(sop->so_client); |
@@ -2681,36 +2728,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta | |||
2681 | } | 2728 | } |
2682 | 2729 | ||
2683 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); | 2730 | dp = alloc_init_deleg(sop->so_client, stp, fh, flag); |
2684 | if (dp == NULL) { | 2731 | if (dp == NULL) |
2685 | flag = NFS4_OPEN_DELEGATE_NONE; | 2732 | goto out_no_deleg; |
2686 | goto out; | 2733 | status = nfs4_set_delegation(dp, flag); |
2687 | } | 2734 | if (status) |
2688 | status = -ENOMEM; | 2735 | goto out_free; |
2689 | fl = locks_alloc_lock(); | ||
2690 | if (!fl) | ||
2691 | goto out; | ||
2692 | locks_init_lock(fl); | ||
2693 | fl->fl_lmops = &nfsd_lease_mng_ops; | ||
2694 | fl->fl_flags = FL_LEASE; | ||
2695 | fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; | ||
2696 | fl->fl_end = OFFSET_MAX; | ||
2697 | fl->fl_owner = (fl_owner_t)dp; | ||
2698 | fl->fl_file = find_readable_file(stp->st_file); | ||
2699 | BUG_ON(!fl->fl_file); | ||
2700 | fl->fl_pid = current->tgid; | ||
2701 | dp->dl_flock = fl; | ||
2702 | |||
2703 | /* vfs_setlease checks to see if delegation should be handed out. | ||
2704 | * the lock_manager callback fl_change is used | ||
2705 | */ | ||
2706 | if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { | ||
2707 | dprintk("NFSD: setlease failed [%d], no delegation\n", status); | ||
2708 | dp->dl_flock = NULL; | ||
2709 | locks_free_lock(fl); | ||
2710 | unhash_delegation(dp); | ||
2711 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2712 | goto out; | ||
2713 | } | ||
2714 | 2736 | ||
2715 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); | 2737 | memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); |
2716 | 2738 | ||
@@ -2722,6 +2744,12 @@ out: | |||
2722 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) | 2744 | && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) |
2723 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); | 2745 | dprintk("NFSD: WARNING: refusing delegation reclaim\n"); |
2724 | open->op_delegate_type = flag; | 2746 | open->op_delegate_type = flag; |
2747 | return; | ||
2748 | out_free: | ||
2749 | nfs4_put_delegation(dp); | ||
2750 | out_no_deleg: | ||
2751 | flag = NFS4_OPEN_DELEGATE_NONE; | ||
2752 | goto out; | ||
2725 | } | 2753 | } |
2726 | 2754 | ||
2727 | /* | 2755 | /* |
@@ -2916,8 +2944,6 @@ nfs4_laundromat(void) | |||
2916 | test_val = u; | 2944 | test_val = u; |
2917 | break; | 2945 | break; |
2918 | } | 2946 | } |
2919 | dprintk("NFSD: purging unused delegation dp %p, fp %p\n", | ||
2920 | dp, dp->dl_flock); | ||
2921 | list_move(&dp->dl_recall_lru, &reaplist); | 2947 | list_move(&dp->dl_recall_lru, &reaplist); |
2922 | } | 2948 | } |
2923 | spin_unlock(&recall_lock); | 2949 | spin_unlock(&recall_lock); |
@@ -3128,7 +3154,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | |||
3128 | goto out; | 3154 | goto out; |
3129 | renew_client(dp->dl_client); | 3155 | renew_client(dp->dl_client); |
3130 | if (filpp) { | 3156 | if (filpp) { |
3131 | *filpp = find_readable_file(dp->dl_file); | 3157 | *filpp = dp->dl_file->fi_deleg_file; |
3132 | BUG_ON(!*filpp); | 3158 | BUG_ON(!*filpp); |
3133 | } | 3159 | } |
3134 | } else { /* open or lock stateid */ | 3160 | } else { /* open or lock stateid */ |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 3074656ba7b..2d31224b07b 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -83,8 +83,6 @@ struct nfs4_delegation { | |||
83 | atomic_t dl_count; /* ref count */ | 83 | atomic_t dl_count; /* ref count */ |
84 | struct nfs4_client *dl_client; | 84 | struct nfs4_client *dl_client; |
85 | struct nfs4_file *dl_file; | 85 | struct nfs4_file *dl_file; |
86 | struct file *dl_vfs_file; | ||
87 | struct file_lock *dl_flock; | ||
88 | u32 dl_type; | 86 | u32 dl_type; |
89 | time_t dl_time; | 87 | time_t dl_time; |
90 | /* For recall: */ | 88 | /* For recall: */ |
@@ -379,6 +377,9 @@ struct nfs4_file { | |||
379 | */ | 377 | */ |
380 | atomic_t fi_readers; | 378 | atomic_t fi_readers; |
381 | atomic_t fi_writers; | 379 | atomic_t fi_writers; |
380 | struct file *fi_deleg_file; | ||
381 | struct file_lock *fi_lease; | ||
382 | atomic_t fi_delegees; | ||
382 | struct inode *fi_inode; | 383 | struct inode *fi_inode; |
383 | u32 fi_id; /* used with stateowner->so_id | 384 | u32 fi_id; /* used with stateowner->so_id |
384 | * for stateid_hashtbl hash */ | 385 | * for stateid_hashtbl hash */ |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 641117f2188..da1d9701f8e 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) | |||
808 | if (ra->p_count == 0) | 808 | if (ra->p_count == 0) |
809 | frap = rap; | 809 | frap = rap; |
810 | } | 810 | } |
811 | depth = nfsdstats.ra_size*11/10; | 811 | depth = nfsdstats.ra_size; |
812 | if (!frap) { | 812 | if (!frap) { |
813 | spin_unlock(&rab->pb_lock); | 813 | spin_unlock(&rab->pb_lock); |
814 | return NULL; | 814 | return NULL; |
@@ -1744,6 +1744,13 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, | |||
1744 | host_err = nfsd_break_lease(odentry->d_inode); | 1744 | host_err = nfsd_break_lease(odentry->d_inode); |
1745 | if (host_err) | 1745 | if (host_err) |
1746 | goto out_drop_write; | 1746 | goto out_drop_write; |
1747 | if (ndentry->d_inode) { | ||
1748 | host_err = nfsd_break_lease(ndentry->d_inode); | ||
1749 | if (host_err) | ||
1750 | goto out_drop_write; | ||
1751 | } | ||
1752 | if (host_err) | ||
1753 | goto out_drop_write; | ||
1747 | host_err = vfs_rename(fdir, odentry, tdir, ndentry); | 1754 | host_err = vfs_rename(fdir, odentry, tdir, ndentry); |
1748 | if (!host_err) { | 1755 | if (!host_err) { |
1749 | host_err = commit_metadata(tfhp); | 1756 | host_err = commit_metadata(tfhp); |
@@ -1812,22 +1819,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, | |||
1812 | 1819 | ||
1813 | host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); | 1820 | host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); |
1814 | if (host_err) | 1821 | if (host_err) |
1815 | goto out_nfserr; | 1822 | goto out_put; |
1816 | 1823 | ||
1817 | host_err = nfsd_break_lease(rdentry->d_inode); | 1824 | host_err = nfsd_break_lease(rdentry->d_inode); |
1818 | if (host_err) | 1825 | if (host_err) |
1819 | goto out_put; | 1826 | goto out_drop_write; |
1820 | if (type != S_IFDIR) | 1827 | if (type != S_IFDIR) |
1821 | host_err = vfs_unlink(dirp, rdentry); | 1828 | host_err = vfs_unlink(dirp, rdentry); |
1822 | else | 1829 | else |
1823 | host_err = vfs_rmdir(dirp, rdentry); | 1830 | host_err = vfs_rmdir(dirp, rdentry); |
1824 | out_put: | ||
1825 | dput(rdentry); | ||
1826 | |||
1827 | if (!host_err) | 1831 | if (!host_err) |
1828 | host_err = commit_metadata(fhp); | 1832 | host_err = commit_metadata(fhp); |
1829 | 1833 | out_drop_write: | |
1830 | mnt_drop_write(fhp->fh_export->ex_path.mnt); | 1834 | mnt_drop_write(fhp->fh_export->ex_path.mnt); |
1835 | out_put: | ||
1836 | dput(rdentry); | ||
1837 | |||
1831 | out_nfserr: | 1838 | out_nfserr: |
1832 | err = nfserrno(host_err); | 1839 | err = nfserrno(host_err); |
1833 | out: | 1840 | out: |
diff --git a/fs/proc/array.c b/fs/proc/array.c index df2b703b9d0..7c99c1cf7e5 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, | |||
353 | task_cap(m, task); | 353 | task_cap(m, task); |
354 | task_cpus_allowed(m, task); | 354 | task_cpus_allowed(m, task); |
355 | cpuset_task_status_allowed(m, task); | 355 | cpuset_task_status_allowed(m, task); |
356 | #if defined(CONFIG_S390) | ||
357 | task_show_regs(m, task); | ||
358 | #endif | ||
359 | task_context_switch_counts(m, task); | 356 | task_context_switch_counts(m, task); |
360 | return 0; | 357 | return 0; |
361 | } | 358 | } |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 8e6c8c42bc3..df29c8fde36 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page, | |||
57 | (transparent_hugepage_flags & \ | 57 | (transparent_hugepage_flags & \ |
58 | (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ | 58 | (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ |
59 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ | 59 | ((__vma)->vm_flags & VM_HUGEPAGE))) && \ |
60 | !((__vma)->vm_flags & VM_NOHUGEPAGE)) | 60 | !((__vma)->vm_flags & VM_NOHUGEPAGE) && \ |
61 | !is_vma_temporary_stack(__vma)) | ||
61 | #define transparent_hugepage_defrag(__vma) \ | 62 | #define transparent_hugepage_defrag(__vma) \ |
62 | ((transparent_hugepage_flags & \ | 63 | ((transparent_hugepage_flags & \ |
63 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ | 64 | (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ |
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index 69747469174..fe7c4b9ae27 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h | |||
@@ -4,8 +4,8 @@ | |||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/input.h> | 5 | #include <linux/input.h> |
6 | 6 | ||
7 | #define MATRIX_MAX_ROWS 16 | 7 | #define MATRIX_MAX_ROWS 32 |
8 | #define MATRIX_MAX_COLS 16 | 8 | #define MATRIX_MAX_COLS 32 |
9 | 9 | ||
10 | #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ | 10 | #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ |
11 | (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ | 11 | (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ |
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 32fb81212fd..1ca64113efe 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/errno.h> | ||
20 | #include <linux/printk.h> | ||
19 | #include <asm/atomic.h> | 21 | #include <asm/atomic.h> |
20 | 22 | ||
21 | /* Each escaped entry is prefixed by ESCAPE_CODE | 23 | /* Each escaped entry is prefixed by ESCAPE_CODE |
@@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val); | |||
186 | int oprofile_add_data64(struct op_entry *entry, u64 val); | 188 | int oprofile_add_data64(struct op_entry *entry, u64 val); |
187 | int oprofile_write_commit(struct op_entry *entry); | 189 | int oprofile_write_commit(struct op_entry *entry); |
188 | 190 | ||
189 | #ifdef CONFIG_PERF_EVENTS | 191 | #ifdef CONFIG_HW_PERF_EVENTS |
190 | int __init oprofile_perf_init(struct oprofile_operations *ops); | 192 | int __init oprofile_perf_init(struct oprofile_operations *ops); |
191 | void oprofile_perf_exit(void); | 193 | void oprofile_perf_exit(void); |
192 | char *op_name_from_perf_id(void); | 194 | char *op_name_from_perf_id(void); |
193 | #endif /* CONFIG_PERF_EVENTS */ | 195 | #else |
196 | static inline int __init oprofile_perf_init(struct oprofile_operations *ops) | ||
197 | { | ||
198 | pr_info("oprofile: hardware counters not available\n"); | ||
199 | return -ENODEV; | ||
200 | } | ||
201 | static inline void oprofile_perf_exit(void) { } | ||
202 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
194 | 203 | ||
195 | #endif /* OPROFILE_H */ | 204 | #endif /* OPROFILE_H */ |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 32a19f9397f..3258455549f 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym) | |||
41 | char symname[KSYM_NAME_LEN]; | 41 | char symname[KSYM_NAME_LEN]; |
42 | 42 | ||
43 | if (lookup_symbol_name((unsigned long)sym, symname) < 0) | 43 | if (lookup_symbol_name((unsigned long)sym, symname) < 0) |
44 | SEQ_printf(m, "<%p>", sym); | 44 | SEQ_printf(m, "<%pK>", sym); |
45 | else | 45 | else |
46 | SEQ_printf(m, "%s", symname); | 46 | SEQ_printf(m, "%s", symname); |
47 | } | 47 | } |
@@ -112,7 +112,7 @@ next_one: | |||
112 | static void | 112 | static void |
113 | print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) | 113 | print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) |
114 | { | 114 | { |
115 | SEQ_printf(m, " .base: %p\n", base); | 115 | SEQ_printf(m, " .base: %pK\n", base); |
116 | SEQ_printf(m, " .index: %d\n", | 116 | SEQ_printf(m, " .index: %d\n", |
117 | base->index); | 117 | base->index); |
118 | SEQ_printf(m, " .resolution: %Lu nsecs\n", | 118 | SEQ_printf(m, " .resolution: %Lu nsecs\n", |
diff --git a/kernel/timer.c b/kernel/timer.c index d53ce66daea..d6459923d24 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync); | |||
959 | * | 959 | * |
960 | * Synchronization rules: Callers must prevent restarting of the timer, | 960 | * Synchronization rules: Callers must prevent restarting of the timer, |
961 | * otherwise this function is meaningless. It must not be called from | 961 | * otherwise this function is meaningless. It must not be called from |
962 | * hardirq contexts. The caller must not hold locks which would prevent | 962 | * interrupt contexts. The caller must not hold locks which would prevent |
963 | * completion of the timer's handler. The timer's handler must not call | 963 | * completion of the timer's handler. The timer's handler must not call |
964 | * add_timer_on(). Upon exit the timer is not queued and the handler is | 964 | * add_timer_on(). Upon exit the timer is not queued and the handler is |
965 | * not running on any CPU. | 965 | * not running on any CPU. |
@@ -971,12 +971,10 @@ int del_timer_sync(struct timer_list *timer) | |||
971 | #ifdef CONFIG_LOCKDEP | 971 | #ifdef CONFIG_LOCKDEP |
972 | unsigned long flags; | 972 | unsigned long flags; |
973 | 973 | ||
974 | raw_local_irq_save(flags); | 974 | local_irq_save(flags); |
975 | local_bh_disable(); | ||
976 | lock_map_acquire(&timer->lockdep_map); | 975 | lock_map_acquire(&timer->lockdep_map); |
977 | lock_map_release(&timer->lockdep_map); | 976 | lock_map_release(&timer->lockdep_map); |
978 | _local_bh_enable(); | 977 | local_irq_restore(flags); |
979 | raw_local_irq_restore(flags); | ||
980 | #endif | 978 | #endif |
981 | /* | 979 | /* |
982 | * don't use it in hardirq context, because it | 980 | * don't use it in hardirq context, because it |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f37f974aa81..18bb15776c5 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
@@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) | |||
363 | goto out_save; | 363 | goto out_save; |
364 | } | 364 | } |
365 | 365 | ||
366 | printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", | 366 | |
367 | cpu, PTR_ERR(event)); | 367 | /* vary the KERN level based on the returned errno */ |
368 | if (PTR_ERR(event) == -EOPNOTSUPP) | ||
369 | printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
370 | else if (PTR_ERR(event) == -ENOENT) | ||
371 | printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); | ||
372 | else | ||
373 | printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); | ||
368 | return PTR_ERR(event); | 374 | return PTR_ERR(event); |
369 | 375 | ||
370 | /* success path */ | 376 | /* success path */ |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e62ddb8f24b..3e29781ee76 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1811,6 +1811,8 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 1811 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) | 1812 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
1813 | goto out; | 1813 | goto out; |
1814 | if (is_vma_temporary_stack(vma)) | ||
1815 | goto out; | ||
1814 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 1816 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
1815 | 1817 | ||
1816 | pgd = pgd_offset(mm, address); | 1818 | pgd = pgd_offset(mm, address); |
@@ -2032,32 +2034,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, | |||
2032 | if ((!(vma->vm_flags & VM_HUGEPAGE) && | 2034 | if ((!(vma->vm_flags & VM_HUGEPAGE) && |
2033 | !khugepaged_always()) || | 2035 | !khugepaged_always()) || |
2034 | (vma->vm_flags & VM_NOHUGEPAGE)) { | 2036 | (vma->vm_flags & VM_NOHUGEPAGE)) { |
2037 | skip: | ||
2035 | progress++; | 2038 | progress++; |
2036 | continue; | 2039 | continue; |
2037 | } | 2040 | } |
2038 | |||
2039 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ | 2041 | /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ |
2040 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { | 2042 | if (!vma->anon_vma || vma->vm_ops || vma->vm_file) |
2041 | khugepaged_scan.address = vma->vm_end; | 2043 | goto skip; |
2042 | progress++; | 2044 | if (is_vma_temporary_stack(vma)) |
2043 | continue; | 2045 | goto skip; |
2044 | } | 2046 | |
2045 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); | 2047 | VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); |
2046 | 2048 | ||
2047 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; | 2049 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
2048 | hend = vma->vm_end & HPAGE_PMD_MASK; | 2050 | hend = vma->vm_end & HPAGE_PMD_MASK; |
2049 | if (hstart >= hend) { | 2051 | if (hstart >= hend) |
2050 | progress++; | 2052 | goto skip; |
2051 | continue; | 2053 | if (khugepaged_scan.address > hend) |
2052 | } | 2054 | goto skip; |
2053 | if (khugepaged_scan.address < hstart) | 2055 | if (khugepaged_scan.address < hstart) |
2054 | khugepaged_scan.address = hstart; | 2056 | khugepaged_scan.address = hstart; |
2055 | if (khugepaged_scan.address > hend) { | 2057 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); |
2056 | khugepaged_scan.address = hend + HPAGE_PMD_SIZE; | ||
2057 | progress++; | ||
2058 | continue; | ||
2059 | } | ||
2060 | BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); | ||
2061 | 2058 | ||
2062 | while (khugepaged_scan.address < hend) { | 2059 | while (khugepaged_scan.address < hend) { |
2063 | int ret; | 2060 | int ret; |
@@ -2086,7 +2083,7 @@ breakouterloop: | |||
2086 | breakouterloop_mmap_sem: | 2083 | breakouterloop_mmap_sem: |
2087 | 2084 | ||
2088 | spin_lock(&khugepaged_mm_lock); | 2085 | spin_lock(&khugepaged_mm_lock); |
2089 | BUG_ON(khugepaged_scan.mm_slot != mm_slot); | 2086 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); |
2090 | /* | 2087 | /* |
2091 | * Release the current mm_slot if this mm is about to die, or | 2088 | * Release the current mm_slot if this mm is about to die, or |
2092 | * if we scanned all vmas of this mm. | 2089 | * if we scanned all vmas of this mm. |
@@ -2241,9 +2238,9 @@ static int khugepaged(void *none) | |||
2241 | 2238 | ||
2242 | for (;;) { | 2239 | for (;;) { |
2243 | mutex_unlock(&khugepaged_mutex); | 2240 | mutex_unlock(&khugepaged_mutex); |
2244 | BUG_ON(khugepaged_thread != current); | 2241 | VM_BUG_ON(khugepaged_thread != current); |
2245 | khugepaged_loop(); | 2242 | khugepaged_loop(); |
2246 | BUG_ON(khugepaged_thread != current); | 2243 | VM_BUG_ON(khugepaged_thread != current); |
2247 | 2244 | ||
2248 | mutex_lock(&khugepaged_mutex); | 2245 | mutex_lock(&khugepaged_mutex); |
2249 | if (!khugepaged_enabled()) | 2246 | if (!khugepaged_enabled()) |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index b2f729fdb31..60cac6f92e8 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -759,8 +759,8 @@ static int __cmd_record(int argc, const char **argv) | |||
759 | perf_session__process_machines(session, event__synthesize_guest_os); | 759 | perf_session__process_machines(session, event__synthesize_guest_os); |
760 | 760 | ||
761 | if (!system_wide) | 761 | if (!system_wide) |
762 | event__synthesize_thread(target_tid, process_synthesized_event, | 762 | event__synthesize_thread_map(threads, process_synthesized_event, |
763 | session); | 763 | session); |
764 | else | 764 | else |
765 | event__synthesize_threads(process_synthesized_event, session); | 765 | event__synthesize_threads(process_synthesized_event, session); |
766 | 766 | ||
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b6998e05576..5a29d9cd948 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -1306,7 +1306,7 @@ static int __cmd_top(void) | |||
1306 | return -ENOMEM; | 1306 | return -ENOMEM; |
1307 | 1307 | ||
1308 | if (target_tid != -1) | 1308 | if (target_tid != -1) |
1309 | event__synthesize_thread(target_tid, event__process, session); | 1309 | event__synthesize_thread_map(threads, event__process, session); |
1310 | else | 1310 | else |
1311 | event__synthesize_threads(event__process, session); | 1311 | event__synthesize_threads(event__process, session); |
1312 | 1312 | ||
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 1478ab4ee22..50d0a931497 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -263,11 +263,12 @@ static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event, | |||
263 | process, session); | 263 | process, session); |
264 | } | 264 | } |
265 | 265 | ||
266 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 266 | int event__synthesize_thread_map(struct thread_map *threads, |
267 | struct perf_session *session) | 267 | event__handler_t process, |
268 | struct perf_session *session) | ||
268 | { | 269 | { |
269 | event_t *comm_event, *mmap_event; | 270 | event_t *comm_event, *mmap_event; |
270 | int err = -1; | 271 | int err = -1, thread; |
271 | 272 | ||
272 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); | 273 | comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); |
273 | if (comm_event == NULL) | 274 | if (comm_event == NULL) |
@@ -277,8 +278,15 @@ int event__synthesize_thread(pid_t pid, event__handler_t process, | |||
277 | if (mmap_event == NULL) | 278 | if (mmap_event == NULL) |
278 | goto out_free_comm; | 279 | goto out_free_comm; |
279 | 280 | ||
280 | err = __event__synthesize_thread(comm_event, mmap_event, pid, | 281 | err = 0; |
281 | process, session); | 282 | for (thread = 0; thread < threads->nr; ++thread) { |
283 | if (__event__synthesize_thread(comm_event, mmap_event, | ||
284 | threads->map[thread], | ||
285 | process, session)) { | ||
286 | err = -1; | ||
287 | break; | ||
288 | } | ||
289 | } | ||
282 | free(mmap_event); | 290 | free(mmap_event); |
283 | out_free_comm: | 291 | out_free_comm: |
284 | free(comm_event); | 292 | free(comm_event); |
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 2b7e91902f1..cc7b52f9b49 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -135,14 +135,16 @@ typedef union event_union { | |||
135 | void event__print_totals(void); | 135 | void event__print_totals(void); |
136 | 136 | ||
137 | struct perf_session; | 137 | struct perf_session; |
138 | struct thread_map; | ||
138 | 139 | ||
139 | typedef int (*event__handler_synth_t)(event_t *event, | 140 | typedef int (*event__handler_synth_t)(event_t *event, |
140 | struct perf_session *session); | 141 | struct perf_session *session); |
141 | typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, | 142 | typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, |
142 | struct perf_session *session); | 143 | struct perf_session *session); |
143 | 144 | ||
144 | int event__synthesize_thread(pid_t pid, event__handler_t process, | 145 | int event__synthesize_thread_map(struct thread_map *threads, |
145 | struct perf_session *session); | 146 | event__handler_t process, |
147 | struct perf_session *session); | ||
146 | int event__synthesize_threads(event__handler_t process, | 148 | int event__synthesize_threads(event__handler_t process, |
147 | struct perf_session *session); | 149 | struct perf_session *session); |
148 | int event__synthesize_kernel_mmap(event__handler_t process, | 150 | int event__synthesize_kernel_mmap(event__handler_t process, |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 4c6983de6fd..362a0cb448d 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -72,7 +72,7 @@ int need_reinitialize; | |||
72 | 72 | ||
73 | int num_cpus; | 73 | int num_cpus; |
74 | 74 | ||
75 | typedef struct per_cpu_counters { | 75 | struct counters { |
76 | unsigned long long tsc; /* per thread */ | 76 | unsigned long long tsc; /* per thread */ |
77 | unsigned long long aperf; /* per thread */ | 77 | unsigned long long aperf; /* per thread */ |
78 | unsigned long long mperf; /* per thread */ | 78 | unsigned long long mperf; /* per thread */ |
@@ -88,13 +88,13 @@ typedef struct per_cpu_counters { | |||
88 | int pkg; | 88 | int pkg; |
89 | int core; | 89 | int core; |
90 | int cpu; | 90 | int cpu; |
91 | struct per_cpu_counters *next; | 91 | struct counters *next; |
92 | } PCC; | 92 | }; |
93 | 93 | ||
94 | PCC *pcc_even; | 94 | struct counters *cnt_even; |
95 | PCC *pcc_odd; | 95 | struct counters *cnt_odd; |
96 | PCC *pcc_delta; | 96 | struct counters *cnt_delta; |
97 | PCC *pcc_average; | 97 | struct counters *cnt_average; |
98 | struct timeval tv_even; | 98 | struct timeval tv_even; |
99 | struct timeval tv_odd; | 99 | struct timeval tv_odd; |
100 | struct timeval tv_delta; | 100 | struct timeval tv_delta; |
@@ -125,7 +125,7 @@ unsigned long long get_msr(int cpu, off_t offset) | |||
125 | return msr; | 125 | return msr; |
126 | } | 126 | } |
127 | 127 | ||
128 | void print_header() | 128 | void print_header(void) |
129 | { | 129 | { |
130 | if (show_pkg) | 130 | if (show_pkg) |
131 | fprintf(stderr, "pkg "); | 131 | fprintf(stderr, "pkg "); |
@@ -160,39 +160,39 @@ void print_header() | |||
160 | putc('\n', stderr); | 160 | putc('\n', stderr); |
161 | } | 161 | } |
162 | 162 | ||
163 | void dump_pcc(PCC *pcc) | 163 | void dump_cnt(struct counters *cnt) |
164 | { | 164 | { |
165 | fprintf(stderr, "package: %d ", pcc->pkg); | 165 | fprintf(stderr, "package: %d ", cnt->pkg); |
166 | fprintf(stderr, "core:: %d ", pcc->core); | 166 | fprintf(stderr, "core:: %d ", cnt->core); |
167 | fprintf(stderr, "CPU: %d ", pcc->cpu); | 167 | fprintf(stderr, "CPU: %d ", cnt->cpu); |
168 | fprintf(stderr, "TSC: %016llX\n", pcc->tsc); | 168 | fprintf(stderr, "TSC: %016llX\n", cnt->tsc); |
169 | fprintf(stderr, "c3: %016llX\n", pcc->c3); | 169 | fprintf(stderr, "c3: %016llX\n", cnt->c3); |
170 | fprintf(stderr, "c6: %016llX\n", pcc->c6); | 170 | fprintf(stderr, "c6: %016llX\n", cnt->c6); |
171 | fprintf(stderr, "c7: %016llX\n", pcc->c7); | 171 | fprintf(stderr, "c7: %016llX\n", cnt->c7); |
172 | fprintf(stderr, "aperf: %016llX\n", pcc->aperf); | 172 | fprintf(stderr, "aperf: %016llX\n", cnt->aperf); |
173 | fprintf(stderr, "pc2: %016llX\n", pcc->pc2); | 173 | fprintf(stderr, "pc2: %016llX\n", cnt->pc2); |
174 | fprintf(stderr, "pc3: %016llX\n", pcc->pc3); | 174 | fprintf(stderr, "pc3: %016llX\n", cnt->pc3); |
175 | fprintf(stderr, "pc6: %016llX\n", pcc->pc6); | 175 | fprintf(stderr, "pc6: %016llX\n", cnt->pc6); |
176 | fprintf(stderr, "pc7: %016llX\n", pcc->pc7); | 176 | fprintf(stderr, "pc7: %016llX\n", cnt->pc7); |
177 | fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr); | 177 | fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr); |
178 | } | 178 | } |
179 | 179 | ||
180 | void dump_list(PCC *pcc) | 180 | void dump_list(struct counters *cnt) |
181 | { | 181 | { |
182 | printf("dump_list 0x%p\n", pcc); | 182 | printf("dump_list 0x%p\n", cnt); |
183 | 183 | ||
184 | for (; pcc; pcc = pcc->next) | 184 | for (; cnt; cnt = cnt->next) |
185 | dump_pcc(pcc); | 185 | dump_cnt(cnt); |
186 | } | 186 | } |
187 | 187 | ||
188 | void print_pcc(PCC *p) | 188 | void print_cnt(struct counters *p) |
189 | { | 189 | { |
190 | double interval_float; | 190 | double interval_float; |
191 | 191 | ||
192 | interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; | 192 | interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; |
193 | 193 | ||
194 | /* topology columns, print blanks on 1st (average) line */ | 194 | /* topology columns, print blanks on 1st (average) line */ |
195 | if (p == pcc_average) { | 195 | if (p == cnt_average) { |
196 | if (show_pkg) | 196 | if (show_pkg) |
197 | fprintf(stderr, " "); | 197 | fprintf(stderr, " "); |
198 | if (show_core) | 198 | if (show_core) |
@@ -262,24 +262,24 @@ void print_pcc(PCC *p) | |||
262 | putc('\n', stderr); | 262 | putc('\n', stderr); |
263 | } | 263 | } |
264 | 264 | ||
265 | void print_counters(PCC *cnt) | 265 | void print_counters(struct counters *counters) |
266 | { | 266 | { |
267 | PCC *pcc; | 267 | struct counters *cnt; |
268 | 268 | ||
269 | print_header(); | 269 | print_header(); |
270 | 270 | ||
271 | if (num_cpus > 1) | 271 | if (num_cpus > 1) |
272 | print_pcc(pcc_average); | 272 | print_cnt(cnt_average); |
273 | 273 | ||
274 | for (pcc = cnt; pcc != NULL; pcc = pcc->next) | 274 | for (cnt = counters; cnt != NULL; cnt = cnt->next) |
275 | print_pcc(pcc); | 275 | print_cnt(cnt); |
276 | 276 | ||
277 | } | 277 | } |
278 | 278 | ||
279 | #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) | 279 | #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) |
280 | 280 | ||
281 | 281 | int compute_delta(struct counters *after, | |
282 | int compute_delta(PCC *after, PCC *before, PCC *delta) | 282 | struct counters *before, struct counters *delta) |
283 | { | 283 | { |
284 | int errors = 0; | 284 | int errors = 0; |
285 | int perf_err = 0; | 285 | int perf_err = 0; |
@@ -391,20 +391,20 @@ int compute_delta(PCC *after, PCC *before, PCC *delta) | |||
391 | delta->extra_msr = after->extra_msr; | 391 | delta->extra_msr = after->extra_msr; |
392 | if (errors) { | 392 | if (errors) { |
393 | fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); | 393 | fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); |
394 | dump_pcc(before); | 394 | dump_cnt(before); |
395 | fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); | 395 | fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); |
396 | dump_pcc(after); | 396 | dump_cnt(after); |
397 | errors = 0; | 397 | errors = 0; |
398 | } | 398 | } |
399 | } | 399 | } |
400 | return 0; | 400 | return 0; |
401 | } | 401 | } |
402 | 402 | ||
403 | void compute_average(PCC *delta, PCC *avg) | 403 | void compute_average(struct counters *delta, struct counters *avg) |
404 | { | 404 | { |
405 | PCC *sum; | 405 | struct counters *sum; |
406 | 406 | ||
407 | sum = calloc(1, sizeof(PCC)); | 407 | sum = calloc(1, sizeof(struct counters)); |
408 | if (sum == NULL) { | 408 | if (sum == NULL) { |
409 | perror("calloc sum"); | 409 | perror("calloc sum"); |
410 | exit(1); | 410 | exit(1); |
@@ -438,35 +438,34 @@ void compute_average(PCC *delta, PCC *avg) | |||
438 | free(sum); | 438 | free(sum); |
439 | } | 439 | } |
440 | 440 | ||
441 | void get_counters(PCC *pcc) | 441 | void get_counters(struct counters *cnt) |
442 | { | 442 | { |
443 | for ( ; pcc; pcc = pcc->next) { | 443 | for ( ; cnt; cnt = cnt->next) { |
444 | pcc->tsc = get_msr(pcc->cpu, MSR_TSC); | 444 | cnt->tsc = get_msr(cnt->cpu, MSR_TSC); |
445 | if (do_nhm_cstates) | 445 | if (do_nhm_cstates) |
446 | pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY); | 446 | cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY); |
447 | if (do_nhm_cstates) | 447 | if (do_nhm_cstates) |
448 | pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY); | 448 | cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY); |
449 | if (do_snb_cstates) | 449 | if (do_snb_cstates) |
450 | pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY); | 450 | cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY); |
451 | if (has_aperf) | 451 | if (has_aperf) |
452 | pcc->aperf = get_msr(pcc->cpu, MSR_APERF); | 452 | cnt->aperf = get_msr(cnt->cpu, MSR_APERF); |
453 | if (has_aperf) | 453 | if (has_aperf) |
454 | pcc->mperf = get_msr(pcc->cpu, MSR_MPERF); | 454 | cnt->mperf = get_msr(cnt->cpu, MSR_MPERF); |
455 | if (do_snb_cstates) | 455 | if (do_snb_cstates) |
456 | pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY); | 456 | cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY); |
457 | if (do_nhm_cstates) | 457 | if (do_nhm_cstates) |
458 | pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY); | 458 | cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY); |
459 | if (do_nhm_cstates) | 459 | if (do_nhm_cstates) |
460 | pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY); | 460 | cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY); |
461 | if (do_snb_cstates) | 461 | if (do_snb_cstates) |
462 | pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY); | 462 | cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY); |
463 | if (extra_msr_offset) | 463 | if (extra_msr_offset) |
464 | pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset); | 464 | cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset); |
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
468 | 468 | void print_nehalem_info(void) | |
469 | void print_nehalem_info() | ||
470 | { | 469 | { |
471 | unsigned long long msr; | 470 | unsigned long long msr; |
472 | unsigned int ratio; | 471 | unsigned int ratio; |
@@ -514,38 +513,38 @@ void print_nehalem_info() | |||
514 | 513 | ||
515 | } | 514 | } |
516 | 515 | ||
517 | void free_counter_list(PCC *list) | 516 | void free_counter_list(struct counters *list) |
518 | { | 517 | { |
519 | PCC *p; | 518 | struct counters *p; |
520 | 519 | ||
521 | for (p = list; p; ) { | 520 | for (p = list; p; ) { |
522 | PCC *free_me; | 521 | struct counters *free_me; |
523 | 522 | ||
524 | free_me = p; | 523 | free_me = p; |
525 | p = p->next; | 524 | p = p->next; |
526 | free(free_me); | 525 | free(free_me); |
527 | } | 526 | } |
528 | return; | ||
529 | } | 527 | } |
530 | 528 | ||
531 | void free_all_counters(void) | 529 | void free_all_counters(void) |
532 | { | 530 | { |
533 | free_counter_list(pcc_even); | 531 | free_counter_list(cnt_even); |
534 | pcc_even = NULL; | 532 | cnt_even = NULL; |
535 | 533 | ||
536 | free_counter_list(pcc_odd); | 534 | free_counter_list(cnt_odd); |
537 | pcc_odd = NULL; | 535 | cnt_odd = NULL; |
538 | 536 | ||
539 | free_counter_list(pcc_delta); | 537 | free_counter_list(cnt_delta); |
540 | pcc_delta = NULL; | 538 | cnt_delta = NULL; |
541 | 539 | ||
542 | free_counter_list(pcc_average); | 540 | free_counter_list(cnt_average); |
543 | pcc_average = NULL; | 541 | cnt_average = NULL; |
544 | } | 542 | } |
545 | 543 | ||
546 | void insert_cpu_counters(PCC **list, PCC *new) | 544 | void insert_counters(struct counters **list, |
545 | struct counters *new) | ||
547 | { | 546 | { |
548 | PCC *prev; | 547 | struct counters *prev; |
549 | 548 | ||
550 | /* | 549 | /* |
551 | * list was empty | 550 | * list was empty |
@@ -594,18 +593,16 @@ void insert_cpu_counters(PCC **list, PCC *new) | |||
594 | */ | 593 | */ |
595 | new->next = prev->next; | 594 | new->next = prev->next; |
596 | prev->next = new; | 595 | prev->next = new; |
597 | |||
598 | return; | ||
599 | } | 596 | } |
600 | 597 | ||
601 | void alloc_new_cpu_counters(int pkg, int core, int cpu) | 598 | void alloc_new_counters(int pkg, int core, int cpu) |
602 | { | 599 | { |
603 | PCC *new; | 600 | struct counters *new; |
604 | 601 | ||
605 | if (verbose > 1) | 602 | if (verbose > 1) |
606 | printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); | 603 | printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); |
607 | 604 | ||
608 | new = (PCC *)calloc(1, sizeof(PCC)); | 605 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
609 | if (new == NULL) { | 606 | if (new == NULL) { |
610 | perror("calloc"); | 607 | perror("calloc"); |
611 | exit(1); | 608 | exit(1); |
@@ -613,9 +610,10 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
613 | new->pkg = pkg; | 610 | new->pkg = pkg; |
614 | new->core = core; | 611 | new->core = core; |
615 | new->cpu = cpu; | 612 | new->cpu = cpu; |
616 | insert_cpu_counters(&pcc_odd, new); | 613 | insert_counters(&cnt_odd, new); |
617 | 614 | ||
618 | new = (PCC *)calloc(1, sizeof(PCC)); | 615 | new = (struct counters *)calloc(1, |
616 | sizeof(struct counters)); | ||
619 | if (new == NULL) { | 617 | if (new == NULL) { |
620 | perror("calloc"); | 618 | perror("calloc"); |
621 | exit(1); | 619 | exit(1); |
@@ -623,9 +621,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
623 | new->pkg = pkg; | 621 | new->pkg = pkg; |
624 | new->core = core; | 622 | new->core = core; |
625 | new->cpu = cpu; | 623 | new->cpu = cpu; |
626 | insert_cpu_counters(&pcc_even, new); | 624 | insert_counters(&cnt_even, new); |
627 | 625 | ||
628 | new = (PCC *)calloc(1, sizeof(PCC)); | 626 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
629 | if (new == NULL) { | 627 | if (new == NULL) { |
630 | perror("calloc"); | 628 | perror("calloc"); |
631 | exit(1); | 629 | exit(1); |
@@ -633,9 +631,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
633 | new->pkg = pkg; | 631 | new->pkg = pkg; |
634 | new->core = core; | 632 | new->core = core; |
635 | new->cpu = cpu; | 633 | new->cpu = cpu; |
636 | insert_cpu_counters(&pcc_delta, new); | 634 | insert_counters(&cnt_delta, new); |
637 | 635 | ||
638 | new = (PCC *)calloc(1, sizeof(PCC)); | 636 | new = (struct counters *)calloc(1, sizeof(struct counters)); |
639 | if (new == NULL) { | 637 | if (new == NULL) { |
640 | perror("calloc"); | 638 | perror("calloc"); |
641 | exit(1); | 639 | exit(1); |
@@ -643,7 +641,7 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu) | |||
643 | new->pkg = pkg; | 641 | new->pkg = pkg; |
644 | new->core = core; | 642 | new->core = core; |
645 | new->cpu = cpu; | 643 | new->cpu = cpu; |
646 | pcc_average = new; | 644 | cnt_average = new; |
647 | } | 645 | } |
648 | 646 | ||
649 | int get_physical_package_id(int cpu) | 647 | int get_physical_package_id(int cpu) |
@@ -719,7 +717,7 @@ void re_initialize(void) | |||
719 | { | 717 | { |
720 | printf("turbostat: topology changed, re-initializing.\n"); | 718 | printf("turbostat: topology changed, re-initializing.\n"); |
721 | free_all_counters(); | 719 | free_all_counters(); |
722 | num_cpus = for_all_cpus(alloc_new_cpu_counters); | 720 | num_cpus = for_all_cpus(alloc_new_counters); |
723 | need_reinitialize = 0; | 721 | need_reinitialize = 0; |
724 | printf("num_cpus is now %d\n", num_cpus); | 722 | printf("num_cpus is now %d\n", num_cpus); |
725 | } | 723 | } |
@@ -728,7 +726,7 @@ void dummy(int pkg, int core, int cpu) { return; } | |||
728 | /* | 726 | /* |
729 | * check to see if a cpu came on-line | 727 | * check to see if a cpu came on-line |
730 | */ | 728 | */ |
731 | void verify_num_cpus() | 729 | void verify_num_cpus(void) |
732 | { | 730 | { |
733 | int new_num_cpus; | 731 | int new_num_cpus; |
734 | 732 | ||
@@ -740,14 +738,12 @@ void verify_num_cpus() | |||
740 | num_cpus, new_num_cpus); | 738 | num_cpus, new_num_cpus); |
741 | need_reinitialize = 1; | 739 | need_reinitialize = 1; |
742 | } | 740 | } |
743 | |||
744 | return; | ||
745 | } | 741 | } |
746 | 742 | ||
747 | void turbostat_loop() | 743 | void turbostat_loop() |
748 | { | 744 | { |
749 | restart: | 745 | restart: |
750 | get_counters(pcc_even); | 746 | get_counters(cnt_even); |
751 | gettimeofday(&tv_even, (struct timezone *)NULL); | 747 | gettimeofday(&tv_even, (struct timezone *)NULL); |
752 | 748 | ||
753 | while (1) { | 749 | while (1) { |
@@ -757,24 +753,24 @@ restart: | |||
757 | goto restart; | 753 | goto restart; |
758 | } | 754 | } |
759 | sleep(interval_sec); | 755 | sleep(interval_sec); |
760 | get_counters(pcc_odd); | 756 | get_counters(cnt_odd); |
761 | gettimeofday(&tv_odd, (struct timezone *)NULL); | 757 | gettimeofday(&tv_odd, (struct timezone *)NULL); |
762 | 758 | ||
763 | compute_delta(pcc_odd, pcc_even, pcc_delta); | 759 | compute_delta(cnt_odd, cnt_even, cnt_delta); |
764 | timersub(&tv_odd, &tv_even, &tv_delta); | 760 | timersub(&tv_odd, &tv_even, &tv_delta); |
765 | compute_average(pcc_delta, pcc_average); | 761 | compute_average(cnt_delta, cnt_average); |
766 | print_counters(pcc_delta); | 762 | print_counters(cnt_delta); |
767 | if (need_reinitialize) { | 763 | if (need_reinitialize) { |
768 | re_initialize(); | 764 | re_initialize(); |
769 | goto restart; | 765 | goto restart; |
770 | } | 766 | } |
771 | sleep(interval_sec); | 767 | sleep(interval_sec); |
772 | get_counters(pcc_even); | 768 | get_counters(cnt_even); |
773 | gettimeofday(&tv_even, (struct timezone *)NULL); | 769 | gettimeofday(&tv_even, (struct timezone *)NULL); |
774 | compute_delta(pcc_even, pcc_odd, pcc_delta); | 770 | compute_delta(cnt_even, cnt_odd, cnt_delta); |
775 | timersub(&tv_even, &tv_odd, &tv_delta); | 771 | timersub(&tv_even, &tv_odd, &tv_delta); |
776 | compute_average(pcc_delta, pcc_average); | 772 | compute_average(cnt_delta, cnt_average); |
777 | print_counters(pcc_delta); | 773 | print_counters(cnt_delta); |
778 | } | 774 | } |
779 | } | 775 | } |
780 | 776 | ||
@@ -892,7 +888,7 @@ void check_cpuid() | |||
892 | * this check is valid for both Intel and AMD | 888 | * this check is valid for both Intel and AMD |
893 | */ | 889 | */ |
894 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); | 890 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); |
895 | has_invariant_tsc = edx && (1 << 8); | 891 | has_invariant_tsc = edx & (1 << 8); |
896 | 892 | ||
897 | if (!has_invariant_tsc) { | 893 | if (!has_invariant_tsc) { |
898 | fprintf(stderr, "No invariant TSC\n"); | 894 | fprintf(stderr, "No invariant TSC\n"); |
@@ -905,7 +901,7 @@ void check_cpuid() | |||
905 | */ | 901 | */ |
906 | 902 | ||
907 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); | 903 | asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); |
908 | has_aperf = ecx && (1 << 0); | 904 | has_aperf = ecx & (1 << 0); |
909 | if (!has_aperf) { | 905 | if (!has_aperf) { |
910 | fprintf(stderr, "No APERF MSR\n"); | 906 | fprintf(stderr, "No APERF MSR\n"); |
911 | exit(1); | 907 | exit(1); |
@@ -952,7 +948,7 @@ void turbostat_init() | |||
952 | check_dev_msr(); | 948 | check_dev_msr(); |
953 | check_super_user(); | 949 | check_super_user(); |
954 | 950 | ||
955 | num_cpus = for_all_cpus(alloc_new_cpu_counters); | 951 | num_cpus = for_all_cpus(alloc_new_counters); |
956 | 952 | ||
957 | if (verbose) | 953 | if (verbose) |
958 | print_nehalem_info(); | 954 | print_nehalem_info(); |
@@ -962,7 +958,7 @@ int fork_it(char **argv) | |||
962 | { | 958 | { |
963 | int retval; | 959 | int retval; |
964 | pid_t child_pid; | 960 | pid_t child_pid; |
965 | get_counters(pcc_even); | 961 | get_counters(cnt_even); |
966 | gettimeofday(&tv_even, (struct timezone *)NULL); | 962 | gettimeofday(&tv_even, (struct timezone *)NULL); |
967 | 963 | ||
968 | child_pid = fork(); | 964 | child_pid = fork(); |
@@ -985,14 +981,14 @@ int fork_it(char **argv) | |||
985 | exit(1); | 981 | exit(1); |
986 | } | 982 | } |
987 | } | 983 | } |
988 | get_counters(pcc_odd); | 984 | get_counters(cnt_odd); |
989 | gettimeofday(&tv_odd, (struct timezone *)NULL); | 985 | gettimeofday(&tv_odd, (struct timezone *)NULL); |
990 | retval = compute_delta(pcc_odd, pcc_even, pcc_delta); | 986 | retval = compute_delta(cnt_odd, cnt_even, cnt_delta); |
991 | 987 | ||
992 | timersub(&tv_odd, &tv_even, &tv_delta); | 988 | timersub(&tv_odd, &tv_even, &tv_delta); |
993 | compute_average(pcc_delta, pcc_average); | 989 | compute_average(cnt_delta, cnt_average); |
994 | if (!retval) | 990 | if (!retval) |
995 | print_counters(pcc_delta); | 991 | print_counters(cnt_delta); |
996 | 992 | ||
997 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);; | 993 | fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);; |
998 | 994 | ||