120 files changed, 1028 insertions, 546 deletions
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 3 | 1 | VERSION = 3 |
| 2 | PATCHLEVEL = 10 | 2 | PATCHLEVEL = 10 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = -rc7 |
| 5 | NAME = Unicycling Gorilla | 5 | NAME = Unicycling Gorilla |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 49d993cee512..2651b1da1c56 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
| @@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369 | |||
| 1189 | is not correctly implemented in PL310 as clean lines are not | 1189 | is not correctly implemented in PL310 as clean lines are not |
| 1190 | invalidated as a result of these operations. | 1190 | invalidated as a result of these operations. |
| 1191 | 1191 | ||
| 1192 | config ARM_ERRATA_643719 | ||
| 1193 | bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" | ||
| 1194 | depends on CPU_V7 && SMP | ||
| 1195 | help | ||
| 1196 | This option enables the workaround for the 643719 Cortex-A9 (prior to | ||
| 1197 | r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR | ||
| 1198 | register returns zero when it should return one. The workaround | ||
| 1199 | corrects this value, ensuring cache maintenance operations which use | ||
| 1200 | it behave as intended and avoiding data corruption. | ||
| 1201 | |||
| 1192 | config ARM_ERRATA_720789 | 1202 | config ARM_ERRATA_720789 |
| 1193 | bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" | 1203 | bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" |
| 1194 | depends on CPU_V7 | 1204 | depends on CPU_V7 |
| @@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR | |||
| 2006 | 2016 | ||
| 2007 | config KEXEC | 2017 | config KEXEC |
| 2008 | bool "Kexec system call (EXPERIMENTAL)" | 2018 | bool "Kexec system call (EXPERIMENTAL)" |
| 2009 | depends on (!SMP || HOTPLUG_CPU) | 2019 | depends on (!SMP || PM_SLEEP_SMP) |
| 2010 | help | 2020 | help |
| 2011 | kexec is a system call that implements the ability to shutdown your | 2021 | kexec is a system call that implements the ability to shutdown your |
| 2012 | current kernel, and to start another kernel. It is like a reboot | 2022 | current kernel, and to start another kernel. It is like a reboot |
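For orientation, the 643719 workaround added further down (in arch/arm/mm/cache-v7.S) keys off two CP15 registers: CLIDR, whose LoUIS field sits in bits [23:21], and the main ID register, where an affected part reads 0x410fc09x (Cortex-A9, r0, any patch level). A C-level sketch of the same check, using hypothetical clidr/midr values in place of the real register reads:

    /* Illustrative only; the in-kernel fix is the assembly added to v7_flush_dcache_louis. */
    #define CLIDR_LOUIS_SHIFT   21
    #define CLIDR_LOUIS_MASK    (7u << CLIDR_LOUIS_SHIFT)
    #define MIDR_CORTEX_A9_R0   0x410fc090u     /* minor revision nibble cleared */

    static unsigned int louis_with_643719_fixup(unsigned int clidr, unsigned int midr)
    {
            unsigned int louis = (clidr & CLIDR_LOUIS_MASK) >> CLIDR_LOUIS_SHIFT;

            /* Affected r0pX Cortex-A9 cores report LoUIS == 0 when it should be 1. */
            if (louis == 0 && (midr & ~0xfu) == MIDR_CORTEX_A9_R0)
                    louis = 1;

            return louis;
    }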
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 79e9bdbfc491..120b83bfde20 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
| @@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \ | |||
| 116 | 116 | ||
| 117 | # Make sure files are removed during clean | 117 | # Make sure files are removed during clean |
| 118 | extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ | 118 | extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ |
| 119 | lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) | 119 | lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \ |
| 120 | hyp-stub.S | ||
| 120 | 121 | ||
| 121 | ifeq ($(CONFIG_FUNCTION_TRACER),y) | 122 | ifeq ($(CONFIG_FUNCTION_TRACER),y) |
| 122 | ORIG_CFLAGS := $(KBUILD_CFLAGS) | 123 | ORIG_CFLAGS := $(KBUILD_CFLAGS) |
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi index d1650fb34c0a..ded558bb0f3b 100644 --- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi +++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi | |||
| @@ -763,7 +763,7 @@ | |||
| 763 | }; | 763 | }; |
| 764 | }; | 764 | }; |
| 765 | 765 | ||
| 766 | pinctrl@03680000 { | 766 | pinctrl@03860000 { |
| 767 | gpz: gpz { | 767 | gpz: gpz { |
| 768 | gpio-controller; | 768 | gpio-controller; |
| 769 | #gpio-cells = <2>; | 769 | #gpio-cells = <2>; |
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 0673524238a6..fc9fb3d526e2 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi | |||
| @@ -161,9 +161,9 @@ | |||
| 161 | interrupts = <0 50 0>; | 161 | interrupts = <0 50 0>; |
| 162 | }; | 162 | }; |
| 163 | 163 | ||
| 164 | pinctrl_3: pinctrl@03680000 { | 164 | pinctrl_3: pinctrl@03860000 { |
| 165 | compatible = "samsung,exynos5250-pinctrl"; | 165 | compatible = "samsung,exynos5250-pinctrl"; |
| 166 | reg = <0x0368000 0x1000>; | 166 | reg = <0x03860000 0x1000>; |
| 167 | interrupts = <0 47 0>; | 167 | interrupts = <0 47 0>; |
| 168 | }; | 168 | }; |
| 169 | 169 | ||
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index bff71388e72a..17d0ae8672fa 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h | |||
| @@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, | |||
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | 322 | #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE |
| 323 | static inline void flush_kernel_dcache_page(struct page *page) | 323 | extern void flush_kernel_dcache_page(struct page *); |
| 324 | { | ||
| 325 | } | ||
| 326 | 324 | ||
| 327 | #define flush_dcache_mmap_lock(mapping) \ | 325 | #define flush_dcache_mmap_lock(mapping) \ |
| 328 | spin_lock_irq(&(mapping)->tree_lock) | 326 | spin_lock_irq(&(mapping)->tree_lock) |
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 8ef8c9337809..4fb074c446bf 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
| @@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image) | |||
| 134 | unsigned long reboot_code_buffer_phys; | 134 | unsigned long reboot_code_buffer_phys; |
| 135 | void *reboot_code_buffer; | 135 | void *reboot_code_buffer; |
| 136 | 136 | ||
| 137 | if (num_online_cpus() > 1) { | ||
| 138 | pr_err("kexec: error: multiple CPUs still online\n"); | ||
| 139 | return; | ||
| 140 | } | ||
| 137 | 141 | ||
| 138 | page_list = image->head & PAGE_MASK; | 142 | page_list = image->head & PAGE_MASK; |
| 139 | 143 | ||
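The new num_online_cpus() check relies on the generic kexec path having already offlined the secondary CPUs: kernel_kexec() invokes machine_shutdown() (which, with the process.c change below, goes through disable_nonboot_cpus()) before it ever calls machine_kexec(). A heavily reduced sketch of that ordering, not the actual kernel/kexec.c code:

    /* Hypothetical condensation of the arch-independent caller. */
    static void kexec_path_sketch(struct kimage *image)
    {
            machine_shutdown();     /* on ARM now: disable_nonboot_cpus() */

            /* With this patch, machine_kexec() bails out (with the pr_err above)
             * if any secondary CPU somehow survived the shutdown step. */
            machine_kexec(image);   /* does not return on success */
    }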
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 282de4826abb..6e8931ccf13e 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
| @@ -184,30 +184,61 @@ int __init reboot_setup(char *str) | |||
| 184 | 184 | ||
| 185 | __setup("reboot=", reboot_setup); | 185 | __setup("reboot=", reboot_setup); |
| 186 | 186 | ||
| 187 | /* | ||
| 188 | * Called by kexec, immediately prior to machine_kexec(). | ||
| 189 | * | ||
| 190 | * This must completely disable all secondary CPUs; simply causing those CPUs | ||
| 191 | * to execute e.g. a RAM-based pin loop is not sufficient. This allows the | ||
| 192 | * kexec'd kernel to use any and all RAM as it sees fit, without having to | ||
| 193 | * avoid any code or data used by any SW CPU pin loop. The CPU hotplug | ||
| 194 | * functionality embodied in disable_nonboot_cpus() to achieve this. | ||
| 195 | */ | ||
| 187 | void machine_shutdown(void) | 196 | void machine_shutdown(void) |
| 188 | { | 197 | { |
| 189 | #ifdef CONFIG_SMP | 198 | disable_nonboot_cpus(); |
| 190 | smp_send_stop(); | ||
| 191 | #endif | ||
| 192 | } | 199 | } |
| 193 | 200 | ||
| 201 | /* | ||
| 202 | * Halting simply requires that the secondary CPUs stop performing any | ||
| 203 | * activity (executing tasks, handling interrupts). smp_send_stop() | ||
| 204 | * achieves this. | ||
| 205 | */ | ||
| 194 | void machine_halt(void) | 206 | void machine_halt(void) |
| 195 | { | 207 | { |
| 196 | machine_shutdown(); | 208 | smp_send_stop(); |
| 209 | |||
| 197 | local_irq_disable(); | 210 | local_irq_disable(); |
| 198 | while (1); | 211 | while (1); |
| 199 | } | 212 | } |
| 200 | 213 | ||
| 214 | /* | ||
| 215 | * Power-off simply requires that the secondary CPUs stop performing any | ||
| 216 | * activity (executing tasks, handling interrupts). smp_send_stop() | ||
| 217 | * achieves this. When the system power is turned off, it will take all CPUs | ||
| 218 | * with it. | ||
| 219 | */ | ||
| 201 | void machine_power_off(void) | 220 | void machine_power_off(void) |
| 202 | { | 221 | { |
| 203 | machine_shutdown(); | 222 | smp_send_stop(); |
| 223 | |||
| 204 | if (pm_power_off) | 224 | if (pm_power_off) |
| 205 | pm_power_off(); | 225 | pm_power_off(); |
| 206 | } | 226 | } |
| 207 | 227 | ||
| 228 | /* | ||
| 229 | * Restart requires that the secondary CPUs stop performing any activity | ||
| 230 | * while the primary CPU resets the system. Systems with a single CPU can | ||
| 231 | * use soft_restart() as their machine descriptor's .restart hook, since that | ||
| 232 | * will cause the only available CPU to reset. Systems with multiple CPUs must | ||
| 233 | * provide a HW restart implementation, to ensure that all CPUs reset at once. | ||
| 234 | * This is required so that any code running after reset on the primary CPU | ||
| 235 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still | ||
| 236 | * executing pre-reset code, and using RAM that the primary CPU's code wishes | ||
| 237 | * to use. Implementing such co-ordination would be essentially impossible. | ||
| 238 | */ | ||
| 208 | void machine_restart(char *cmd) | 239 | void machine_restart(char *cmd) |
| 209 | { | 240 | { |
| 210 | machine_shutdown(); | 241 | smp_send_stop(); |
| 211 | 242 | ||
| 212 | arm_pm_restart(reboot_mode, cmd); | 243 | arm_pm_restart(reboot_mode, cmd); |
| 213 | 244 | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 550d63cef68e..5919eb451bb9 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
| @@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu) | |||
| 651 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); | 651 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); |
| 652 | } | 652 | } |
| 653 | 653 | ||
| 654 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 655 | static void smp_kill_cpus(cpumask_t *mask) | ||
| 656 | { | ||
| 657 | unsigned int cpu; | ||
| 658 | for_each_cpu(cpu, mask) | ||
| 659 | platform_cpu_kill(cpu); | ||
| 660 | } | ||
| 661 | #else | ||
| 662 | static void smp_kill_cpus(cpumask_t *mask) { } | ||
| 663 | #endif | ||
| 664 | |||
| 665 | void smp_send_stop(void) | 654 | void smp_send_stop(void) |
| 666 | { | 655 | { |
| 667 | unsigned long timeout; | 656 | unsigned long timeout; |
| @@ -679,8 +668,6 @@ void smp_send_stop(void) | |||
| 679 | 668 | ||
| 680 | if (num_online_cpus() > 1) | 669 | if (num_online_cpus() > 1) |
| 681 | pr_warning("SMP: failed to stop secondary CPUs\n"); | 670 | pr_warning("SMP: failed to stop secondary CPUs\n"); |
| 682 | |||
| 683 | smp_kill_cpus(&mask); | ||
| 684 | } | 671 | } |
| 685 | 672 | ||
| 686 | /* | 673 | /* |
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 15451ee4acc8..515b00064da8 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
| @@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis) | |||
| 92 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr | 92 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr |
| 93 | ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr | 93 | ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr |
| 94 | ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr | 94 | ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr |
| 95 | #ifdef CONFIG_ARM_ERRATA_643719 | ||
| 96 | ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register | ||
| 97 | ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do | ||
| 98 | ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? | ||
| 99 | biceq r2, r2, #0x0000000f @ clear minor revision number | ||
| 100 | teqeq r2, r1 @ test for errata affected core and if so... | ||
| 101 | orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') | ||
| 102 | #endif | ||
| 95 | ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 | 103 | ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 |
| 96 | ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 | 104 | ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 |
| 97 | moveq pc, lr @ return if level == 0 | 105 | moveq pc, lr @ return if level == 0 |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 0d473cce501c..32aa5861119f 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
| @@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page) | |||
| 301 | EXPORT_SYMBOL(flush_dcache_page); | 301 | EXPORT_SYMBOL(flush_dcache_page); |
| 302 | 302 | ||
| 303 | /* | 303 | /* |
| 304 | * Ensure cache coherency for the kernel mapping of this page. We can | ||
| 305 | * assume that the page is pinned via kmap. | ||
| 306 | * | ||
| 307 | * If the page only exists in the page cache and there are no user | ||
| 308 | * space mappings, this is a no-op since the page was already marked | ||
| 309 | * dirty at creation. Otherwise, we need to flush the dirty kernel | ||
| 310 | * cache lines directly. | ||
| 311 | */ | ||
| 312 | void flush_kernel_dcache_page(struct page *page) | ||
| 313 | { | ||
| 314 | if (cache_is_vivt() || cache_is_vipt_aliasing()) { | ||
| 315 | struct address_space *mapping; | ||
| 316 | |||
| 317 | mapping = page_mapping(page); | ||
| 318 | |||
| 319 | if (!mapping || mapping_mapped(mapping)) { | ||
| 320 | void *addr; | ||
| 321 | |||
| 322 | addr = page_address(page); | ||
| 323 | /* | ||
| 324 | * kmap_atomic() doesn't set the page virtual | ||
| 325 | * address for highmem pages, and | ||
| 326 | * kunmap_atomic() takes care of cache | ||
| 327 | * flushing already. | ||
| 328 | */ | ||
| 329 | if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) | ||
| 330 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
| 331 | } | ||
| 332 | } | ||
| 333 | } | ||
| 334 | EXPORT_SYMBOL(flush_kernel_dcache_page); | ||
| 335 | |||
| 336 | /* | ||
| 304 | * Flush an anonymous page so that users of get_user_pages() | 337 | * Flush an anonymous page so that users of get_user_pages() |
| 305 | * can safely access the data. The expected sequence is: | 338 | * can safely access the data. The expected sequence is: |
| 306 | * | 339 | * |
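flush_kernel_dcache_page() is meant for code that dirties a page-cache page through its kernel mapping (typically under kmap()) and must push those writes out before user space sees the page through an aliasing mapping; turning it from an empty inline into the real flush above is what fixes the corruption. A hedged usage sketch, with a plain memcpy() standing in for whatever copy a real driver would do:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Sketch only: write into a page via its kernel alias, then flush. */
    static void copy_into_page(struct page *page, const void *src, size_t len)
    {
            void *dst = kmap(page);

            memcpy(dst, src, len);          /* dirties the kernel-side alias */
            flush_kernel_dcache_page(page); /* no longer a no-op on ARM */
            kunmap(page);
    }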
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565671a6..4d409e6a552d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
| 616 | } while (pte++, addr += PAGE_SIZE, addr != end); | 616 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| 617 | } | 617 | } |
| 618 | 618 | ||
| 619 | static void __init map_init_section(pmd_t *pmd, unsigned long addr, | 619 | static void __init __map_init_section(pmd_t *pmd, unsigned long addr, |
| 620 | unsigned long end, phys_addr_t phys, | 620 | unsigned long end, phys_addr_t phys, |
| 621 | const struct mem_type *type) | 621 | const struct mem_type *type) |
| 622 | { | 622 | { |
| 623 | pmd_t *p = pmd; | ||
| 624 | |||
| 623 | #ifndef CONFIG_ARM_LPAE | 625 | #ifndef CONFIG_ARM_LPAE |
| 624 | /* | 626 | /* |
| 625 | * In classic MMU format, puds and pmds are folded in to | 627 | * In classic MMU format, puds and pmds are folded in to |
| @@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr, | |||
| 638 | phys += SECTION_SIZE; | 640 | phys += SECTION_SIZE; |
| 639 | } while (pmd++, addr += SECTION_SIZE, addr != end); | 641 | } while (pmd++, addr += SECTION_SIZE, addr != end); |
| 640 | 642 | ||
| 641 | flush_pmd_entry(pmd); | 643 | flush_pmd_entry(p); |
| 642 | } | 644 | } |
| 643 | 645 | ||
| 644 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | 646 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, |
| @@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |||
| 661 | */ | 663 | */ |
| 662 | if (type->prot_sect && | 664 | if (type->prot_sect && |
| 663 | ((addr | next | phys) & ~SECTION_MASK) == 0) { | 665 | ((addr | next | phys) & ~SECTION_MASK) == 0) { |
| 664 | map_init_section(pmd, addr, next, phys, type); | 666 | __map_init_section(pmd, addr, next, phys, type); |
| 665 | } else { | 667 | } else { |
| 666 | alloc_init_pte(pmd, addr, next, | 668 | alloc_init_pte(pmd, addr, next, |
| 667 | __phys_to_pfn(phys), type); | 669 | __phys_to_pfn(phys), type); |
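The functional change here is subtle: the do/while loop advances pmd past the end of the range, so the old code flushed the wrong entry; saving the incoming pointer in p and flushing that is the fix (the rename to __map_init_section just marks it as an internal helper). A reduced, hypothetical illustration of the pattern, with write_entry() invented for the example:

    /* Not kernel code: shows why the cursor must be snapshotted before the loop. */
    static void fill_then_flush(pmd_t *pmd, int nr)
    {
            pmd_t *start = pmd;             /* keep the original slot */
            int i;

            for (i = 0; i < nr; i++)
                    write_entry(pmd++);     /* hypothetical helper */

            flush_pmd_entry(start);         /* flushing 'pmd' here would miss the range */
    }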
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 2c73a7301ff7..4c8c9c10a388 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -409,8 +409,8 @@ __v7_ca9mp_proc_info: | |||
| 409 | */ | 409 | */ |
| 410 | .type __v7_pj4b_proc_info, #object | 410 | .type __v7_pj4b_proc_info, #object |
| 411 | __v7_pj4b_proc_info: | 411 | __v7_pj4b_proc_info: |
| 412 | .long 0x562f5840 | 412 | .long 0x560f5800 |
| 413 | .long 0xfffffff0 | 413 | .long 0xff0fff00 |
| 414 | __v7_proc __v7_pj4b_setup | 414 | __v7_proc __v7_pj4b_setup |
| 415 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info | 415 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info |
| 416 | 416 | ||
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1e49e5eb81e9..9ba33c40cdf8 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
| @@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry, | |||
| 1336 | return; | 1336 | return; |
| 1337 | } | 1337 | } |
| 1338 | 1338 | ||
| 1339 | perf_callchain_store(entry, regs->pc); | ||
| 1339 | tail = (struct frame_tail __user *)regs->regs[29]; | 1340 | tail = (struct frame_tail __user *)regs->regs[29]; |
| 1340 | 1341 | ||
| 1341 | while (entry->nr < PERF_MAX_STACK_DEPTH && | 1342 | while (entry->nr < PERF_MAX_STACK_DEPTH && |
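The added perf_callchain_store(entry, regs->pc) records the interrupted instruction itself as the first user callchain entry; without it the sampled leaf function never appears in the chain, only its callers. A simplified sketch of the surrounding walk, assuming the AAPCS64 frame-pointer layout (saved fp in x29, saved lr next to it):

    #include <linux/perf_event.h>
    #include <linux/uaccess.h>

    /* Hypothetical, reduced version of the user backtrace walk. */
    struct frame_tail_sketch {
            struct frame_tail_sketch __user *fp;    /* saved x29 */
            unsigned long lr;                       /* saved x30 */
    };

    static void sample_user_chain(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs)
    {
            struct frame_tail_sketch __user *tail;
            struct frame_tail_sketch t;

            perf_callchain_store(entry, regs->pc);  /* the fix: record the leaf PC */
            tail = (struct frame_tail_sketch __user *)regs->regs[29];

            while (entry->nr < PERF_MAX_STACK_DEPTH && tail) {
                    if (copy_from_user(&t, tail, sizeof(t)))
                            break;
                    perf_callchain_store(entry, t.lr);
                    tail = t.fp;
            }
    }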
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h index f545477e61f3..471f481e67f3 100644 --- a/arch/metag/include/asm/hugetlb.h +++ b/arch/metag/include/asm/hugetlb.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _ASM_METAG_HUGETLB_H | 2 | #define _ASM_METAG_HUGETLB_H |
| 3 | 3 | ||
| 4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
| 5 | #include <asm-generic/hugetlb.h> | ||
| 5 | 6 | ||
| 6 | 7 | ||
| 7 | static inline int is_hugepage_only_range(struct mm_struct *mm, | 8 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h index 678f68d5f37b..8730c0a3c37d 100644 --- a/arch/mn10300/include/asm/irqflags.h +++ b/arch/mn10300/include/asm/irqflags.h | |||
| @@ -13,9 +13,8 @@ | |||
| 13 | #define _ASM_IRQFLAGS_H | 13 | #define _ASM_IRQFLAGS_H |
| 14 | 14 | ||
| 15 | #include <asm/cpu-regs.h> | 15 | #include <asm/cpu-regs.h> |
| 16 | #ifndef __ASSEMBLY__ | 16 | /* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ |
| 17 | #include <linux/smp.h> | 17 | #include <asm/smp.h> |
| 18 | #endif | ||
| 19 | 18 | ||
| 20 | /* | 19 | /* |
| 21 | * interrupt control | 20 | * interrupt control |
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h index 6745dbe64944..56c42417d428 100644 --- a/arch/mn10300/include/asm/smp.h +++ b/arch/mn10300/include/asm/smp.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #ifndef __ASSEMBLY__ | 24 | #ifndef __ASSEMBLY__ |
| 25 | #include <linux/threads.h> | 25 | #include <linux/threads.h> |
| 26 | #include <linux/cpumask.h> | 26 | #include <linux/cpumask.h> |
| 27 | #include <linux/thread_info.h> | ||
| 27 | #endif | 28 | #endif |
| 28 | 29 | ||
| 29 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_SMP |
| @@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map; | |||
| 85 | extern void smp_init_cpus(void); | 86 | extern void smp_init_cpus(void); |
| 86 | extern void smp_cache_interrupt(void); | 87 | extern void smp_cache_interrupt(void); |
| 87 | extern void send_IPI_allbutself(int irq); | 88 | extern void send_IPI_allbutself(int irq); |
| 88 | extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); | 89 | extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); |
| 89 | 90 | ||
| 90 | extern void arch_send_call_function_single_ipi(int cpu); | 91 | extern void arch_send_call_function_single_ipi(int cpu); |
| 91 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 92 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
| @@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu); | |||
| 100 | #ifndef __ASSEMBLY__ | 101 | #ifndef __ASSEMBLY__ |
| 101 | 102 | ||
| 102 | static inline void smp_init_cpus(void) {} | 103 | static inline void smp_init_cpus(void) {} |
| 104 | #define raw_smp_processor_id() 0 | ||
| 103 | 105 | ||
| 104 | #endif /* __ASSEMBLY__ */ | 106 | #endif /* __ASSEMBLY__ */ |
| 105 | #endif /* CONFIG_SMP */ | 107 | #endif /* CONFIG_SMP */ |
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h index cc50d33b7b88..b6b34a0987e7 100644 --- a/arch/parisc/include/asm/mmzone.h +++ b/arch/parisc/include/asm/mmzone.h | |||
| @@ -27,7 +27,7 @@ extern struct node_map_data node_data[]; | |||
| 27 | 27 | ||
| 28 | #define PFNNID_SHIFT (30 - PAGE_SHIFT) | 28 | #define PFNNID_SHIFT (30 - PAGE_SHIFT) |
| 29 | #define PFNNID_MAP_MAX 512 /* support 512GB */ | 29 | #define PFNNID_MAP_MAX 512 /* support 512GB */ |
| 30 | extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; | 30 | extern signed char pfnnid_map[PFNNID_MAP_MAX]; |
| 31 | 31 | ||
| 32 | #ifndef CONFIG_64BIT | 32 | #ifndef CONFIG_64BIT |
| 33 | #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) | 33 | #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) |
| @@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
| 46 | i = pfn >> PFNNID_SHIFT; | 46 | i = pfn >> PFNNID_SHIFT; |
| 47 | BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); | 47 | BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); |
| 48 | 48 | ||
| 49 | return (int)pfnnid_map[i]; | 49 | return pfnnid_map[i]; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static inline int pfn_valid(int pfn) | 52 | static inline int pfn_valid(int pfn) |
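The type change matters because the map is populated with -1 for PFN ranges that belong to no node: stored in an unsigned char, that -1 comes back as 255, so pfn_to_nid() returns a bogus node number instead of a negative error. A self-contained illustration (plain user-space C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned char old_entry = -1;   /* old type: wraps to 255 */
            signed char   new_entry = -1;   /* new type: stays -1     */

            printf("%d %d\n", old_entry, new_entry);        /* prints: 255 -1 */
            return 0;
    }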
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3234f492d575..465154076d23 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h | |||
| @@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
| 225 | return channel ? 15 : 14; | 225 | return channel ? 15 : 14; |
| 226 | } | 226 | } |
| 227 | 227 | ||
| 228 | #define HAVE_PCI_MMAP | ||
| 229 | |||
| 230 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
| 231 | enum pci_mmap_state mmap_state, int write_combine); | ||
| 232 | |||
| 228 | #endif /* __ASM_PARISC_PCI_H */ | 233 | #endif /* __ASM_PARISC_PCI_H */ |
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 9e2d2e408529..872275659d98 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c | |||
| @@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = { | |||
| 1205 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, | 1205 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, |
| 1206 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, | 1206 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, |
| 1207 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, | 1207 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, |
| 1208 | {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, | ||
| 1208 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, | 1209 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, |
| 1209 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, | 1210 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, |
| 1210 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, | 1211 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, |
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 36d7f402e48e..b743a80eaba0 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
| @@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) | |||
| 860 | #endif | 860 | #endif |
| 861 | 861 | ||
| 862 | ldil L%dcache_stride, %r1 | 862 | ldil L%dcache_stride, %r1 |
| 863 | ldw R%dcache_stride(%r1), %r1 | 863 | ldw R%dcache_stride(%r1), r31 |
| 864 | 864 | ||
| 865 | #ifdef CONFIG_64BIT | 865 | #ifdef CONFIG_64BIT |
| 866 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 | 866 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 |
| @@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) | |||
| 868 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 | 868 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 |
| 869 | #endif | 869 | #endif |
| 870 | add %r28, %r25, %r25 | 870 | add %r28, %r25, %r25 |
| 871 | sub %r25, %r1, %r25 | 871 | sub %r25, r31, %r25 |
| 872 | 872 | ||
| 873 | 873 | ||
| 874 | 1: fdc,m %r1(%r28) | 874 | 1: fdc,m r31(%r28) |
| 875 | fdc,m %r1(%r28) | 875 | fdc,m r31(%r28) |
| 876 | fdc,m %r1(%r28) | 876 | fdc,m r31(%r28) |
| 877 | fdc,m %r1(%r28) | 877 | fdc,m r31(%r28) |
| 878 | fdc,m %r1(%r28) | 878 | fdc,m r31(%r28) |
| 879 | fdc,m %r1(%r28) | 879 | fdc,m r31(%r28) |
| 880 | fdc,m %r1(%r28) | 880 | fdc,m r31(%r28) |
| 881 | fdc,m %r1(%r28) | 881 | fdc,m r31(%r28) |
| 882 | fdc,m %r1(%r28) | 882 | fdc,m r31(%r28) |
| 883 | fdc,m %r1(%r28) | 883 | fdc,m r31(%r28) |
| 884 | fdc,m %r1(%r28) | 884 | fdc,m r31(%r28) |
| 885 | fdc,m %r1(%r28) | 885 | fdc,m r31(%r28) |
| 886 | fdc,m %r1(%r28) | 886 | fdc,m r31(%r28) |
| 887 | fdc,m %r1(%r28) | 887 | fdc,m r31(%r28) |
| 888 | fdc,m %r1(%r28) | 888 | fdc,m r31(%r28) |
| 889 | cmpb,COND(<<) %r28, %r25,1b | 889 | cmpb,COND(<<) %r28, %r25,1b |
| 890 | fdc,m %r1(%r28) | 890 | fdc,m r31(%r28) |
| 891 | 891 | ||
| 892 | sync | 892 | sync |
| 893 | 893 | ||
| @@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm) | |||
| 936 | #endif | 936 | #endif |
| 937 | 937 | ||
| 938 | ldil L%icache_stride, %r1 | 938 | ldil L%icache_stride, %r1 |
| 939 | ldw R%icache_stride(%r1), %r1 | 939 | ldw R%icache_stride(%r1), %r31 |
| 940 | 940 | ||
| 941 | #ifdef CONFIG_64BIT | 941 | #ifdef CONFIG_64BIT |
| 942 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 | 942 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 |
| @@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) | |||
| 944 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 | 944 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 |
| 945 | #endif | 945 | #endif |
| 946 | add %r28, %r25, %r25 | 946 | add %r28, %r25, %r25 |
| 947 | sub %r25, %r1, %r25 | 947 | sub %r25, %r31, %r25 |
| 948 | 948 | ||
| 949 | 949 | ||
| 950 | /* fic only has the type 26 form on PA1.1, requiring an | 950 | /* fic only has the type 26 form on PA1.1, requiring an |
| 951 | * explicit space specification, so use %sr4 */ | 951 | * explicit space specification, so use %sr4 */ |
| 952 | 1: fic,m %r1(%sr4,%r28) | 952 | 1: fic,m %r31(%sr4,%r28) |
| 953 | fic,m %r1(%sr4,%r28) | 953 | fic,m %r31(%sr4,%r28) |
| 954 | fic,m %r1(%sr4,%r28) | 954 | fic,m %r31(%sr4,%r28) |
| 955 | fic,m %r1(%sr4,%r28) | 955 | fic,m %r31(%sr4,%r28) |
| 956 | fic,m %r1(%sr4,%r28) | 956 | fic,m %r31(%sr4,%r28) |
| 957 | fic,m %r1(%sr4,%r28) | 957 | fic,m %r31(%sr4,%r28) |
| 958 | fic,m %r1(%sr4,%r28) | 958 | fic,m %r31(%sr4,%r28) |
| 959 | fic,m %r1(%sr4,%r28) | 959 | fic,m %r31(%sr4,%r28) |
| 960 | fic,m %r1(%sr4,%r28) | 960 | fic,m %r31(%sr4,%r28) |
| 961 | fic,m %r1(%sr4,%r28) | 961 | fic,m %r31(%sr4,%r28) |
| 962 | fic,m %r1(%sr4,%r28) | 962 | fic,m %r31(%sr4,%r28) |
| 963 | fic,m %r1(%sr4,%r28) | 963 | fic,m %r31(%sr4,%r28) |
| 964 | fic,m %r1(%sr4,%r28) | 964 | fic,m %r31(%sr4,%r28) |
| 965 | fic,m %r1(%sr4,%r28) | 965 | fic,m %r31(%sr4,%r28) |
| 966 | fic,m %r1(%sr4,%r28) | 966 | fic,m %r31(%sr4,%r28) |
| 967 | cmpb,COND(<<) %r28, %r25,1b | 967 | cmpb,COND(<<) %r28, %r25,1b |
| 968 | fic,m %r1(%sr4,%r28) | 968 | fic,m %r31(%sr4,%r28) |
| 969 | 969 | ||
| 970 | sync | 970 | sync |
| 971 | 971 | ||
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 60309051875e..64f2764a8cef 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c | |||
| @@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | 222 | ||
| 223 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
| 224 | enum pci_mmap_state mmap_state, int write_combine) | ||
| 225 | { | ||
| 226 | unsigned long prot; | ||
| 227 | |||
| 228 | /* | ||
| 229 | * I/O space can be accessed via normal processor loads and stores on | ||
| 230 | * this platform but for now we elect not to do this and portable | ||
| 231 | * drivers should not do this anyway. | ||
| 232 | */ | ||
| 233 | if (mmap_state == pci_mmap_io) | ||
| 234 | return -EINVAL; | ||
| 235 | |||
| 236 | if (write_combine) | ||
| 237 | return -EINVAL; | ||
| 238 | |||
| 239 | /* | ||
| 240 | * Ignore write-combine; for now only return uncached mappings. | ||
| 241 | */ | ||
| 242 | prot = pgprot_val(vma->vm_page_prot); | ||
| 243 | prot |= _PAGE_NO_CACHE; | ||
| 244 | vma->vm_page_prot = __pgprot(prot); | ||
| 245 | |||
| 246 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
| 247 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
| 248 | } | ||
| 249 | |||
| 223 | /* | 250 | /* |
| 224 | * A driver is enabling the device. We make sure that all the appropriate | 251 | * A driver is enabling the device. We make sure that all the appropriate |
| 225 | * bits are set to allow the device to operate as the driver is expecting. | 252 | * bits are set to allow the device to operate as the driver is expecting. |
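Defining HAVE_PCI_MMAP and providing pci_mmap_page_range() is what lets the generic PCI sysfs code offer mmap() on a device's resource files. From user space the feature looks roughly like this (hypothetical device address and BAR size; error handling trimmed):

    /* User-space sketch: map BAR0 of a PCI device through sysfs. */
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_bar0(void)
    {
            int fd = open("/sys/bus/pci/devices/0000:00:01.0/resource0", O_RDWR);
            void *bar;

            if (fd < 0)
                    return NULL;
            bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            close(fd);
            return bar == MAP_FAILED ? NULL : bar;
    }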
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 1c965642068b..505b56c6b9b9 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt | |||
| 47 | 47 | ||
| 48 | #ifdef CONFIG_DISCONTIGMEM | 48 | #ifdef CONFIG_DISCONTIGMEM |
| 49 | struct node_map_data node_data[MAX_NUMNODES] __read_mostly; | 49 | struct node_map_data node_data[MAX_NUMNODES] __read_mostly; |
| 50 | unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; | 50 | signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; |
| 51 | #endif | 51 | #endif |
| 52 | 52 | ||
| 53 | static struct resource data_resource = { | 53 | static struct resource data_resource = { |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 5cd7ad0c1176..1a1b51189773 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
| @@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
| 673 | ret = s; | 673 | ret = s; |
| 674 | goto out; | 674 | goto out; |
| 675 | } | 675 | } |
| 676 | kvmppc_lazy_ee_enable(); | ||
| 677 | 676 | ||
| 678 | kvm_guest_enter(); | 677 | kvm_guest_enter(); |
| 679 | 678 | ||
| @@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
| 699 | kvmppc_load_guest_fp(vcpu); | 698 | kvmppc_load_guest_fp(vcpu); |
| 700 | #endif | 699 | #endif |
| 701 | 700 | ||
| 701 | kvmppc_lazy_ee_enable(); | ||
| 702 | |||
| 702 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 703 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
| 703 | 704 | ||
| 704 | /* No need for kvm_guest_exit. It's done in handle_exit. | 705 | /* No need for kvm_guest_exit. It's done in handle_exit. |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 237c8e5f2640..77fdd2cef33b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
| @@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
| 592 | do { | 592 | do { |
| 593 | pmd = pmd_offset(pud, addr); | 593 | pmd = pmd_offset(pud, addr); |
| 594 | next = pmd_addr_end(addr, end); | 594 | next = pmd_addr_end(addr, end); |
| 595 | if (pmd_none_or_clear_bad(pmd)) | 595 | if (!is_hugepd(pmd)) { |
| 596 | /* | ||
| 597 | * if it is not hugepd pointer, we should already find | ||
| 598 | * it cleared. | ||
| 599 | */ | ||
| 600 | WARN_ON(!pmd_none_or_clear_bad(pmd)); | ||
| 596 | continue; | 601 | continue; |
| 602 | } | ||
| 597 | #ifdef CONFIG_PPC_FSL_BOOK3E | 603 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 598 | /* | 604 | /* |
| 599 | * Increment next by the size of the huge mapping since | 605 | * Increment next by the size of the huge mapping since |
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index ff18e3cfb6b1..7e4a97fbded4 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
| @@ -6,6 +6,7 @@ generic-y += cputime.h | |||
| 6 | generic-y += div64.h | 6 | generic-y += div64.h |
| 7 | generic-y += emergency-restart.h | 7 | generic-y += emergency-restart.h |
| 8 | generic-y += exec.h | 8 | generic-y += exec.h |
| 9 | generic-y += linkage.h | ||
| 9 | generic-y += local64.h | 10 | generic-y += local64.h |
| 10 | generic-y += mutex.h | 11 | generic-y += mutex.h |
| 11 | generic-y += irq_regs.h | 12 | generic-y += irq_regs.h |
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 15a716934e4d..b836e9297f2a 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h | |||
| @@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void) | |||
| 135 | 135 | ||
| 136 | #ifdef CONFIG_SMP | 136 | #ifdef CONFIG_SMP |
| 137 | # define LEON3_IRQ_IPI_DEFAULT 13 | 137 | # define LEON3_IRQ_IPI_DEFAULT 13 |
| 138 | # define LEON3_IRQ_TICKER (leon3_ticker_irq) | 138 | # define LEON3_IRQ_TICKER (leon3_gptimer_irq) |
| 139 | # define LEON3_IRQ_CROSS_CALL 15 | 139 | # define LEON3_IRQ_CROSS_CALL 15 |
| 140 | #endif | 140 | #endif |
| 141 | 141 | ||
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h index f3034eddf468..24ec48c3ff90 100644 --- a/arch/sparc/include/asm/leon_amba.h +++ b/arch/sparc/include/asm/leon_amba.h | |||
| @@ -47,6 +47,7 @@ struct amba_prom_registers { | |||
| 47 | #define LEON3_GPTIMER_LD 4 | 47 | #define LEON3_GPTIMER_LD 4 |
| 48 | #define LEON3_GPTIMER_IRQEN 8 | 48 | #define LEON3_GPTIMER_IRQEN 8 |
| 49 | #define LEON3_GPTIMER_SEPIRQ 8 | 49 | #define LEON3_GPTIMER_SEPIRQ 8 |
| 50 | #define LEON3_GPTIMER_TIMERS 0x7 | ||
| 50 | 51 | ||
| 51 | #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ | 52 | #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ |
| 52 | /* 0 = hold scalar and counter */ | 53 | /* 0 = hold scalar and counter */ |
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h deleted file mode 100644 index 291c2d01c44f..000000000000 --- a/arch/sparc/include/asm/linkage.h +++ /dev/null | |||
| @@ -1,6 +0,0 @@ | |||
| 1 | #ifndef __ASM_LINKAGE_H | ||
| 2 | #define __ASM_LINKAGE_H | ||
| 3 | |||
| 4 | /* Nothing to see here... */ | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 75bb608c423e..5ef48dab5636 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
| @@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command) | |||
| 843 | unsigned long len; | 843 | unsigned long len; |
| 844 | 844 | ||
| 845 | strcpy(full_boot_str, "boot "); | 845 | strcpy(full_boot_str, "boot "); |
| 846 | strcpy(full_boot_str + strlen("boot "), boot_command); | 846 | strlcpy(full_boot_str + strlen("boot "), boot_command, |
| 847 | sizeof(full_boot_str + strlen("boot "))); | ||
| 847 | len = strlen(full_boot_str); | 848 | len = strlen(full_boot_str); |
| 848 | 849 | ||
| 849 | if (reboot_data_supported) { | 850 | if (reboot_data_supported) { |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 7c0231dabe44..b7c68976cbc7 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c | |||
| @@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock); | |||
| 38 | 38 | ||
| 39 | unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ | 39 | unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ |
| 40 | unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ | 40 | unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ |
| 41 | int leon3_ticker_irq; /* Timer ticker IRQ */ | ||
| 42 | unsigned int sparc_leon_eirq; | 41 | unsigned int sparc_leon_eirq; |
| 43 | #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) | 42 | #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) |
| 44 | #define LEON_IACK (&leon3_irqctrl_regs->iclear) | 43 | #define LEON_IACK (&leon3_irqctrl_regs->iclear) |
| @@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) | |||
| 278 | 277 | ||
| 279 | leon_clear_profile_irq(cpu); | 278 | leon_clear_profile_irq(cpu); |
| 280 | 279 | ||
| 280 | if (cpu == boot_cpu_id) | ||
| 281 | timer_interrupt(irq, NULL); | ||
| 282 | |||
| 281 | ce = &per_cpu(sparc32_clockevent, cpu); | 283 | ce = &per_cpu(sparc32_clockevent, cpu); |
| 282 | 284 | ||
| 283 | irq_enter(); | 285 | irq_enter(); |
| @@ -299,6 +301,7 @@ void __init leon_init_timers(void) | |||
| 299 | int icsel; | 301 | int icsel; |
| 300 | int ampopts; | 302 | int ampopts; |
| 301 | int err; | 303 | int err; |
| 304 | u32 config; | ||
| 302 | 305 | ||
| 303 | sparc_config.get_cycles_offset = leon_cycles_offset; | 306 | sparc_config.get_cycles_offset = leon_cycles_offset; |
| 304 | sparc_config.cs_period = 1000000 / HZ; | 307 | sparc_config.cs_period = 1000000 / HZ; |
| @@ -377,23 +380,6 @@ void __init leon_init_timers(void) | |||
| 377 | LEON3_BYPASS_STORE_PA( | 380 | LEON3_BYPASS_STORE_PA( |
| 378 | &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); | 381 | &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); |
| 379 | 382 | ||
| 380 | #ifdef CONFIG_SMP | ||
| 381 | leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; | ||
| 382 | |||
| 383 | if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & | ||
| 384 | (1<<LEON3_GPTIMER_SEPIRQ))) { | ||
| 385 | printk(KERN_ERR "timer not configured with separate irqs\n"); | ||
| 386 | BUG(); | ||
| 387 | } | ||
| 388 | |||
| 389 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, | ||
| 390 | 0); | ||
| 391 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, | ||
| 392 | (((1000000/HZ) - 1))); | ||
| 393 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, | ||
| 394 | 0); | ||
| 395 | #endif | ||
| 396 | |||
| 397 | /* | 383 | /* |
| 398 | * The IRQ controller may (if implemented) consist of multiple | 384 | * The IRQ controller may (if implemented) consist of multiple |
| 399 | * IRQ controllers, each mapped on a 4Kb boundary. | 385 | * IRQ controllers, each mapped on a 4Kb boundary. |
| @@ -416,13 +402,6 @@ void __init leon_init_timers(void) | |||
| 416 | if (eirq != 0) | 402 | if (eirq != 0) |
| 417 | leon_eirq_setup(eirq); | 403 | leon_eirq_setup(eirq); |
| 418 | 404 | ||
| 419 | irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); | ||
| 420 | err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); | ||
| 421 | if (err) { | ||
| 422 | printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); | ||
| 423 | prom_halt(); | ||
| 424 | } | ||
| 425 | |||
| 426 | #ifdef CONFIG_SMP | 405 | #ifdef CONFIG_SMP |
| 427 | { | 406 | { |
| 428 | unsigned long flags; | 407 | unsigned long flags; |
| @@ -439,30 +418,31 @@ void __init leon_init_timers(void) | |||
| 439 | } | 418 | } |
| 440 | #endif | 419 | #endif |
| 441 | 420 | ||
| 442 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, | 421 | config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); |
| 443 | LEON3_GPTIMER_EN | | 422 | if (config & (1 << LEON3_GPTIMER_SEPIRQ)) |
| 444 | LEON3_GPTIMER_RL | | 423 | leon3_gptimer_irq += leon3_gptimer_idx; |
| 445 | LEON3_GPTIMER_LD | | 424 | else if ((config & LEON3_GPTIMER_TIMERS) > 1) |
| 446 | LEON3_GPTIMER_IRQEN); | 425 | pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); |
| 447 | 426 | ||
| 448 | #ifdef CONFIG_SMP | 427 | #ifdef CONFIG_SMP |
| 449 | /* Install per-cpu IRQ handler for broadcasted ticker */ | 428 | /* Install per-cpu IRQ handler for broadcasted ticker */ |
| 450 | irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, | 429 | irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, |
| 451 | "per-cpu", 0); | 430 | "per-cpu", 0); |
| 452 | err = request_irq(irq, leon_percpu_timer_ce_interrupt, | 431 | err = request_irq(irq, leon_percpu_timer_ce_interrupt, |
| 453 | IRQF_PERCPU | IRQF_TIMER, "ticker", | 432 | IRQF_PERCPU | IRQF_TIMER, "timer", NULL); |
| 454 | NULL); | 433 | #else |
| 434 | irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); | ||
| 435 | err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); | ||
| 436 | #endif | ||
| 455 | if (err) { | 437 | if (err) { |
| 456 | printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); | 438 | pr_err("Unable to attach timer IRQ%d\n", irq); |
| 457 | prom_halt(); | 439 | prom_halt(); |
| 458 | } | 440 | } |
| 459 | | 441 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, |
| 460 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, | ||
| 461 | LEON3_GPTIMER_EN | | 442 | LEON3_GPTIMER_EN | |
| 462 | LEON3_GPTIMER_RL | | 443 | LEON3_GPTIMER_RL | |
| 463 | LEON3_GPTIMER_LD | | 444 | LEON3_GPTIMER_LD | |
| 464 | LEON3_GPTIMER_IRQEN); | 445 | LEON3_GPTIMER_IRQEN); |
| 465 | #endif | ||
| 466 | return; | 446 | return; |
| 467 | bad: | 447 | bad: |
| 468 | printk(KERN_ERR "No Timer/irqctrl found\n"); | 448 | printk(KERN_ERR "No Timer/irqctrl found\n"); |
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 7739a54315e2..6df26e37f879 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c | |||
| @@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev) | |||
| 536 | 536 | ||
| 537 | /* find device register base address */ | 537 | /* find device register base address */ |
| 538 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); | 538 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); |
| 539 | regs = devm_request_and_ioremap(&ofdev->dev, res); | 539 | regs = devm_ioremap_resource(&ofdev->dev, res); |
| 540 | if (!regs) { | 540 | if (IS_ERR(regs)) |
| 541 | dev_err(&ofdev->dev, "io-regs mapping failed\n"); | 541 | return PTR_ERR(regs); |
| 542 | return -EADDRNOTAVAIL; | ||
| 543 | } | ||
| 544 | 542 | ||
| 545 | /* | 543 | /* |
| 546 | * check that we're in Host Slot and that we can act as a Host Bridge | 544 | * check that we're in Host Slot and that we can act as a Host Bridge |
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c index bdf53d9a8d46..b0b3967a2dd2 100644 --- a/arch/sparc/kernel/leon_pmc.c +++ b/arch/sparc/kernel/leon_pmc.c | |||
| @@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void) | |||
| 47 | * MMU does not get a TLB miss here by using the MMU BYPASS ASI. | 47 | * MMU does not get a TLB miss here by using the MMU BYPASS ASI. |
| 48 | */ | 48 | */ |
| 49 | register unsigned int address = (unsigned int)leon3_irqctrl_regs; | 49 | register unsigned int address = (unsigned int)leon3_irqctrl_regs; |
| 50 | |||
| 51 | /* Interrupts need to be enabled to not hang the CPU */ | ||
| 52 | local_irq_enable(); | ||
| 53 | |||
| 50 | __asm__ __volatile__ ( | 54 | __asm__ __volatile__ ( |
| 51 | "wr %%g0, %%asr19\n" | 55 | "wr %%g0, %%asr19\n" |
| 52 | "lda [%0] %1, %%g0\n" | 56 | "lda [%0] %1, %%g0\n" |
| @@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void) | |||
| 60 | */ | 64 | */ |
| 61 | void pmc_leon_idle(void) | 65 | void pmc_leon_idle(void) |
| 62 | { | 66 | { |
| 67 | /* Interrupts need to be enabled to not hang the CPU */ | ||
| 68 | local_irq_enable(); | ||
| 69 | |||
| 63 | /* For systems without power-down, this will be no-op */ | 70 | /* For systems without power-down, this will be no-op */ |
| 64 | __asm__ __volatile__ ("wr %g0, %asr19\n\t"); | 71 | __asm__ __volatile__ ("wr %g0, %asr19\n\t"); |
| 65 | } | 72 | } |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 38bf80a22f02..1434526970a6 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
| @@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 304 | 304 | ||
| 305 | /* Initialize PROM console and command line. */ | 305 | /* Initialize PROM console and command line. */ |
| 306 | *cmdline_p = prom_getbootargs(); | 306 | *cmdline_p = prom_getbootargs(); |
| 307 | strcpy(boot_command_line, *cmdline_p); | 307 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
| 308 | parse_early_param(); | 308 | parse_early_param(); |
| 309 | 309 | ||
| 310 | boot_flags_init(*cmdline_p); | 310 | boot_flags_init(*cmdline_p); |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 88a127b9c69e..13785547e435 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
| @@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 555 | { | 555 | { |
| 556 | /* Initialize PROM console and command line. */ | 556 | /* Initialize PROM console and command line. */ |
| 557 | *cmdline_p = prom_getbootargs(); | 557 | *cmdline_p = prom_getbootargs(); |
| 558 | strcpy(boot_command_line, *cmdline_p); | 558 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
| 559 | parse_early_param(); | 559 | parse_early_param(); |
| 560 | 560 | ||
| 561 | boot_flags_init(*cmdline_p); | 561 | boot_flags_init(*cmdline_p); |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a7171997adfd..04fd55a6e461 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
| @@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md) | |||
| 1098 | m->size = *val; | 1098 | m->size = *val; |
| 1099 | val = mdesc_get_property(md, node, | 1099 | val = mdesc_get_property(md, node, |
| 1100 | "address-congruence-offset", NULL); | 1100 | "address-congruence-offset", NULL); |
| 1101 | m->offset = *val; | 1101 | |
| 1102 | /* The address-congruence-offset property is optional. | ||
| 1103 | * Explicity zero it be identifty this. | ||
| 1104 | */ | ||
| 1105 | if (val) | ||
| 1106 | m->offset = *val; | ||
| 1107 | else | ||
| 1108 | m->offset = 0UL; | ||
| 1102 | 1109 | ||
| 1103 | numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", | 1110 | numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", |
| 1104 | count - 1, m->base, m->size, m->offset); | 1111 | count - 1, m->base, m->size, m->offset); |
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 83d89bcb44af..37e7bc4c95b3 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
| @@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, | |||
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | if (!tb->active) { | 87 | if (!tb->active) { |
| 88 | global_flush_tlb_page(mm, vaddr); | ||
| 89 | flush_tsb_user_page(mm, vaddr); | 88 | flush_tsb_user_page(mm, vaddr); |
| 89 | global_flush_tlb_page(mm, vaddr); | ||
| 90 | goto out; | 90 | goto out; |
| 91 | } | 91 | } |
| 92 | 92 | ||
diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c index f5ec32e0d419..d2b49d2365e7 100644 --- a/arch/sparc/prom/bootstr_32.c +++ b/arch/sparc/prom/bootstr_32.c | |||
| @@ -23,23 +23,25 @@ prom_getbootargs(void) | |||
| 23 | return barg_buf; | 23 | return barg_buf; |
| 24 | } | 24 | } |
| 25 | 25 | ||
| 26 | switch(prom_vers) { | 26 | switch (prom_vers) { |
| 27 | case PROM_V0: | 27 | case PROM_V0: |
| 28 | cp = barg_buf; | 28 | cp = barg_buf; |
| 29 | /* Start from 1 and go over fd(0,0,0)kernel */ | 29 | /* Start from 1 and go over fd(0,0,0)kernel */ |
| 30 | for(iter = 1; iter < 8; iter++) { | 30 | for (iter = 1; iter < 8; iter++) { |
| 31 | arg = (*(romvec->pv_v0bootargs))->argv[iter]; | 31 | arg = (*(romvec->pv_v0bootargs))->argv[iter]; |
| 32 | if (arg == NULL) | 32 | if (arg == NULL) |
| 33 | break; | 33 | break; |
| 34 | while(*arg != 0) { | 34 | while (*arg != 0) { |
| 35 | /* Leave place for space and null. */ | 35 | /* Leave place for space and null. */ |
| 36 | if(cp >= barg_buf + BARG_LEN-2){ | 36 | if (cp >= barg_buf + BARG_LEN - 2) |
| 37 | /* We might issue a warning here. */ | 37 | /* We might issue a warning here. */ |
| 38 | break; | 38 | break; |
| 39 | } | ||
| 40 | *cp++ = *arg++; | 39 | *cp++ = *arg++; |
| 41 | } | 40 | } |
| 42 | *cp++ = ' '; | 41 | *cp++ = ' '; |
| 42 | if (cp >= barg_buf + BARG_LEN - 1) | ||
| 43 | /* We might issue a warning here. */ | ||
| 44 | break; | ||
| 43 | } | 45 | } |
| 44 | *cp = 0; | 46 | *cp = 0; |
| 45 | break; | 47 | break; |
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 92204c3800b5..bd1b2a3ac34e 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c | |||
| @@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node) | |||
| 39 | return prom_node_to_node("child", node); | 39 | return prom_node_to_node("child", node); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | inline phandle prom_getchild(phandle node) | 42 | phandle prom_getchild(phandle node) |
| 43 | { | 43 | { |
| 44 | phandle cnode; | 44 | phandle cnode; |
| 45 | 45 | ||
| @@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node) | |||
| 72 | return prom_node_to_node(prom_peer_name, node); | 72 | return prom_node_to_node(prom_peer_name, node); |
| 73 | } | 73 | } |
| 74 | 74 | ||
| 75 | inline phandle prom_getsibling(phandle node) | 75 | phandle prom_getsibling(phandle node) |
| 76 | { | 76 | { |
| 77 | phandle sibnode; | 77 | phandle sibnode; |
| 78 | 78 | ||
| @@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling); | |||
| 89 | /* Return the length in bytes of property 'prop' at node 'node'. | 89 | /* Return the length in bytes of property 'prop' at node 'node'. |
| 90 | * Return -1 on error. | 90 | * Return -1 on error. |
| 91 | */ | 91 | */ |
| 92 | inline int prom_getproplen(phandle node, const char *prop) | 92 | int prom_getproplen(phandle node, const char *prop) |
| 93 | { | 93 | { |
| 94 | unsigned long args[6]; | 94 | unsigned long args[6]; |
| 95 | 95 | ||
| @@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen); | |||
| 113 | * 'buffer' which has a size of 'bufsize'. If the acquisition | 113 | * 'buffer' which has a size of 'bufsize'. If the acquisition |
| 114 | * was successful the length will be returned, else -1 is returned. | 114 | * was successful the length will be returned, else -1 is returned. |
| 115 | */ | 115 | */ |
| 116 | inline int prom_getproperty(phandle node, const char *prop, | 116 | int prom_getproperty(phandle node, const char *prop, |
| 117 | char *buffer, int bufsize) | 117 | char *buffer, int bufsize) |
| 118 | { | 118 | { |
| 119 | unsigned long args[8]; | 119 | unsigned long args[8]; |
| 120 | int plen; | 120 | int plen; |
| @@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty); | |||
| 141 | /* Acquire an integer property and return its value. Returns -1 | 141 | /* Acquire an integer property and return its value. Returns -1 |
| 142 | * on failure. | 142 | * on failure. |
| 143 | */ | 143 | */ |
| 144 | inline int prom_getint(phandle node, const char *prop) | 144 | int prom_getint(phandle node, const char *prop) |
| 145 | { | 145 | { |
| 146 | int intprop; | 146 | int intprop; |
| 147 | 147 | ||
| @@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop"; | |||
| 235 | /* Return the first property type for node 'node'. | 235 | /* Return the first property type for node 'node'. |
| 236 | * buffer should be at least 32B in length | 236 | * buffer should be at least 32B in length |
| 237 | */ | 237 | */ |
| 238 | inline char *prom_firstprop(phandle node, char *buffer) | 238 | char *prom_firstprop(phandle node, char *buffer) |
| 239 | { | 239 | { |
| 240 | unsigned long args[7]; | 240 | unsigned long args[7]; |
| 241 | 241 | ||
| @@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop); | |||
| 261 | * at node 'node' . Returns NULL string if no more | 261 | * at node 'node' . Returns NULL string if no more |
| 262 | * property types for this node. | 262 | * property types for this node. |
| 263 | */ | 263 | */ |
| 264 | inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) | 264 | char *prom_nextprop(phandle node, const char *oprop, char *buffer) |
| 265 | { | 265 | { |
| 266 | unsigned long args[7]; | 266 | unsigned long args[7]; |
| 267 | char buf[32]; | 267 | char buf[32]; |
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index 4385cb6fa00a..a93b02a25222 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
| @@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int); | |||
| 84 | EXPORT_SYMBOL(__ashrdi3); | 84 | EXPORT_SYMBOL(__ashrdi3); |
| 85 | uint64_t __ashldi3(uint64_t, unsigned int); | 85 | uint64_t __ashldi3(uint64_t, unsigned int); |
| 86 | EXPORT_SYMBOL(__ashldi3); | 86 | EXPORT_SYMBOL(__ashldi3); |
| 87 | int __ffsdi2(uint64_t); | ||
| 88 | EXPORT_SYMBOL(__ffsdi2); | ||
| 87 | #endif | 89 | #endif |
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index d7d21851e60c..3df3bd544492 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
| @@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req) | |||
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | do { | 149 | do { |
| 150 | loff_t pos; | 150 | loff_t pos = file->f_pos; |
| 151 | mm_segment_t old_fs = get_fs(); | 151 | mm_segment_t old_fs = get_fs(); |
| 152 | set_fs(KERNEL_DS); | 152 | set_fs(KERNEL_DS); |
| 153 | len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); | 153 | len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); |
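The bug being fixed is an uninitialized loff_t handed to vfs_read(): reads started at whatever happened to be on the stack instead of at the file position. The corrected pattern in isolation, as a sketch (buf is a kernel buffer, hence the set_fs() dance the mconsole code also performs):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    /* Sketch: read from a kernel-opened file at its current position. */
    static ssize_t read_chunk(struct file *file, char *buf, size_t len)
    {
            loff_t pos = file->f_pos;       /* must be initialized before vfs_read() */
            mm_segment_t old_fs = get_fs();
            ssize_t n;

            set_fs(KERNEL_DS);
            n = vfs_read(file, (char __user *)buf, len, &pos);
            set_fs(old_fs);

            return n;
    }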
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 685692c94f05..fe120da25625 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt" | |||
| 2265 | config IA32_EMULATION | 2265 | config IA32_EMULATION |
| 2266 | bool "IA32 Emulation" | 2266 | bool "IA32 Emulation" |
| 2267 | depends on X86_64 | 2267 | depends on X86_64 |
| 2268 | select BINFMT_ELF | ||
| 2268 | select COMPAT_BINFMT_ELF | 2269 | select COMPAT_BINFMT_ELF |
| 2269 | select HAVE_UID16 | 2270 | select HAVE_UID16 |
| 2270 | ---help--- | 2271 | ---help--- |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 62fe22cd4cba..477e9d75149b 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
| @@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8) | |||
| 2681 | addq %rcx, KEYP | 2681 | addq %rcx, KEYP |
| 2682 | 2682 | ||
| 2683 | movdqa IV, STATE1 | 2683 | movdqa IV, STATE1 |
| 2684 | pxor 0x00(INP), STATE1 | 2684 | movdqu 0x00(INP), INC |
| 2685 | pxor INC, STATE1 | ||
| 2685 | movdqu IV, 0x00(OUTP) | 2686 | movdqu IV, 0x00(OUTP) |
| 2686 | 2687 | ||
| 2687 | _aesni_gf128mul_x_ble() | 2688 | _aesni_gf128mul_x_ble() |
| 2688 | movdqa IV, STATE2 | 2689 | movdqa IV, STATE2 |
| 2689 | pxor 0x10(INP), STATE2 | 2690 | movdqu 0x10(INP), INC |
| 2691 | pxor INC, STATE2 | ||
| 2690 | movdqu IV, 0x10(OUTP) | 2692 | movdqu IV, 0x10(OUTP) |
| 2691 | 2693 | ||
| 2692 | _aesni_gf128mul_x_ble() | 2694 | _aesni_gf128mul_x_ble() |
| 2693 | movdqa IV, STATE3 | 2695 | movdqa IV, STATE3 |
| 2694 | pxor 0x20(INP), STATE3 | 2696 | movdqu 0x20(INP), INC |
| 2697 | pxor INC, STATE3 | ||
| 2695 | movdqu IV, 0x20(OUTP) | 2698 | movdqu IV, 0x20(OUTP) |
| 2696 | 2699 | ||
| 2697 | _aesni_gf128mul_x_ble() | 2700 | _aesni_gf128mul_x_ble() |
| 2698 | movdqa IV, STATE4 | 2701 | movdqa IV, STATE4 |
| 2699 | pxor 0x30(INP), STATE4 | 2702 | movdqu 0x30(INP), INC |
| 2703 | pxor INC, STATE4 | ||
| 2700 | movdqu IV, 0x30(OUTP) | 2704 | movdqu IV, 0x30(OUTP) |
| 2701 | 2705 | ||
| 2702 | call *%r11 | 2706 | call *%r11 |
| 2703 | 2707 | ||
| 2704 | pxor 0x00(OUTP), STATE1 | 2708 | movdqu 0x00(OUTP), INC |
| 2709 | pxor INC, STATE1 | ||
| 2705 | movdqu STATE1, 0x00(OUTP) | 2710 | movdqu STATE1, 0x00(OUTP) |
| 2706 | 2711 | ||
| 2707 | _aesni_gf128mul_x_ble() | 2712 | _aesni_gf128mul_x_ble() |
| 2708 | movdqa IV, STATE1 | 2713 | movdqa IV, STATE1 |
| 2709 | pxor 0x40(INP), STATE1 | 2714 | movdqu 0x40(INP), INC |
| 2715 | pxor INC, STATE1 | ||
| 2710 | movdqu IV, 0x40(OUTP) | 2716 | movdqu IV, 0x40(OUTP) |
| 2711 | 2717 | ||
| 2712 | pxor 0x10(OUTP), STATE2 | 2718 | movdqu 0x10(OUTP), INC |
| 2719 | pxor INC, STATE2 | ||
| 2713 | movdqu STATE2, 0x10(OUTP) | 2720 | movdqu STATE2, 0x10(OUTP) |
| 2714 | 2721 | ||
| 2715 | _aesni_gf128mul_x_ble() | 2722 | _aesni_gf128mul_x_ble() |
| 2716 | movdqa IV, STATE2 | 2723 | movdqa IV, STATE2 |
| 2717 | pxor 0x50(INP), STATE2 | 2724 | movdqu 0x50(INP), INC |
| 2725 | pxor INC, STATE2 | ||
| 2718 | movdqu IV, 0x50(OUTP) | 2726 | movdqu IV, 0x50(OUTP) |
| 2719 | 2727 | ||
| 2720 | pxor 0x20(OUTP), STATE3 | 2728 | movdqu 0x20(OUTP), INC |
| 2729 | pxor INC, STATE3 | ||
| 2721 | movdqu STATE3, 0x20(OUTP) | 2730 | movdqu STATE3, 0x20(OUTP) |
| 2722 | 2731 | ||
| 2723 | _aesni_gf128mul_x_ble() | 2732 | _aesni_gf128mul_x_ble() |
| 2724 | movdqa IV, STATE3 | 2733 | movdqa IV, STATE3 |
| 2725 | pxor 0x60(INP), STATE3 | 2734 | movdqu 0x60(INP), INC |
| 2735 | pxor INC, STATE3 | ||
| 2726 | movdqu IV, 0x60(OUTP) | 2736 | movdqu IV, 0x60(OUTP) |
| 2727 | 2737 | ||
| 2728 | pxor 0x30(OUTP), STATE4 | 2738 | movdqu 0x30(OUTP), INC |
| 2739 | pxor INC, STATE4 | ||
| 2729 | movdqu STATE4, 0x30(OUTP) | 2740 | movdqu STATE4, 0x30(OUTP) |
| 2730 | 2741 | ||
| 2731 | _aesni_gf128mul_x_ble() | 2742 | _aesni_gf128mul_x_ble() |
| 2732 | movdqa IV, STATE4 | 2743 | movdqa IV, STATE4 |
| 2733 | pxor 0x70(INP), STATE4 | 2744 | movdqu 0x70(INP), INC |
| 2745 | pxor INC, STATE4 | ||
| 2734 | movdqu IV, 0x70(OUTP) | 2746 | movdqu IV, 0x70(OUTP) |
| 2735 | 2747 | ||
| 2736 | _aesni_gf128mul_x_ble() | 2748 | _aesni_gf128mul_x_ble() |
| @@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8) | |||
| 2738 | 2750 | ||
| 2739 | call *%r11 | 2751 | call *%r11 |
| 2740 | 2752 | ||
| 2741 | pxor 0x40(OUTP), STATE1 | 2753 | movdqu 0x40(OUTP), INC |
| 2754 | pxor INC, STATE1 | ||
| 2742 | movdqu STATE1, 0x40(OUTP) | 2755 | movdqu STATE1, 0x40(OUTP) |
| 2743 | 2756 | ||
| 2744 | pxor 0x50(OUTP), STATE2 | 2757 | movdqu 0x50(OUTP), INC |
| 2758 | pxor INC, STATE2 | ||
| 2745 | movdqu STATE2, 0x50(OUTP) | 2759 | movdqu STATE2, 0x50(OUTP) |
| 2746 | 2760 | ||
| 2747 | pxor 0x60(OUTP), STATE3 | 2761 | movdqu 0x60(OUTP), INC |
| 2762 | pxor INC, STATE3 | ||
| 2748 | movdqu STATE3, 0x60(OUTP) | 2763 | movdqu STATE3, 0x60(OUTP) |
| 2749 | 2764 | ||
| 2750 | pxor 0x70(OUTP), STATE4 | 2765 | movdqu 0x70(OUTP), INC |
| 2766 | pxor INC, STATE4 | ||
| 2751 | movdqu STATE4, 0x70(OUTP) | 2767 | movdqu STATE4, 0x70(OUTP) |
| 2752 | 2768 | ||
| 2753 | ret | 2769 | ret |
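
The hunk above replaces pxor instructions that read their second operand straight from memory (legacy SSE memory operands must be 16-byte aligned) with movdqu loads into the spare INC register followed by a register-register pxor, so unaligned XTS buffers no longer fault. A rough user-space illustration of the same load-then-xor idea using SSE2 intrinsics; this is a sketch, not the kernel assembly:

    #include <emmintrin.h>

    /* XOR one 16-byte block of src into dst without assuming alignment. */
    void xor_block_unaligned(unsigned char *dst, const unsigned char *src)
    {
            __m128i state = _mm_loadu_si128((const __m128i *)dst);
            __m128i in    = _mm_loadu_si128((const __m128i *)src);  /* movdqu */

            state = _mm_xor_si128(state, in);                        /* pxor */
            _mm_storeu_si128((__m128i *)dst, state);
    }
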
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 805078e08013..52ff81cce008 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
| @@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, | |||
| 192 | /* struct user */ | 192 | /* struct user */ |
| 193 | DUMP_WRITE(&dump, sizeof(dump)); | 193 | DUMP_WRITE(&dump, sizeof(dump)); |
| 194 | /* Now dump all of the user data. Include malloced stuff as well */ | 194 | /* Now dump all of the user data. Include malloced stuff as well */ |
| 195 | DUMP_SEEK(PAGE_SIZE); | 195 | DUMP_SEEK(PAGE_SIZE - sizeof(dump)); |
| 196 | /* now we start writing out the user space info */ | 196 | /* now we start writing out the user space info */ |
| 197 | set_fs(USER_DS); | 197 | set_fs(USER_DS); |
| 198 | /* Dump the data area */ | 198 | /* Dump the data area */ |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ba870bb6dd8e..57873beb3292 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
| @@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector); | |||
| 41 | 41 | ||
| 42 | extern void init_ISA_irqs(void); | 42 | extern void init_ISA_irqs(void); |
| 43 | 43 | ||
| 44 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 45 | void arch_trigger_all_cpu_backtrace(void); | ||
| 46 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
| 47 | #endif | ||
| 48 | |||
| 44 | #endif /* _ASM_X86_IRQ_H */ | 49 | #endif /* _ASM_X86_IRQ_H */ |
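
This hunk adds both the hook declaration and a #define of its own name, a common kernel idiom: generic code can then test for an architecture-provided hook with a plain #ifdef. A small stand-alone C sketch of the idiom; the function body and messages are invented for illustration:

    #include <stdio.h>

    /* Pretend these two lines came from an arch header, as in the hunk
     * above: the self-referential #define makes the hook discoverable. */
    void arch_trigger_all_cpu_backtrace(void);
    #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace

    void arch_trigger_all_cpu_backtrace(void)
    {
            puts("backtrace all CPUs (stub)");
    }

    int main(void)
    {
    #ifdef arch_trigger_all_cpu_backtrace
            arch_trigger_all_cpu_backtrace();   /* arch provides the hook */
    #else
            puts("no per-arch hook, fall back to a local dump");
    #endif
            return 0;
    }
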
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 6825e2efd1b4..6bc3985ee473 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
| @@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} | |||
| 60 | #ifdef CONFIG_MICROCODE_EARLY | 60 | #ifdef CONFIG_MICROCODE_EARLY |
| 61 | #define MAX_UCODE_COUNT 128 | 61 | #define MAX_UCODE_COUNT 128 |
| 62 | extern void __init load_ucode_bsp(void); | 62 | extern void __init load_ucode_bsp(void); |
| 63 | extern __init void load_ucode_ap(void); | 63 | extern void __cpuinit load_ucode_ap(void); |
| 64 | extern int __init save_microcode_in_initrd(void); | 64 | extern int __init save_microcode_in_initrd(void); |
| 65 | #else | 65 | #else |
| 66 | static inline void __init load_ucode_bsp(void) {} | 66 | static inline void __init load_ucode_bsp(void) {} |
| 67 | static inline __init void load_ucode_ap(void) {} | 67 | static inline void __cpuinit load_ucode_ap(void) {} |
| 68 | static inline int __init save_microcode_in_initrd(void) | 68 | static inline int __init save_microcode_in_initrd(void) |
| 69 | { | 69 | { |
| 70 | return 0; | 70 | return 0; |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c0fa356e90de..86f9301903c8 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
| @@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , | |||
| 18 | void __user *, size_t *, loff_t *); | 18 | void __user *, size_t *, loff_t *); |
| 19 | extern int unknown_nmi_panic; | 19 | extern int unknown_nmi_panic; |
| 20 | 20 | ||
| 21 | void arch_trigger_all_cpu_backtrace(void); | 21 | #endif /* CONFIG_X86_LOCAL_APIC */ |
| 22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
| 23 | #endif | ||
| 24 | 22 | ||
| 25 | #define NMI_FLAG_FIRST 1 | 23 | #define NMI_FLAG_FIRST 1 |
| 26 | 24 | ||
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 31cb9ae992b7..a698d7165c96 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * | 9 | * |
| 10 | */ | 10 | */ |
| 11 | #include <asm/apic.h> | 11 | #include <asm/apic.h> |
| 12 | #include <asm/nmi.h> | ||
| 12 | 13 | ||
| 13 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
| 14 | #include <linux/kdebug.h> | 15 | #include <linux/kdebug.h> |
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 35ffda5d0727..5f90b85ff22e 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
| @@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
| 714 | if (mtrr_tom2) | 714 | if (mtrr_tom2) |
| 715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; | 715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
| 716 | 716 | ||
| 717 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); | ||
| 718 | /* | 717 | /* |
| 719 | * [0, 1M) should always be covered by var mtrr with WB | 718 | * [0, 1M) should always be covered by var mtrr with WB |
| 720 | * and fixed mtrrs should take effect before var mtrr for it: | 719 | * and fixed mtrrs should take effect before var mtrr for it: |
| 721 | */ | 720 | */ |
| 722 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, | 721 | nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, |
| 723 | 1ULL<<(20 - PAGE_SHIFT)); | 722 | 1ULL<<(20 - PAGE_SHIFT)); |
| 724 | /* Sort the ranges: */ | 723 | /* add from var mtrr at last */ |
| 725 | sort_range(range, nr_range); | 724 | nr_range = x86_get_mtrr_mem_range(range, nr_range, |
| 725 | x_remove_base, x_remove_size); | ||
| 726 | 726 | ||
| 727 | range_sums = sum_ranges(range, nr_range); | 727 | range_sums = sum_ranges(range, nr_range); |
| 728 | printk(KERN_INFO "total RAM covered: %ldM\n", | 728 | printk(KERN_INFO "total RAM covered: %ldM\n", |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index f60d41ff9a97..a9e22073bd56 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
| @@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | |||
| 165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), | 165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), |
| 166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | 166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), |
| 167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
| 168 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
| 169 | EVENT_EXTRA_END | 168 | EVENT_EXTRA_END |
| 170 | }; | 169 | }; |
| 171 | 170 | ||
| 172 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | 171 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { |
| 173 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 172 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
| 174 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 173 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), |
| 174 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
| 175 | EVENT_EXTRA_END | 175 | EVENT_EXTRA_END |
| 176 | }; | 176 | }; |
| 177 | 177 | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index d2c381280e3c..3dd37ebd591b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
| @@ -242,6 +242,7 @@ void __init kvmclock_init(void) | |||
| 242 | if (!mem) | 242 | if (!mem) |
| 243 | return; | 243 | return; |
| 244 | hv_clock = __va(mem); | 244 | hv_clock = __va(mem); |
| 245 | memset(hv_clock, 0, size); | ||
| 245 | 246 | ||
| 246 | if (kvm_register_clock("boot clock")) { | 247 | if (kvm_register_clock("boot clock")) { |
| 247 | hv_clock = NULL; | 248 | hv_clock = NULL; |
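
The added memset matters because the area returned by the early allocator and mapped with __va() is not guaranteed to be zero-filled, while the pvclock structures placed in it are expected to start from a known-zero state. The same pattern in plain C, assuming nothing beyond libc:

    #include <stdlib.h>
    #include <string.h>

    /* Memory from an allocator that does not clear it must be zeroed
     * before another agent starts interpreting its fields. */
    void *alloc_shared_area(size_t size)
    {
            void *p = malloc(size);     /* stands in for memblock + __va() */

            if (!p)
                    return NULL;
            memset(p, 0, size);         /* mirrors the added memset() */
            return p;
    }
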
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4e7a37ff03ab..81a5f5e8f142 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -277,18 +277,6 @@ void exit_idle(void) | |||
| 277 | } | 277 | } |
| 278 | #endif | 278 | #endif |
| 279 | 279 | ||
| 280 | void arch_cpu_idle_prepare(void) | ||
| 281 | { | ||
| 282 | /* | ||
| 283 | * If we're the non-boot CPU, nothing set the stack canary up | ||
| 284 | * for us. CPU0 already has it initialized but no harm in | ||
| 285 | * doing it again. This is a good place for updating it, as | ||
| 286 | * we wont ever return from this function (so the invalid | ||
| 287 | * canaries already on the stack wont ever trigger). | ||
| 288 | */ | ||
| 289 | boot_init_stack_canary(); | ||
| 290 | } | ||
| 291 | |||
| 292 | void arch_cpu_idle_enter(void) | 280 | void arch_cpu_idle_enter(void) |
| 293 | { | 281 | { |
| 294 | local_touch_nmi(); | 282 | local_touch_nmi(); |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9c73b51817e4..bfd348e99369 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
| 372 | 372 | ||
| 373 | void __cpuinit set_cpu_sibling_map(int cpu) | 373 | void __cpuinit set_cpu_sibling_map(int cpu) |
| 374 | { | 374 | { |
| 375 | bool has_mc = boot_cpu_data.x86_max_cores > 1; | ||
| 376 | bool has_smt = smp_num_siblings > 1; | 375 | bool has_smt = smp_num_siblings > 1; |
| 376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; | ||
| 377 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 377 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
| 378 | struct cpuinfo_x86 *o; | 378 | struct cpuinfo_x86 *o; |
| 379 | int i; | 379 | int i; |
| 380 | 380 | ||
| 381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); | 381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
| 382 | 382 | ||
| 383 | if (!has_smt && !has_mc) { | 383 | if (!has_mp) { |
| 384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
| 385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); | 385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
| 386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); | 386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); |
| @@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 394 | if ((i == cpu) || (has_smt && match_smt(c, o))) | 394 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
| 395 | link_mask(sibling, cpu, i); | 395 | link_mask(sibling, cpu, i); |
| 396 | 396 | ||
| 397 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 397 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
| 398 | link_mask(llc_shared, cpu, i); | 398 | link_mask(llc_shared, cpu, i); |
| 399 | 399 | ||
| 400 | } | 400 | } |
| @@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
| 406 | for_each_cpu(i, cpu_sibling_setup_mask) { | 406 | for_each_cpu(i, cpu_sibling_setup_mask) { |
| 407 | o = &cpu_data(i); | 407 | o = &cpu_data(i); |
| 408 | 408 | ||
| 409 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 409 | if ((i == cpu) || (has_mp && match_mc(c, o))) { |
| 410 | link_mask(core, cpu, i); | 410 | link_mask(core, cpu, i); |
| 411 | 411 | ||
| 412 | /* | 412 | /* |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 094b5d96ab14..e8ba99c34180 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
| 582 | if (index != XCR_XFEATURE_ENABLED_MASK) | 582 | if (index != XCR_XFEATURE_ENABLED_MASK) |
| 583 | return 1; | 583 | return 1; |
| 584 | xcr0 = xcr; | 584 | xcr0 = xcr; |
| 585 | if (kvm_x86_ops->get_cpl(vcpu) != 0) | ||
| 586 | return 1; | ||
| 587 | if (!(xcr0 & XSTATE_FP)) | 585 | if (!(xcr0 & XSTATE_FP)) |
| 588 | return 1; | 586 | return 1; |
| 589 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) | 587 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) |
| @@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
| 597 | 595 | ||
| 598 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | 596 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
| 599 | { | 597 | { |
| 600 | if (__kvm_set_xcr(vcpu, index, xcr)) { | 598 | if (kvm_x86_ops->get_cpl(vcpu) != 0 || |
| 599 | __kvm_set_xcr(vcpu, index, xcr)) { | ||
| 601 | kvm_inject_gp(vcpu, 0); | 600 | kvm_inject_gp(vcpu, 0); |
| 602 | return 1; | 601 | return 1; |
| 603 | } | 602 | } |
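
The privilege test moves out of the shared helper and into kvm_set_xcr, the wrapper used on the guest-triggered path, which keeps the CPL check where the guest is the initiator while leaving the helper usable from other callers. A minimal sketch of that split, with made-up names rather than the KVM API:

    /* Internal helper: validates and applies the value only. */
    static int set_xcr0(unsigned long long *xcr0, unsigned long long val)
    {
            if (!(val & 1))             /* the FP bit must stay set */
                    return 1;
            *xcr0 = val;
            return 0;
    }

    /* Guest-facing wrapper: the privilege check lives here, not in the
     * helper shared with other paths. */
    int guest_set_xcr0(int guest_cpl, unsigned long long *xcr0,
                       unsigned long long val)
    {
            if (guest_cpl != 0 || set_xcr0(xcr0, val))
                    return 1;           /* caller injects #GP */
            return 0;
    }
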
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 5ae2eb09419e..d2fbcedcf6ea 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
| @@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | |||
| 1069 | * that by attempting to use more space than is available. | 1069 | * that by attempting to use more space than is available. |
| 1070 | */ | 1070 | */ |
| 1071 | unsigned long dummy_size = remaining_size + 1024; | 1071 | unsigned long dummy_size = remaining_size + 1024; |
| 1072 | void *dummy = kmalloc(dummy_size, GFP_ATOMIC); | 1072 | void *dummy = kzalloc(dummy_size, GFP_ATOMIC); |
| 1073 | |||
| 1074 | if (!dummy) | ||
| 1075 | return EFI_OUT_OF_RESOURCES; | ||
| 1073 | 1076 | ||
| 1074 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | 1077 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, |
| 1075 | EFI_VARIABLE_NON_VOLATILE | | 1078 | EFI_VARIABLE_NON_VOLATILE | |
| @@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | |||
| 1089 | 0, dummy); | 1092 | 0, dummy); |
| 1090 | } | 1093 | } |
| 1091 | 1094 | ||
| 1095 | kfree(dummy); | ||
| 1096 | |||
| 1092 | /* | 1097 | /* |
| 1093 | * The runtime code may now have triggered a garbage collection | 1098 | * The runtime code may now have triggered a garbage collection |
| 1094 | * run, so check the variable info again | 1099 | * run, so check the variable info again |
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 652fd5ce303c..cab13f2fc28e 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
| @@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev, | |||
| 164 | if (dev_desc->clk_required) { | 164 | if (dev_desc->clk_required) { |
| 165 | ret = register_device_clock(adev, pdata); | 165 | ret = register_device_clock(adev, pdata); |
| 166 | if (ret) { | 166 | if (ret) { |
| 167 | /* | 167 | /* Skip the device, but continue the namespace scan. */ |
| 168 | * Skip the device, but don't terminate the namespace | 168 | ret = 0; |
| 169 | * scan. | 169 | goto err_out; |
| 170 | */ | ||
| 171 | kfree(pdata); | ||
| 172 | return 0; | ||
| 173 | } | 170 | } |
| 174 | } | 171 | } |
| 175 | 172 | ||
| 173 | /* | ||
| 174 | * This works around a known issue in ACPI tables where LPSS devices | ||
| 175 | * have _PS0 and _PS3 without _PSC (and no power resources), so | ||
| 176 | * acpi_bus_init_power() will assume that the BIOS has put them into D0. | ||
| 177 | */ | ||
| 178 | ret = acpi_device_fix_up_power(adev); | ||
| 179 | if (ret) { | ||
| 180 | /* Skip the device, but continue the namespace scan. */ | ||
| 181 | ret = 0; | ||
| 182 | goto err_out; | ||
| 183 | } | ||
| 184 | |||
| 176 | adev->driver_data = pdata; | 185 | adev->driver_data = pdata; |
| 177 | ret = acpi_create_platform_device(adev, id); | 186 | ret = acpi_create_platform_device(adev, id); |
| 178 | if (ret > 0) | 187 | if (ret > 0) |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 318fa32a141e..31c217a42839 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
| @@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device) | |||
| 290 | return 0; | 290 | return 0; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | /** | ||
| 294 | * acpi_device_fix_up_power - Force device with missing _PSC into D0. | ||
| 295 | * @device: Device object whose power state is to be fixed up. | ||
| 296 | * | ||
| 297 | * Devices without power resources and _PSC, but having _PS0 and _PS3 defined, | ||
| 298 | * are assumed to be put into D0 by the BIOS. However, in some cases that may | ||
| 299 | * not be the case and this function should be used then. | ||
| 300 | */ | ||
| 301 | int acpi_device_fix_up_power(struct acpi_device *device) | ||
| 302 | { | ||
| 303 | int ret = 0; | ||
| 304 | |||
| 305 | if (!device->power.flags.power_resources | ||
| 306 | && !device->power.flags.explicit_get | ||
| 307 | && device->power.state == ACPI_STATE_D0) | ||
| 308 | ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0); | ||
| 309 | |||
| 310 | return ret; | ||
| 311 | } | ||
| 312 | |||
| 293 | int acpi_bus_update_power(acpi_handle handle, int *state_p) | 313 | int acpi_bus_update_power(acpi_handle handle, int *state_p) |
| 294 | { | 314 | { |
| 295 | struct acpi_device *device; | 315 | struct acpi_device *device; |
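
The new kerneldoc above describes when the helper is needed; the LPSS hunk earlier in this diff is its in-tree caller. A hypothetical enumeration-time caller would look roughly like the sketch below; the function name is invented, and the skip-on-error behaviour mirrors the LPSS change:

    /* Force a device with _PS0/_PS3 but no _PSC into D0 before use. */
    static int example_lpss_attach(struct acpi_device *adev)
    {
            int ret = acpi_device_fix_up_power(adev);

            if (ret)
                    return 0;   /* skip this device, keep scanning */

            /* ... go on to create and register the platform device ... */
            return 0;
    }
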
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c index 4fdea381ef21..ec117c6c996c 100644 --- a/drivers/acpi/dock.c +++ b/drivers/acpi/dock.c | |||
| @@ -868,8 +868,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr, | |||
| 868 | if (!count) | 868 | if (!count) |
| 869 | return -EINVAL; | 869 | return -EINVAL; |
| 870 | 870 | ||
| 871 | acpi_scan_lock_acquire(); | ||
| 871 | begin_undock(dock_station); | 872 | begin_undock(dock_station); |
| 872 | ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); | 873 | ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST); |
| 874 | acpi_scan_lock_release(); | ||
| 873 | return ret ? ret: count; | 875 | return ret ? ret: count; |
| 874 | } | 876 | } |
| 875 | static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); | 877 | static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock); |
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index f962047c6c85..288bb270f8ed 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
| @@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle) | |||
| 885 | ACPI_STA_DEFAULT); | 885 | ACPI_STA_DEFAULT); |
| 886 | mutex_init(&resource->resource_lock); | 886 | mutex_init(&resource->resource_lock); |
| 887 | INIT_LIST_HEAD(&resource->dependent); | 887 | INIT_LIST_HEAD(&resource->dependent); |
| 888 | INIT_LIST_HEAD(&resource->list_node); | ||
| 888 | resource->name = device->pnp.bus_id; | 889 | resource->name = device->pnp.bus_id; |
| 889 | strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); | 890 | strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); |
| 890 | strcpy(acpi_device_class(device), ACPI_POWER_CLASS); | 891 | strcpy(acpi_device_class(device), ACPI_POWER_CLASS); |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index a3868f6c222a..3322b47ab7ca 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c | |||
| @@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi) | |||
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | 306 | static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, |
| 307 | u8 triggering, u8 polarity, u8 shareable) | 307 | u8 triggering, u8 polarity, u8 shareable, |
| 308 | bool legacy) | ||
| 308 | { | 309 | { |
| 309 | int irq, p, t; | 310 | int irq, p, t; |
| 310 | 311 | ||
| @@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, | |||
| 317 | * In IO-APIC mode, use overrided attribute. Two reasons: | 318 | * In IO-APIC mode, use overrided attribute. Two reasons: |
| 318 | * 1. BIOS bug in DSDT | 319 | * 1. BIOS bug in DSDT |
| 319 | * 2. BIOS uses IO-APIC mode Interrupt Source Override | 320 | * 2. BIOS uses IO-APIC mode Interrupt Source Override |
| 321 | * | ||
| 322 | * We do this only if we are dealing with IRQ() or IRQNoFlags() | ||
| 323 | * resource (the legacy ISA resources). With modern ACPI 5 devices | ||
| 324 | * using extended IRQ descriptors we take the IRQ configuration | ||
| 325 | * from _CRS directly. | ||
| 320 | */ | 326 | */ |
| 321 | if (!acpi_get_override_irq(gsi, &t, &p)) { | 327 | if (legacy && !acpi_get_override_irq(gsi, &t, &p)) { |
| 322 | u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; | 328 | u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; |
| 323 | u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; | 329 | u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; |
| 324 | 330 | ||
| 325 | if (triggering != trig || polarity != pol) { | 331 | if (triggering != trig || polarity != pol) { |
| 326 | pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, | 332 | pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi, |
| 327 | t ? "edge" : "level", p ? "low" : "high"); | 333 | t ? "level" : "edge", p ? "low" : "high"); |
| 328 | triggering = trig; | 334 | triggering = trig; |
| 329 | polarity = pol; | 335 | polarity = pol; |
| 330 | } | 336 | } |
| @@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | |||
| 373 | } | 379 | } |
| 374 | acpi_dev_get_irqresource(res, irq->interrupts[index], | 380 | acpi_dev_get_irqresource(res, irq->interrupts[index], |
| 375 | irq->triggering, irq->polarity, | 381 | irq->triggering, irq->polarity, |
| 376 | irq->sharable); | 382 | irq->sharable, true); |
| 377 | break; | 383 | break; |
| 378 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: | 384 | case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: |
| 379 | ext_irq = &ares->data.extended_irq; | 385 | ext_irq = &ares->data.extended_irq; |
| @@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, | |||
| 383 | } | 389 | } |
| 384 | acpi_dev_get_irqresource(res, ext_irq->interrupts[index], | 390 | acpi_dev_get_irqresource(res, ext_irq->interrupts[index], |
| 385 | ext_irq->triggering, ext_irq->polarity, | 391 | ext_irq->triggering, ext_irq->polarity, |
| 386 | ext_irq->sharable); | 392 | ext_irq->sharable, false); |
| 387 | break; | 393 | break; |
| 388 | default: | 394 | default: |
| 389 | return false; | 395 | return false; |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 4b1f9265887f..01e21037d8fe 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
| @@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv) | |||
| 450 | { | 450 | { |
| 451 | struct firmware_buf *buf = fw_priv->buf; | 451 | struct firmware_buf *buf = fw_priv->buf; |
| 452 | 452 | ||
| 453 | /* | ||
| 454 | * There is a small window in which user can write to 'loading' | ||
| 455 | * between loading done and disappearance of 'loading' | ||
| 456 | */ | ||
| 457 | if (test_bit(FW_STATUS_DONE, &buf->status)) | ||
| 458 | return; | ||
| 459 | |||
| 453 | set_bit(FW_STATUS_ABORT, &buf->status); | 460 | set_bit(FW_STATUS_ABORT, &buf->status); |
| 454 | complete_all(&buf->completion); | 461 | complete_all(&buf->completion); |
| 462 | |||
| 463 | /* avoid user action after loading abort */ | ||
| 464 | fw_priv->buf = NULL; | ||
| 455 | } | 465 | } |
| 456 | 466 | ||
| 457 | #define is_fw_load_aborted(buf) \ | 467 | #define is_fw_load_aborted(buf) \ |
| @@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev, | |||
| 528 | struct device_attribute *attr, char *buf) | 538 | struct device_attribute *attr, char *buf) |
| 529 | { | 539 | { |
| 530 | struct firmware_priv *fw_priv = to_firmware_priv(dev); | 540 | struct firmware_priv *fw_priv = to_firmware_priv(dev); |
| 531 | int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); | 541 | int loading = 0; |
| 542 | |||
| 543 | mutex_lock(&fw_lock); | ||
| 544 | if (fw_priv->buf) | ||
| 545 | loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status); | ||
| 546 | mutex_unlock(&fw_lock); | ||
| 532 | 547 | ||
| 533 | return sprintf(buf, "%d\n", loading); | 548 | return sprintf(buf, "%d\n", loading); |
| 534 | } | 549 | } |
| @@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev, | |||
| 570 | const char *buf, size_t count) | 585 | const char *buf, size_t count) |
| 571 | { | 586 | { |
| 572 | struct firmware_priv *fw_priv = to_firmware_priv(dev); | 587 | struct firmware_priv *fw_priv = to_firmware_priv(dev); |
| 573 | struct firmware_buf *fw_buf = fw_priv->buf; | 588 | struct firmware_buf *fw_buf; |
| 574 | int loading = simple_strtol(buf, NULL, 10); | 589 | int loading = simple_strtol(buf, NULL, 10); |
| 575 | int i; | 590 | int i; |
| 576 | 591 | ||
| 577 | mutex_lock(&fw_lock); | 592 | mutex_lock(&fw_lock); |
| 578 | 593 | fw_buf = fw_priv->buf; | |
| 579 | if (!fw_buf) | 594 | if (!fw_buf) |
| 580 | goto out; | 595 | goto out; |
| 581 | 596 | ||
| @@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work) | |||
| 777 | struct firmware_priv, timeout_work.work); | 792 | struct firmware_priv, timeout_work.work); |
| 778 | 793 | ||
| 779 | mutex_lock(&fw_lock); | 794 | mutex_lock(&fw_lock); |
| 780 | if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) { | ||
| 781 | mutex_unlock(&fw_lock); | ||
| 782 | return; | ||
| 783 | } | ||
| 784 | fw_load_abort(fw_priv); | 795 | fw_load_abort(fw_priv); |
| 785 | mutex_unlock(&fw_lock); | 796 | mutex_unlock(&fw_lock); |
| 786 | } | 797 | } |
| @@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent, | |||
| 861 | 872 | ||
| 862 | cancel_delayed_work_sync(&fw_priv->timeout_work); | 873 | cancel_delayed_work_sync(&fw_priv->timeout_work); |
| 863 | 874 | ||
| 864 | fw_priv->buf = NULL; | ||
| 865 | |||
| 866 | device_remove_file(f_dev, &dev_attr_loading); | 875 | device_remove_file(f_dev, &dev_attr_loading); |
| 867 | err_del_bin_attr: | 876 | err_del_bin_attr: |
| 868 | device_remove_bin_file(f_dev, &firmware_attr_data); | 877 | device_remove_bin_file(f_dev, &firmware_attr_data); |
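
Taken together, these hunks close the window the new comment describes: the abort path now clears fw_priv->buf, and every reader re-checks that pointer under fw_lock before dereferencing it. A user-space analogue of the locking pattern, with illustrative names and pthreads standing in for the kernel mutex:

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t fw_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fw_buf { int loading; } *shared_buf;  /* like fw_priv->buf */

    int read_loading(void)
    {
            int loading = 0;

            pthread_mutex_lock(&fw_lock);
            if (shared_buf)             /* the abort path may have cleared it */
                    loading = shared_buf->loading;
            pthread_mutex_unlock(&fw_lock);
            return loading;
    }

    void abort_load(void)
    {
            pthread_mutex_lock(&fw_lock);
            shared_buf = NULL;          /* invalidate under the same lock */
            pthread_mutex_unlock(&fw_lock);
    }
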
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3063452e55da..49394e3f31bc 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
| @@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) | |||
| 1036 | char *name; | 1036 | char *name; |
| 1037 | u64 segment; | 1037 | u64 segment; |
| 1038 | int ret; | 1038 | int ret; |
| 1039 | char *name_format; | ||
| 1039 | 1040 | ||
| 1040 | name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); | 1041 | name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); |
| 1041 | if (!name) | 1042 | if (!name) |
| 1042 | return NULL; | 1043 | return NULL; |
| 1043 | segment = offset >> rbd_dev->header.obj_order; | 1044 | segment = offset >> rbd_dev->header.obj_order; |
| 1044 | ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx", | 1045 | name_format = "%s.%012llx"; |
| 1046 | if (rbd_dev->image_format == 2) | ||
| 1047 | name_format = "%s.%016llx"; | ||
| 1048 | ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format, | ||
| 1045 | rbd_dev->header.object_prefix, segment); | 1049 | rbd_dev->header.object_prefix, segment); |
| 1046 | if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { | 1050 | if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) { |
| 1047 | pr_err("error formatting segment name for #%llu (%d)\n", | 1051 | pr_err("error formatting segment name for #%llu (%d)\n", |
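
Format 2 images get 16 hex digits in the segment object name while format 1 keeps 12, so the format string is now chosen per image instead of being hard-coded. A stand-alone sketch of the same selection; the helper name is invented:

    #include <stdio.h>
    #include <stdint.h>

    int format_segment_name(char *name, size_t len, int image_format,
                            const char *prefix, uint64_t segment)
    {
            const char *fmt = (image_format == 2) ? "%s.%016llx"
                                                  : "%s.%012llx";

            return snprintf(name, len, fmt, prefix,
                            (unsigned long long)segment);
    }
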
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index dcde35231e25..5b7b9110254b 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c | |||
| @@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev, | |||
| 190 | if (ret) | 190 | if (ret) |
| 191 | return ERR_PTR(ret); | 191 | return ERR_PTR(ret); |
| 192 | } | 192 | } |
| 193 | return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, | 193 | return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags); |
| 194 | 0600); | ||
| 195 | } | 194 | } |
| 196 | EXPORT_SYMBOL(drm_gem_prime_export); | 195 | EXPORT_SYMBOL(drm_gem_prime_export); |
| 197 | 196 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 0e5341695922..6948eb88c2b7 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
| @@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev) | |||
| 2687 | int r600_uvd_init(struct radeon_device *rdev) | 2687 | int r600_uvd_init(struct radeon_device *rdev) |
| 2688 | { | 2688 | { |
| 2689 | int i, j, r; | 2689 | int i, j, r; |
| 2690 | /* disable byte swapping */ | ||
| 2691 | u32 lmi_swap_cntl = 0; | ||
| 2692 | u32 mp_swap_cntl = 0; | ||
| 2690 | 2693 | ||
| 2691 | /* raise clocks while booting up the VCPU */ | 2694 | /* raise clocks while booting up the VCPU */ |
| 2692 | radeon_set_uvd_clocks(rdev, 53300, 40000); | 2695 | radeon_set_uvd_clocks(rdev, 53300, 40000); |
| @@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
| 2711 | WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | | 2714 | WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | |
| 2712 | (1 << 21) | (1 << 9) | (1 << 20)); | 2715 | (1 << 21) | (1 << 9) | (1 << 20)); |
| 2713 | 2716 | ||
| 2714 | /* disable byte swapping */ | 2717 | #ifdef __BIG_ENDIAN |
| 2715 | WREG32(UVD_LMI_SWAP_CNTL, 0); | 2718 | /* swap (8 in 32) RB and IB */ |
| 2716 | WREG32(UVD_MP_SWAP_CNTL, 0); | 2719 | lmi_swap_cntl = 0xa; |
| 2720 | mp_swap_cntl = 0; | ||
| 2721 | #endif | ||
| 2722 | WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl); | ||
| 2723 | WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl); | ||
| 2717 | 2724 | ||
| 2718 | WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); | 2725 | WREG32(UVD_MPC_SET_MUXA0, 0x40c2040); |
| 2719 | WREG32(UVD_MPC_SET_MUXA1, 0x0); | 2726 | WREG32(UVD_MPC_SET_MUXA1, 0x0); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 189973836cff..b0dc0b6cb4e0 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) | |||
| 244 | */ | 244 | */ |
| 245 | void radeon_wb_disable(struct radeon_device *rdev) | 245 | void radeon_wb_disable(struct radeon_device *rdev) |
| 246 | { | 246 | { |
| 247 | int r; | ||
| 248 | |||
| 249 | if (rdev->wb.wb_obj) { | ||
| 250 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | ||
| 251 | if (unlikely(r != 0)) | ||
| 252 | return; | ||
| 253 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
| 254 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
| 255 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 256 | } | ||
| 257 | rdev->wb.enabled = false; | 247 | rdev->wb.enabled = false; |
| 258 | } | 248 | } |
| 259 | 249 | ||
| @@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev) | |||
| 269 | { | 259 | { |
| 270 | radeon_wb_disable(rdev); | 260 | radeon_wb_disable(rdev); |
| 271 | if (rdev->wb.wb_obj) { | 261 | if (rdev->wb.wb_obj) { |
| 262 | if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) { | ||
| 263 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
| 264 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
| 265 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 266 | } | ||
| 272 | radeon_bo_unref(&rdev->wb.wb_obj); | 267 | radeon_bo_unref(&rdev->wb.wb_obj); |
| 273 | rdev->wb.wb = NULL; | 268 | rdev->wb.wb = NULL; |
| 274 | rdev->wb.wb_obj = NULL; | 269 | rdev->wb.wb_obj = NULL; |
| @@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev) | |||
| 295 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); | 290 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
| 296 | return r; | 291 | return r; |
| 297 | } | 292 | } |
| 298 | } | 293 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
| 299 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); | 294 | if (unlikely(r != 0)) { |
| 300 | if (unlikely(r != 0)) { | 295 | radeon_wb_fini(rdev); |
| 301 | radeon_wb_fini(rdev); | 296 | return r; |
| 302 | return r; | 297 | } |
| 303 | } | 298 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, |
| 304 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | 299 | &rdev->wb.gpu_addr); |
| 305 | &rdev->wb.gpu_addr); | 300 | if (r) { |
| 306 | if (r) { | 301 | radeon_bo_unreserve(rdev->wb.wb_obj); |
| 302 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
| 303 | radeon_wb_fini(rdev); | ||
| 304 | return r; | ||
| 305 | } | ||
| 306 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | ||
| 307 | radeon_bo_unreserve(rdev->wb.wb_obj); | 307 | radeon_bo_unreserve(rdev->wb.wb_obj); |
| 308 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | 308 | if (r) { |
| 309 | radeon_wb_fini(rdev); | 309 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); |
| 310 | return r; | 310 | radeon_wb_fini(rdev); |
| 311 | } | 311 | return r; |
| 312 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 312 | } |
| 313 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
| 314 | if (r) { | ||
| 315 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); | ||
| 316 | radeon_wb_fini(rdev); | ||
| 317 | return r; | ||
| 318 | } | 313 | } |
| 319 | 314 | ||
| 320 | /* clear wb memory */ | 315 | /* clear wb memory */ |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 5b937dfe6f65..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) | |||
| 63 | { | 63 | { |
| 64 | struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; | 64 | struct radeon_fence_driver *drv = &rdev->fence_drv[ring]; |
| 65 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { | 65 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
| 66 | *drv->cpu_addr = cpu_to_le32(seq); | 66 | if (drv->cpu_addr) { |
| 67 | *drv->cpu_addr = cpu_to_le32(seq); | ||
| 68 | } | ||
| 67 | } else { | 69 | } else { |
| 68 | WREG32(drv->scratch_reg, seq); | 70 | WREG32(drv->scratch_reg, seq); |
| 69 | } | 71 | } |
| @@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring) | |||
| 84 | u32 seq = 0; | 86 | u32 seq = 0; |
| 85 | 87 | ||
| 86 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { | 88 | if (likely(rdev->wb.enabled || !drv->scratch_reg)) { |
| 87 | seq = le32_to_cpu(*drv->cpu_addr); | 89 | if (drv->cpu_addr) { |
| 90 | seq = le32_to_cpu(*drv->cpu_addr); | ||
| 91 | } else { | ||
| 92 | seq = lower_32_bits(atomic64_read(&drv->last_seq)); | ||
| 93 | } | ||
| 88 | } else { | 94 | } else { |
| 89 | seq = RREG32(drv->scratch_reg); | 95 | seq = RREG32(drv->scratch_reg); |
| 90 | } | 96 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 2c1341f63dc5..43ec4a401f07 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
| @@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, | |||
| 1197 | int radeon_vm_bo_rmv(struct radeon_device *rdev, | 1197 | int radeon_vm_bo_rmv(struct radeon_device *rdev, |
| 1198 | struct radeon_bo_va *bo_va) | 1198 | struct radeon_bo_va *bo_va) |
| 1199 | { | 1199 | { |
| 1200 | int r; | 1200 | int r = 0; |
| 1201 | 1201 | ||
| 1202 | mutex_lock(&rdev->vm_manager.lock); | 1202 | mutex_lock(&rdev->vm_manager.lock); |
| 1203 | mutex_lock(&bo_va->vm->mutex); | 1203 | mutex_lock(&bo_va->vm->mutex); |
| 1204 | r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); | 1204 | if (bo_va->soffset) { |
| 1205 | r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); | ||
| 1206 | } | ||
| 1205 | mutex_unlock(&rdev->vm_manager.lock); | 1207 | mutex_unlock(&rdev->vm_manager.lock); |
| 1206 | list_del(&bo_va->vm_list); | 1208 | list_del(&bo_va->vm_list); |
| 1207 | mutex_unlock(&bo_va->vm->mutex); | 1209 | mutex_unlock(&bo_va->vm->mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index e17faa7cf732..82434018cbe8 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
| @@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi | |||
| 402 | return -ENOMEM; | 402 | return -ENOMEM; |
| 403 | /* Align requested size with padding so unlock_commit can | 403 | /* Align requested size with padding so unlock_commit can |
| 404 | * pad safely */ | 404 | * pad safely */ |
| 405 | radeon_ring_free_size(rdev, ring); | ||
| 406 | if (ring->ring_free_dw == (ring->ring_size / 4)) { | ||
| 407 | /* This is an empty ring update lockup info to avoid | ||
| 408 | * false positive. | ||
| 409 | */ | ||
| 410 | radeon_ring_lockup_update(ring); | ||
| 411 | } | ||
| 405 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; | 412 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; |
| 406 | while (ndw > (ring->ring_free_dw - 1)) { | 413 | while (ndw > (ring->ring_free_dw - 1)) { |
| 407 | radeon_ring_free_size(rdev, ring); | 414 | radeon_ring_free_size(rdev, ring); |
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 906e5c0ca3b9..cad735dd02c6 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
| @@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev) | |||
| 159 | if (!r) { | 159 | if (!r) { |
| 160 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); | 160 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); |
| 161 | radeon_bo_unpin(rdev->uvd.vcpu_bo); | 161 | radeon_bo_unpin(rdev->uvd.vcpu_bo); |
| 162 | rdev->uvd.cpu_addr = NULL; | ||
| 163 | if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) { | ||
| 164 | radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr); | ||
| 165 | } | ||
| 162 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | 166 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
| 167 | |||
| 168 | if (rdev->uvd.cpu_addr) { | ||
| 169 | radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX); | ||
| 170 | } else { | ||
| 171 | rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL; | ||
| 172 | } | ||
| 163 | } | 173 | } |
| 164 | return r; | 174 | return r; |
| 165 | } | 175 | } |
| @@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev) | |||
| 178 | return r; | 188 | return r; |
| 179 | } | 189 | } |
| 180 | 190 | ||
| 191 | /* It was pinned to the CPU domain during suspend; unmap and unpin it */ | ||
| 192 | radeon_bo_kunmap(rdev->uvd.vcpu_bo); | ||
| 193 | radeon_bo_unpin(rdev->uvd.vcpu_bo); | ||
| 194 | |||
| 181 | r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, | 195 | r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM, |
| 182 | &rdev->uvd.gpu_addr); | 196 | &rdev->uvd.gpu_addr); |
| 183 | if (r) { | 197 | if (r) { |
| @@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, | |||
| 613 | } | 627 | } |
| 614 | 628 | ||
| 615 | /* stitch together an UVD create msg */ | 629 | /* stitch together an UVD create msg */ |
| 616 | msg[0] = 0x00000de4; | 630 | msg[0] = cpu_to_le32(0x00000de4); |
| 617 | msg[1] = 0x00000000; | 631 | msg[1] = cpu_to_le32(0x00000000); |
| 618 | msg[2] = handle; | 632 | msg[2] = cpu_to_le32(handle); |
| 619 | msg[3] = 0x00000000; | 633 | msg[3] = cpu_to_le32(0x00000000); |
| 620 | msg[4] = 0x00000000; | 634 | msg[4] = cpu_to_le32(0x00000000); |
| 621 | msg[5] = 0x00000000; | 635 | msg[5] = cpu_to_le32(0x00000000); |
| 622 | msg[6] = 0x00000000; | 636 | msg[6] = cpu_to_le32(0x00000000); |
| 623 | msg[7] = 0x00000780; | 637 | msg[7] = cpu_to_le32(0x00000780); |
| 624 | msg[8] = 0x00000440; | 638 | msg[8] = cpu_to_le32(0x00000440); |
| 625 | msg[9] = 0x00000000; | 639 | msg[9] = cpu_to_le32(0x00000000); |
| 626 | msg[10] = 0x01b37000; | 640 | msg[10] = cpu_to_le32(0x01b37000); |
| 627 | for (i = 11; i < 1024; ++i) | 641 | for (i = 11; i < 1024; ++i) |
| 628 | msg[i] = 0x0; | 642 | msg[i] = cpu_to_le32(0x0); |
| 629 | 643 | ||
| 630 | radeon_bo_kunmap(bo); | 644 | radeon_bo_kunmap(bo); |
| 631 | radeon_bo_unreserve(bo); | 645 | radeon_bo_unreserve(bo); |
| @@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring, | |||
| 659 | } | 673 | } |
| 660 | 674 | ||
| 661 | /* stitch together an UVD destroy msg */ | 675 | /* stitch together an UVD destroy msg */ |
| 662 | msg[0] = 0x00000de4; | 676 | msg[0] = cpu_to_le32(0x00000de4); |
| 663 | msg[1] = 0x00000002; | 677 | msg[1] = cpu_to_le32(0x00000002); |
| 664 | msg[2] = handle; | 678 | msg[2] = cpu_to_le32(handle); |
| 665 | msg[3] = 0x00000000; | 679 | msg[3] = cpu_to_le32(0x00000000); |
| 666 | for (i = 4; i < 1024; ++i) | 680 | for (i = 4; i < 1024; ++i) |
| 667 | msg[i] = 0x0; | 681 | msg[i] = cpu_to_le32(0x0); |
| 668 | 682 | ||
| 669 | radeon_bo_kunmap(bo); | 683 | radeon_bo_kunmap(bo); |
| 670 | radeon_bo_unreserve(bo); | 684 | radeon_bo_unreserve(bo); |
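
Wrapping every word of the UVD create/destroy messages in cpu_to_le32() fixes their byte layout on big-endian hosts: the values are stored little-endian regardless of the CPU, so the device sees the same bytes everywhere. The equivalent move in portable user-space C uses htole32(), glibc's analogue of cpu_to_le32(); only the first words are shown here:

    #define _DEFAULT_SOURCE             /* for htole32() on glibc */
    #include <endian.h>
    #include <stdint.h>

    void build_create_msg(uint32_t *msg, uint32_t handle)
    {
            msg[0] = htole32(0x00000de4);
            msg[1] = htole32(0x00000000);
            msg[2] = htole32(handle);
            /* ... remaining words as in the hunk above, each via htole32() */
    }
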
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index d6cbfe9df218..fa061d46527f 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c | |||
| @@ -137,7 +137,7 @@ static const struct xpad_device { | |||
| 137 | { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 137 | { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
| 138 | { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, | 138 | { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, |
| 139 | { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, | 139 | { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, |
| 140 | { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 }, | 140 | { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
| 141 | { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, | 141 | { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, |
| 142 | { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, | 142 | { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, |
| 143 | { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, | 143 | { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, |
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 62a2c0e4cc99..7ac9c9818d55 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
| @@ -431,6 +431,7 @@ config KEYBOARD_TEGRA | |||
| 431 | 431 | ||
| 432 | config KEYBOARD_OPENCORES | 432 | config KEYBOARD_OPENCORES |
| 433 | tristate "OpenCores Keyboard Controller" | 433 | tristate "OpenCores Keyboard Controller" |
| 434 | depends on HAS_IOMEM | ||
| 434 | help | 435 | help |
| 435 | Say Y here if you want to use the OpenCores Keyboard Controller | 436 | Say Y here if you want to use the OpenCores Keyboard Controller |
| 436 | http://www.opencores.org/project,keyboardcontroller | 437 | http://www.opencores.org/project,keyboardcontroller |
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig index aebfe3ecb945..1bda828f4b55 100644 --- a/drivers/input/serio/Kconfig +++ b/drivers/input/serio/Kconfig | |||
| @@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2 | |||
| 205 | 205 | ||
| 206 | config SERIO_ALTERA_PS2 | 206 | config SERIO_ALTERA_PS2 |
| 207 | tristate "Altera UP PS/2 controller" | 207 | tristate "Altera UP PS/2 controller" |
| 208 | depends on HAS_IOMEM | ||
| 208 | help | 209 | help |
| 209 | Say Y here if you have Altera University Program PS/2 ports. | 210 | Say Y here if you have Altera University Program PS/2 ports. |
| 210 | 211 | ||
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 518282da6d85..384fbcd0cee0 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
| @@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
| 363 | case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ | 363 | case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ |
| 364 | case 0x160802: /* Cintiq 13HD Pro Pen */ | 364 | case 0x160802: /* Cintiq 13HD Pro Pen */ |
| 365 | case 0x180802: /* DTH2242 Pen */ | 365 | case 0x180802: /* DTH2242 Pen */ |
| 366 | case 0x100802: /* Intuos4/5 13HD/24HD General Pen */ | ||
| 366 | wacom->tool[idx] = BTN_TOOL_PEN; | 367 | wacom->tool[idx] = BTN_TOOL_PEN; |
| 367 | break; | 368 | break; |
| 368 | 369 | ||
| @@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) | |||
| 401 | case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ | 402 | case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ |
| 402 | case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ | 403 | case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ |
| 403 | case 0x18080a: /* DTH2242 Eraser */ | 404 | case 0x18080a: /* DTH2242 Eraser */ |
| 405 | case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */ | ||
| 404 | wacom->tool[idx] = BTN_TOOL_RUBBER; | 406 | wacom->tool[idx] = BTN_TOOL_RUBBER; |
| 405 | break; | 407 | break; |
| 406 | 408 | ||
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c index 8e60437ac85b..ae89d2609ab0 100644 --- a/drivers/input/touchscreen/cyttsp_core.c +++ b/drivers/input/touchscreen/cyttsp_core.c | |||
| @@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd) | |||
| 116 | return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); | 116 | return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd); |
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | static int cyttsp_handshake(struct cyttsp *ts) | ||
| 120 | { | ||
| 121 | if (ts->pdata->use_hndshk) | ||
| 122 | return ttsp_send_command(ts, | ||
| 123 | ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); | ||
| 124 | |||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | |||
| 119 | static int cyttsp_load_bl_regs(struct cyttsp *ts) | 128 | static int cyttsp_load_bl_regs(struct cyttsp *ts) |
| 120 | { | 129 | { |
| 121 | memset(&ts->bl_data, 0, sizeof(ts->bl_data)); | 130 | memset(&ts->bl_data, 0, sizeof(ts->bl_data)); |
| @@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts) | |||
| 133 | memcpy(bl_cmd, bl_command, sizeof(bl_command)); | 142 | memcpy(bl_cmd, bl_command, sizeof(bl_command)); |
| 134 | if (ts->pdata->bl_keys) | 143 | if (ts->pdata->bl_keys) |
| 135 | memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], | 144 | memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS], |
| 136 | ts->pdata->bl_keys, sizeof(bl_command)); | 145 | ts->pdata->bl_keys, CY_NUM_BL_KEYS); |
| 137 | 146 | ||
| 138 | error = ttsp_write_block_data(ts, CY_REG_BASE, | 147 | error = ttsp_write_block_data(ts, CY_REG_BASE, |
| 139 | sizeof(bl_cmd), bl_cmd); | 148 | sizeof(bl_cmd), bl_cmd); |
| @@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts) | |||
| 167 | if (error) | 176 | if (error) |
| 168 | return error; | 177 | return error; |
| 169 | 178 | ||
| 179 | error = cyttsp_handshake(ts); | ||
| 180 | if (error) | ||
| 181 | return error; | ||
| 182 | |||
| 170 | return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0; | 183 | return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0; |
| 171 | } | 184 | } |
| 172 | 185 | ||
| @@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts) | |||
| 188 | if (error) | 201 | if (error) |
| 189 | return error; | 202 | return error; |
| 190 | 203 | ||
| 204 | error = cyttsp_handshake(ts); | ||
| 205 | if (error) | ||
| 206 | return error; | ||
| 207 | |||
| 191 | if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) | 208 | if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl) |
| 192 | return -EIO; | 209 | return -EIO; |
| 193 | 210 | ||
| @@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle) | |||
| 344 | goto out; | 361 | goto out; |
| 345 | 362 | ||
| 346 | /* provide flow control handshake */ | 363 | /* provide flow control handshake */ |
| 347 | if (ts->pdata->use_hndshk) { | 364 | error = cyttsp_handshake(ts); |
| 348 | error = ttsp_send_command(ts, | 365 | if (error) |
| 349 | ts->xy_data.hst_mode ^ CY_HNDSHK_BIT); | 366 | goto out; |
| 350 | if (error) | ||
| 351 | goto out; | ||
| 352 | } | ||
| 353 | 367 | ||
| 354 | if (unlikely(ts->state == CY_IDLE_STATE)) | 368 | if (unlikely(ts->state == CY_IDLE_STATE)) |
| 355 | goto out; | 369 | goto out; |
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h index 1aa3c6967e70..f1ebde369f86 100644 --- a/drivers/input/touchscreen/cyttsp_core.h +++ b/drivers/input/touchscreen/cyttsp_core.h | |||
| @@ -67,8 +67,8 @@ struct cyttsp_xydata { | |||
| 67 | /* TTSP System Information interface definition */ | 67 | /* TTSP System Information interface definition */ |
| 68 | struct cyttsp_sysinfo_data { | 68 | struct cyttsp_sysinfo_data { |
| 69 | u8 hst_mode; | 69 | u8 hst_mode; |
| 70 | u8 mfg_cmd; | ||
| 71 | u8 mfg_stat; | 70 | u8 mfg_stat; |
| 71 | u8 mfg_cmd; | ||
| 72 | u8 cid[3]; | 72 | u8 cid[3]; |
| 73 | u8 tt_undef1; | 73 | u8 tt_undef1; |
| 74 | u8 uid[8]; | 74 | u8 uid[8]; |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 1760ceb68b7b..19ceaa60e0f4 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d, | |||
| 705 | static int __cpuinit gic_secondary_init(struct notifier_block *nfb, | 705 | static int __cpuinit gic_secondary_init(struct notifier_block *nfb, |
| 706 | unsigned long action, void *hcpu) | 706 | unsigned long action, void *hcpu) |
| 707 | { | 707 | { |
| 708 | if (action == CPU_STARTING) | 708 | if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
| 709 | gic_cpu_init(&gic_data[0]); | 709 | gic_cpu_init(&gic_data[0]); |
| 710 | return NOTIFY_OK; | 710 | return NOTIFY_OK; |
| 711 | } | 711 | } |
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index 7f5a7cac6dc7..8270388e2a0d 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
| @@ -136,9 +136,9 @@ config DVB_NET | |||
| 136 | 136 | ||
| 137 | # This Kconfig option is used by both PCI and USB drivers | 137 | # This Kconfig option is used by both PCI and USB drivers |
| 138 | config TTPCI_EEPROM | 138 | config TTPCI_EEPROM |
| 139 | tristate | 139 | tristate |
| 140 | depends on I2C | 140 | depends on I2C |
| 141 | default n | 141 | default n |
| 142 | 142 | ||
| 143 | source "drivers/media/dvb-core/Kconfig" | 143 | source "drivers/media/dvb-core/Kconfig" |
| 144 | 144 | ||
| @@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT | |||
| 189 | 189 | ||
| 190 | If unsure say Y. | 190 | If unsure say Y. |
| 191 | 191 | ||
| 192 | config MEDIA_ATTACH | ||
| 193 | bool | ||
| 194 | depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT | ||
| 195 | depends on MODULES | ||
| 196 | default MODULES | ||
| 197 | |||
| 192 | source "drivers/media/i2c/Kconfig" | 198 | source "drivers/media/i2c/Kconfig" |
| 193 | source "drivers/media/tuners/Kconfig" | 199 | source "drivers/media/tuners/Kconfig" |
| 194 | source "drivers/media/dvb-frontends/Kconfig" | 200 | source "drivers/media/dvb-frontends/Kconfig" |
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 520e4398b69c..0741945b79ed 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c | |||
| @@ -834,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev) | |||
| 834 | goto err_clk; | 834 | goto err_clk; |
| 835 | } | 835 | } |
| 836 | pm_runtime_enable(dev); | 836 | pm_runtime_enable(dev); |
| 837 | /* | ||
| 838 | * Enable only the ISP power domain, keep FIMC-IS clocks off until | ||
| 839 | * the whole clock tree is configured. The ISP power domain needs | ||
| 840 | * be active in order to acces any CMU_ISP clock registers. | ||
| 841 | */ | ||
| 842 | ret = pm_runtime_get_sync(dev); | ||
| 843 | if (ret < 0) | ||
| 844 | goto err_irq; | ||
| 845 | |||
| 846 | ret = fimc_is_setup_clocks(is); | ||
| 847 | pm_runtime_put_sync(dev); | ||
| 848 | 837 | ||
| 838 | ret = pm_runtime_get_sync(dev); | ||
| 849 | if (ret < 0) | 839 | if (ret < 0) |
| 850 | goto err_irq; | 840 | goto err_irq; |
| 851 | 841 | ||
| 852 | is->clk_init = true; | ||
| 853 | |||
| 854 | is->alloc_ctx = vb2_dma_contig_init_ctx(dev); | 842 | is->alloc_ctx = vb2_dma_contig_init_ctx(dev); |
| 855 | if (IS_ERR(is->alloc_ctx)) { | 843 | if (IS_ERR(is->alloc_ctx)) { |
| 856 | ret = PTR_ERR(is->alloc_ctx); | 844 | ret = PTR_ERR(is->alloc_ctx); |
| @@ -872,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev) | |||
| 872 | if (ret < 0) | 860 | if (ret < 0) |
| 873 | goto err_dfs; | 861 | goto err_dfs; |
| 874 | 862 | ||
| 863 | pm_runtime_put_sync(dev); | ||
| 864 | |||
| 875 | dev_dbg(dev, "FIMC-IS registered successfully\n"); | 865 | dev_dbg(dev, "FIMC-IS registered successfully\n"); |
| 876 | return 0; | 866 | return 0; |
| 877 | 867 | ||
| @@ -891,9 +881,11 @@ err_clk: | |||
| 891 | static int fimc_is_runtime_resume(struct device *dev) | 881 | static int fimc_is_runtime_resume(struct device *dev) |
| 892 | { | 882 | { |
| 893 | struct fimc_is *is = dev_get_drvdata(dev); | 883 | struct fimc_is *is = dev_get_drvdata(dev); |
| 884 | int ret; | ||
| 894 | 885 | ||
| 895 | if (!is->clk_init) | 886 | ret = fimc_is_setup_clocks(is); |
| 896 | return 0; | 887 | if (ret) |
| 888 | return ret; | ||
| 897 | 889 | ||
| 898 | return fimc_is_enable_clocks(is); | 890 | return fimc_is_enable_clocks(is); |
| 899 | } | 891 | } |
| @@ -902,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev) | |||
| 902 | { | 894 | { |
| 903 | struct fimc_is *is = dev_get_drvdata(dev); | 895 | struct fimc_is *is = dev_get_drvdata(dev); |
| 904 | 896 | ||
| 905 | if (is->clk_init) | 897 | fimc_is_disable_clocks(is); |
| 906 | fimc_is_disable_clocks(is); | ||
| 907 | |||
| 908 | return 0; | 898 | return 0; |
| 909 | } | 899 | } |
| 910 | 900 | ||
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h index 606a7c9fe526..d7db133b493f 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.h +++ b/drivers/media/platform/exynos4-is/fimc-is.h | |||
| @@ -264,7 +264,6 @@ struct fimc_is { | |||
| 264 | spinlock_t slock; | 264 | spinlock_t slock; |
| 265 | 265 | ||
| 266 | struct clk *clocks[ISS_CLKS_MAX]; | 266 | struct clk *clocks[ISS_CLKS_MAX]; |
| 267 | bool clk_init; | ||
| 268 | void __iomem *regs; | 267 | void __iomem *regs; |
| 269 | void __iomem *pmu_regs; | 268 | void __iomem *pmu_regs; |
| 270 | int irq; | 269 | int irq; |
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile index ddc2900d88a2..d18cb5edd2d5 100644 --- a/drivers/media/platform/s5p-jpeg/Makefile +++ b/drivers/media/platform/s5p-jpeg/Makefile | |||
| @@ -1,2 +1,2 @@ | |||
| 1 | s5p-jpeg-objs := jpeg-core.o | 1 | s5p-jpeg-objs := jpeg-core.o |
| 2 | obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o | 2 | obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o |
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile index 379008c6d09a..15f59b324fef 100644 --- a/drivers/media/platform/s5p-mfc/Makefile +++ b/drivers/media/platform/s5p-mfc/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o | 1 | obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o |
| 2 | s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o | 2 | s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o |
| 3 | s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o | 3 | s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o |
| 4 | s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o | 4 | s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o |
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig index f6768cad001a..15665debc572 100644 --- a/drivers/media/tuners/Kconfig +++ b/drivers/media/tuners/Kconfig | |||
| @@ -1,23 +1,3 @@ | |||
| 1 | config MEDIA_ATTACH | ||
| 2 | bool "Load and attach frontend and tuner driver modules as needed" | ||
| 3 | depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT | ||
| 4 | depends on MODULES | ||
| 5 | default y if !EXPERT | ||
| 6 | help | ||
| 7 | Remove the static dependency of DVB card drivers on all | ||
| 8 | frontend modules for all possible card variants. Instead, | ||
| 9 | allow the card drivers to only load the frontend modules | ||
| 10 | they require. | ||
| 11 | |||
| 12 | Also, tuner module will automatically load a tuner driver | ||
| 13 | when needed, for analog mode. | ||
| 14 | |||
| 15 | This saves several KBytes of memory. | ||
| 16 | |||
| 17 | Note: You will need module-init-tools v3.2 or later for this feature. | ||
| 18 | |||
| 19 | If unsure say Y. | ||
| 20 | |||
| 21 | # Analog TV tuners, auto-loaded via tuner.ko | 1 | # Analog TV tuners, auto-loaded via tuner.ko |
| 22 | config MEDIA_TUNER | 2 | config MEDIA_TUNER |
| 23 | tristate | 3 | tristate |
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 22015fe1a0f3..2cc8ec70e3b6 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c | |||
| @@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) | |||
| 376 | struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; | 376 | struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf}; |
| 377 | struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; | 377 | struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf}; |
| 378 | struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; | 378 | struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf}; |
| 379 | struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf}; | 379 | struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf}; |
| 380 | 380 | ||
| 381 | dev_dbg(&d->udev->dev, "%s:\n", __func__); | 381 | dev_dbg(&d->udev->dev, "%s:\n", __func__); |
| 382 | 382 | ||
| @@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d) | |||
| 481 | goto found; | 481 | goto found; |
| 482 | } | 482 | } |
| 483 | 483 | ||
| 484 | /* check R820T by reading tuner stats at I2C addr 0x1a */ | 484 | /* check R820T ID register; reg=00 val=69 */ |
| 485 | ret = rtl28xxu_ctrl_msg(d, &req_r820t); | 485 | ret = rtl28xxu_ctrl_msg(d, &req_r820t); |
| 486 | if (ret == 0) { | 486 | if (ret == 0 && buf[0] == 0x69) { |
| 487 | priv->tuner = TUNER_RTL2832_R820T; | 487 | priv->tuner = TUNER_RTL2832_R820T; |
| 488 | priv->tuner_name = "R820T"; | 488 | priv->tuner_name = "R820T"; |
| 489 | goto found; | 489 | goto found; |
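The probe above now reads one byte from register 0x00 and only identifies an R820T when that byte is the ID value 0x69, instead of accepting any successful read. The driver tunnels this read through the device's USB control endpoint; a rough userspace analogue using the i2c-dev interface is sketched below, with a hypothetical bus number and the 0x1a address taken from the removed comment.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

#define R820T_ADDR 0x1a   /* 7-bit I2C address named in the old comment */
#define R820T_ID   0x69   /* expected content of register 0x00 */

int main(void)
{
	unsigned char reg = 0x00, val = 0;
	int fd = open("/dev/i2c-0", O_RDWR);   /* hypothetical bus number */

	if (fd < 0)
		return 1;
	/* select the chip, write the register index, read one byte back */
	if (ioctl(fd, I2C_SLAVE, R820T_ADDR) == 0 &&
	    write(fd, &reg, 1) == 1 && read(fd, &val, 1) == 1)
		printf("reg 0x00 = 0x%02x -> %s\n", val,
		       val == R820T_ID ? "R820T" : "not an R820T");
	close(fd);
	return 0;
}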
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index 9544cdc0d1af..e79e006eb9ab 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c | |||
| @@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev) | |||
| 811 | return pcidev->irq; | 811 | return pcidev->irq; |
| 812 | } | 812 | } |
| 813 | 813 | ||
| 814 | static struct iosapic_info *first_isi = NULL; | ||
| 815 | |||
| 816 | #ifdef CONFIG_64BIT | ||
| 817 | int iosapic_serial_irq(int num) | ||
| 818 | { | ||
| 819 | struct iosapic_info *isi = first_isi; | ||
| 820 | struct irt_entry *irte = NULL; /* only used if PAT PDC */ | ||
| 821 | struct vector_info *vi; | ||
| 822 | int isi_line; /* line used by device */ | ||
| 823 | |||
| 824 | /* lookup IRT entry for isi/slot/pin set */ | ||
| 825 | irte = &irt_cell[num]; | ||
| 826 | |||
| 827 | DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n", | ||
| 828 | irte, | ||
| 829 | irte->entry_type, | ||
| 830 | irte->entry_length, | ||
| 831 | irte->polarity_trigger, | ||
| 832 | irte->src_bus_irq_devno, | ||
| 833 | irte->src_bus_id, | ||
| 834 | irte->src_seg_id, | ||
| 835 | irte->dest_iosapic_intin, | ||
| 836 | (u32) irte->dest_iosapic_addr); | ||
| 837 | isi_line = irte->dest_iosapic_intin; | ||
| 838 | |||
| 839 | /* get vector info for this input line */ | ||
| 840 | vi = isi->isi_vector + isi_line; | ||
| 841 | DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi); | ||
| 842 | |||
| 843 | /* If this IRQ line has already been set up, skip it */ | ||
| 844 | if (vi->irte) | ||
| 845 | goto out; | ||
| 846 | |||
| 847 | vi->irte = irte; | ||
| 848 | |||
| 849 | /* | ||
| 850 | * Allocate processor IRQ | ||
| 851 | * | ||
| 852 | * XXX/FIXME The txn_alloc_irq() code and related code should be | ||
| 853 | * moved to enable_irq(). That way we only allocate processor IRQ | ||
| 854 | * bits for devices that actually have drivers claiming them. | ||
| 855 | * Right now we assign an IRQ to every PCI device present, | ||
| 856 | * regardless of whether it's used or not. | ||
| 857 | */ | ||
| 858 | vi->txn_irq = txn_alloc_irq(8); | ||
| 859 | |||
| 860 | if (vi->txn_irq < 0) | ||
| 861 | panic("I/O sapic: couldn't get TXN IRQ\n"); | ||
| 862 | |||
| 863 | /* enable_irq() will use txn_* to program IRdT */ | ||
| 864 | vi->txn_addr = txn_alloc_addr(vi->txn_irq); | ||
| 865 | vi->txn_data = txn_alloc_data(vi->txn_irq); | ||
| 866 | |||
| 867 | vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI; | ||
| 868 | vi->eoi_data = cpu_to_le32(vi->txn_data); | ||
| 869 | |||
| 870 | cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi); | ||
| 871 | |||
| 872 | out: | ||
| 873 | |||
| 874 | return vi->txn_irq; | ||
| 875 | } | ||
| 876 | #endif | ||
| 877 | |||
| 814 | 878 | ||
| 815 | /* | 879 | /* |
| 816 | ** squirrel away the I/O Sapic Version | 880 | ** squirrel away the I/O Sapic Version |
| @@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa) | |||
| 877 | vip->irqline = (unsigned char) cnt; | 941 | vip->irqline = (unsigned char) cnt; |
| 878 | vip->iosapic = isi; | 942 | vip->iosapic = isi; |
| 879 | } | 943 | } |
| 944 | if (!first_isi) | ||
| 945 | first_isi = isi; | ||
| 880 | return isi; | 946 | return isi; |
| 881 | } | 947 | } |
| 882 | 948 | ||
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7a3870f385f6..66b0b26a1381 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) | |||
| 688 | * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen | 688 | * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen |
| 689 | * for qla_tgt_xmit_response LLD code | 689 | * for qla_tgt_xmit_response LLD code |
| 690 | */ | 690 | */ |
| 691 | if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { | ||
| 692 | se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; | ||
| 693 | se_cmd->residual_count = 0; | ||
| 694 | } | ||
| 691 | se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; | 695 | se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; |
| 692 | se_cmd->residual_count = se_cmd->data_length; | 696 | se_cmd->residual_count += se_cmd->data_length; |
| 693 | 697 | ||
| 694 | cmd->bufflen = 0; | 698 | cmd->bufflen = 0; |
| 695 | } | 699 | } |
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index c735c5a008a2..6427600b5bbe 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c | |||
| @@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, | |||
| 59 | int ret; | 59 | int ret; |
| 60 | 60 | ||
| 61 | sg_free_table(sgt); | 61 | sg_free_table(sgt); |
| 62 | ret = sg_alloc_table(sgt, nents, GFP_KERNEL); | 62 | ret = sg_alloc_table(sgt, nents, GFP_ATOMIC); |
| 63 | if (ret) | 63 | if (ret) |
| 64 | return ret; | 64 | return ret; |
| 65 | } | 65 | } |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index f5d84d6f8222..48b396fced0a 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
| @@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | |||
| 1075 | acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) | 1075 | acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) |
| 1076 | return NULL; | 1076 | return NULL; |
| 1077 | 1077 | ||
| 1078 | pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL); | 1078 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); |
| 1079 | if (!pdata) { | 1079 | if (!pdata) { |
| 1080 | dev_err(&pdev->dev, | 1080 | dev_err(&pdev->dev, |
| 1081 | "failed to allocate memory for platform data\n"); | 1081 | "failed to allocate memory for platform data\n"); |
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 5000586cb98d..71cc3e6ef47c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
| @@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | |||
| 444 | } | 444 | } |
| 445 | 445 | ||
| 446 | ret = pm_runtime_get_sync(&sdd->pdev->dev); | 446 | ret = pm_runtime_get_sync(&sdd->pdev->dev); |
| 447 | if (ret != 0) { | 447 | if (ret < 0) { |
| 448 | dev_err(dev, "Failed to enable device: %d\n", ret); | 448 | dev_err(dev, "Failed to enable device: %d\n", ret); |
| 449 | goto out_tx; | 449 | goto out_tx; |
| 450 | } | 450 | } |
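The s3c64xx change relies on pm_runtime_get_sync() returning a positive value (rather than 0) when the device was already resumed, so only a negative return indicates a real failure; checking for ret != 0 would reject that perfectly healthy case. A tiny standalone illustration of the two checks, using a stand-in for the PM call, follows.

#include <stdio.h>

/* Stand-in with pm_runtime_get_sync()-like semantics: negative on error,
 * 0 when the device had to be resumed, 1 when it was already active. */
static int get_sync_model(int already_active)
{
	return already_active ? 1 : 0;
}

int main(void)
{
	int ret = get_sync_model(1);

	if (ret != 0)   /* old check: treats "already active" as an error */
		printf("'!= 0' check fails spuriously (ret=%d)\n", ret);

	if (ret < 0)    /* new check: only genuine errors are negative */
		printf("'< 0' check fails\n");
	else
		printf("'< 0' check passes (ret=%d)\n", ret);
	return 0;
}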
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 13e9e715ad2e..8d8b3ff68490 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
| @@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser( | |||
| 155 | struct iscsi_tpg_np *tpg_np_iser = NULL; | 155 | struct iscsi_tpg_np *tpg_np_iser = NULL; |
| 156 | char *endptr; | 156 | char *endptr; |
| 157 | u32 op; | 157 | u32 op; |
| 158 | int rc; | 158 | int rc = 0; |
| 159 | 159 | ||
| 160 | op = simple_strtoul(page, &endptr, 0); | 160 | op = simple_strtoul(page, &endptr, 0); |
| 161 | if ((op != 1) && (op != 0)) { | 161 | if ((op != 1) && (op != 0)) { |
| @@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser( | |||
| 174 | return -EINVAL; | 174 | return -EINVAL; |
| 175 | 175 | ||
| 176 | if (op) { | 176 | if (op) { |
| 177 | int rc = request_module("ib_isert"); | 177 | rc = request_module("ib_isert"); |
| 178 | if (rc != 0) | 178 | if (rc != 0) { |
| 179 | pr_warn("Unable to request_module for ib_isert\n"); | 179 | pr_warn("Unable to request_module for ib_isert\n"); |
| 180 | rc = 0; | ||
| 181 | } | ||
| 180 | 182 | ||
| 181 | tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, | 183 | tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr, |
| 182 | np->np_ip, tpg_np, ISCSI_INFINIBAND); | 184 | np->np_ip, tpg_np, ISCSI_INFINIBAND); |
| 183 | if (!tpg_np_iser || IS_ERR(tpg_np_iser)) | 185 | if (IS_ERR(tpg_np_iser)) { |
| 186 | rc = PTR_ERR(tpg_np_iser); | ||
| 184 | goto out; | 187 | goto out; |
| 188 | } | ||
| 185 | } else { | 189 | } else { |
| 186 | tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); | 190 | tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND); |
| 187 | if (!tpg_np_iser) | 191 | if (tpg_np_iser) { |
| 188 | goto out; | 192 | rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); |
| 189 | 193 | if (rc < 0) | |
| 190 | rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser); | 194 | goto out; |
| 191 | if (rc < 0) | 195 | } |
| 192 | goto out; | ||
| 193 | } | 196 | } |
| 194 | 197 | ||
| 195 | printk("lio_target_np_store_iser() done, op: %d\n", op); | ||
| 196 | |||
| 197 | iscsit_put_tpg(tpg); | 198 | iscsit_put_tpg(tpg); |
| 198 | return count; | 199 | return count; |
| 199 | out: | 200 | out: |
| 200 | iscsit_put_tpg(tpg); | 201 | iscsit_put_tpg(tpg); |
| 201 | return -EINVAL; | 202 | return rc; |
| 202 | } | 203 | } |
| 203 | 204 | ||
| 204 | TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); | 205 | TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR); |
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 8e6298cc8839..dcb199da06b9 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
| @@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess) | |||
| 842 | return 0; | 842 | return 0; |
| 843 | 843 | ||
| 844 | sess->time2retain_timer_flags |= ISCSI_TF_STOP; | 844 | sess->time2retain_timer_flags |= ISCSI_TF_STOP; |
| 845 | spin_unlock_bh(&se_tpg->session_lock); | 845 | spin_unlock(&se_tpg->session_lock); |
| 846 | 846 | ||
| 847 | del_timer_sync(&sess->time2retain_timer); | 847 | del_timer_sync(&sess->time2retain_timer); |
| 848 | 848 | ||
| 849 | spin_lock_bh(&se_tpg->session_lock); | 849 | spin_lock(&se_tpg->session_lock); |
| 850 | sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; | 850 | sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING; |
| 851 | pr_debug("Stopped Time2Retain Timer for SID: %u\n", | 851 | pr_debug("Stopped Time2Retain Timer for SID: %u\n", |
| 852 | sess->sid); | 852 | sess->sid); |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bb5d5c5bce65..3402241be87c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
| @@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket( | |||
| 984 | } | 984 | } |
| 985 | 985 | ||
| 986 | np->np_transport = t; | 986 | np->np_transport = t; |
| 987 | printk("Set np->np_transport to %p -> %s\n", np->np_transport, | ||
| 988 | np->np_transport->name); | ||
| 989 | return 0; | 987 | return 0; |
| 990 | } | 988 | } |
| 991 | 989 | ||
| @@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) | |||
| 1002 | 1000 | ||
| 1003 | conn->sock = new_sock; | 1001 | conn->sock = new_sock; |
| 1004 | conn->login_family = np->np_sockaddr.ss_family; | 1002 | conn->login_family = np->np_sockaddr.ss_family; |
| 1005 | printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock); | ||
| 1006 | 1003 | ||
| 1007 | if (np->np_sockaddr.ss_family == AF_INET6) { | 1004 | if (np->np_sockaddr.ss_family == AF_INET6) { |
| 1008 | memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); | 1005 | memset(&sock_in6, 0, sizeof(struct sockaddr_in6)); |
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ad912060e21..cd5018ff9cd7 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
| @@ -721,9 +721,6 @@ int iscsi_target_locate_portal( | |||
| 721 | 721 | ||
| 722 | start += strlen(key) + strlen(value) + 2; | 722 | start += strlen(key) + strlen(value) + 2; |
| 723 | } | 723 | } |
| 724 | |||
| 725 | printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf); | ||
| 726 | |||
| 727 | /* | 724 | /* |
| 728 | * See 5.3. Login Phase. | 725 | * See 5.3. Login Phase. |
| 729 | */ | 726 | */ |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index 59bfaecc4e14..abfd99089781 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty) | |||
| 244 | 244 | ||
| 245 | static int pty_open(struct tty_struct *tty, struct file *filp) | 245 | static int pty_open(struct tty_struct *tty, struct file *filp) |
| 246 | { | 246 | { |
| 247 | int retval = -ENODEV; | ||
| 248 | |||
| 249 | if (!tty || !tty->link) | 247 | if (!tty || !tty->link) |
| 250 | goto out; | 248 | return -ENODEV; |
| 251 | |||
| 252 | set_bit(TTY_IO_ERROR, &tty->flags); | ||
| 253 | 249 | ||
| 254 | retval = -EIO; | ||
| 255 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) | 250 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) |
| 256 | goto out; | 251 | goto out; |
| 257 | if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) | 252 | if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) |
| @@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp) | |||
| 262 | clear_bit(TTY_IO_ERROR, &tty->flags); | 257 | clear_bit(TTY_IO_ERROR, &tty->flags); |
| 263 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); | 258 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
| 264 | set_bit(TTY_THROTTLED, &tty->flags); | 259 | set_bit(TTY_THROTTLED, &tty->flags); |
| 265 | retval = 0; | 260 | return 0; |
| 261 | |||
| 266 | out: | 262 | out: |
| 267 | return retval; | 263 | set_bit(TTY_IO_ERROR, &tty->flags); |
| 264 | return -EIO; | ||
| 268 | } | 265 | } |
| 269 | 266 | ||
| 270 | static void pty_set_termios(struct tty_struct *tty, | 267 | static void pty_set_termios(struct tty_struct *tty, |
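pty_open() above runs when either end of a pseudo-terminal pair is opened; with the error paths restructured, TTY_IO_ERROR is only set on failure, and the TTY_PTY_LOCK test is what makes opening the slave fail with EIO until userspace calls unlockpt() on the master. The standard userspace sequence that exercises this path is sketched here.

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[8];
	int master = posix_openpt(O_RDWR | O_NOCTTY);

	if (master < 0)
		return 1;
	/* grantpt()/unlockpt() clear the pty lock; before this, opening the
	 * slave would fail with EIO (the TTY_PTY_LOCK test in pty_open()) */
	if (grantpt(master) < 0 || unlockpt(master) < 0)
		return 1;

	printf("slave side: %s\n", ptsname(master));
	int slave = open(ptsname(master), O_RDWR | O_NOCTTY);
	if (slave < 0) {
		perror("open slave");
		return 1;
	}

	write(master, "hi\n", 3);               /* master -> slave */
	ssize_t n = read(slave, buf, sizeof(buf));
	printf("slave read %zd bytes\n", n);

	close(slave);
	close(master);
	return 0;
}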
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c index 097dff9c08ad..bb91b4713ebd 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ b/drivers/tty/serial/8250/8250_gsc.c | |||
| @@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev) | |||
| 30 | unsigned long address; | 30 | unsigned long address; |
| 31 | int err; | 31 | int err; |
| 32 | 32 | ||
| 33 | #ifdef CONFIG_64BIT | ||
| 34 | extern int iosapic_serial_irq(int cellnum); | ||
| 35 | if (!dev->irq && (dev->id.sversion == 0xad)) | ||
| 36 | dev->irq = iosapic_serial_irq(dev->mod_index-1); | ||
| 37 | #endif | ||
| 38 | |||
| 33 | if (!dev->irq) { | 39 | if (!dev->irq) { |
| 34 | /* We find some unattached serial ports by walking native | 40 | /* We find some unattached serial ports by walking native |
| 35 | * busses. These should be silently ignored. Otherwise, | 41 | * busses. These should be silently ignored. Otherwise, |
| @@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev) | |||
| 51 | memset(&uart, 0, sizeof(uart)); | 57 | memset(&uart, 0, sizeof(uart)); |
| 52 | uart.port.iotype = UPIO_MEM; | 58 | uart.port.iotype = UPIO_MEM; |
| 53 | /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ | 59 | /* 7.272727MHz on Lasi. Assumed the same for Dino, Wax and Timi. */ |
| 54 | uart.port.uartclk = 7272727; | 60 | uart.port.uartclk = (dev->id.sversion != 0xad) ? |
| 61 | 7272727 : 1843200; | ||
| 55 | uart.port.mapbase = address; | 62 | uart.port.mapbase = address; |
| 56 | uart.port.membase = ioremap_nocache(address, 16); | 63 | uart.port.membase = ioremap_nocache(address, 16); |
| 57 | uart.port.irq = dev->irq; | 64 | uart.port.irq = dev->irq; |
| @@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = { | |||
| 73 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, | 80 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 }, |
| 74 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, | 81 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c }, |
| 75 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, | 82 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d }, |
| 83 | { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad }, | ||
| 76 | { 0 } | 84 | { 0 } |
| 77 | }; | 85 | }; |
| 78 | 86 | ||
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index fc2c06c66e89..2bd78e2ac8ec 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c | |||
| @@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num) | |||
| 289 | struct vc_data *vc = NULL; | 289 | struct vc_data *vc = NULL; |
| 290 | int ret = 0; | 290 | int ret = 0; |
| 291 | 291 | ||
| 292 | if (!vc_num) | ||
| 293 | return 0; | ||
| 294 | |||
| 295 | console_lock(); | 292 | console_lock(); |
| 296 | if (VT_BUSY(vc_num)) | 293 | if (VT_BUSY(vc_num)) |
| 297 | ret = -EBUSY; | 294 | ret = -EBUSY; |
| 298 | else | 295 | else if (vc_num) |
| 299 | vc = vc_deallocate(vc_num); | 296 | vc = vc_deallocate(vc_num); |
| 300 | console_unlock(); | 297 | console_unlock(); |
| 301 | 298 | ||
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 7ef3eb8617a6..2311b1e4e43c 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
| @@ -4,11 +4,17 @@ | |||
| 4 | menuconfig USB_PHY | 4 | menuconfig USB_PHY |
| 5 | bool "USB Physical Layer drivers" | 5 | bool "USB Physical Layer drivers" |
| 6 | help | 6 | help |
| 7 | USB controllers (those which are host, device or DRD) need a | 7 | Most USB controllers have the physical layer signalling part |
| 8 | device to handle the physical layer signalling, commonly called | 8 | (commonly called a PHY) built in. However, dual-role devices |
| 9 | a PHY. | 9 | (a.k.a. USB on-the-go) which support being USB master or slave |
| 10 | with the same connector often use an external PHY. | ||
| 10 | 11 | ||
| 11 | The following drivers add support for such PHY devices. | 12 | The drivers in this submenu add support for such PHY devices. |
| 13 | They are not needed for standard master-only (or the vast | ||
| 14 | majority of slave-only) USB interfaces. | ||
| 15 | |||
| 16 | If you're not sure if this applies to you, it probably doesn't; | ||
| 17 | say N here. | ||
| 12 | 18 | ||
| 13 | if USB_PHY | 19 | if USB_PHY |
| 14 | 20 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index c92c5ed4e580..e581c2549a57 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { | |||
| 172 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, | 172 | { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, |
| 173 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, | 173 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
| 174 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, | 174 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
| 175 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, | 175 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, |
| 176 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, | ||
| 176 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, | 177 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
| 177 | }; | 178 | }; |
| 178 | 179 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index b353e7e3d480..4a2423e84d55 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
| @@ -52,7 +52,9 @@ | |||
| 52 | 52 | ||
| 53 | /* Abbott Diabetics vendor and product ids */ | 53 | /* Abbott Diabetics vendor and product ids */ |
| 54 | #define ABBOTT_VENDOR_ID 0x1a61 | 54 | #define ABBOTT_VENDOR_ID 0x1a61 |
| 55 | #define ABBOTT_PRODUCT_ID 0x3410 | 55 | #define ABBOTT_STEREO_PLUG_ID 0x3410 |
| 56 | #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID | ||
| 57 | #define ABBOTT_STRIP_PORT_ID 0x3420 | ||
| 56 | 58 | ||
| 57 | /* Commands */ | 59 | /* Commands */ |
| 58 | #define TI_GET_VERSION 0x01 | 60 | #define TI_GET_VERSION 0x01 |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index e570081f9f76..35f281033142 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
| @@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | |||
| 2470 | .mode = mode | 2470 | .mode = mode |
| 2471 | }; | 2471 | }; |
| 2472 | int err; | 2472 | int err; |
| 2473 | bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || | ||
| 2474 | (mode & FALLOC_FL_PUNCH_HOLE); | ||
| 2473 | 2475 | ||
| 2474 | if (fc->no_fallocate) | 2476 | if (fc->no_fallocate) |
| 2475 | return -EOPNOTSUPP; | 2477 | return -EOPNOTSUPP; |
| 2476 | 2478 | ||
| 2477 | if (mode & FALLOC_FL_PUNCH_HOLE) { | 2479 | if (lock_inode) { |
| 2478 | mutex_lock(&inode->i_mutex); | 2480 | mutex_lock(&inode->i_mutex); |
| 2479 | fuse_set_nowrite(inode); | 2481 | if (mode & FALLOC_FL_PUNCH_HOLE) |
| 2482 | fuse_set_nowrite(inode); | ||
| 2480 | } | 2483 | } |
| 2481 | 2484 | ||
| 2482 | req = fuse_get_req_nopages(fc); | 2485 | req = fuse_get_req_nopages(fc); |
| @@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, | |||
| 2511 | fuse_invalidate_attr(inode); | 2514 | fuse_invalidate_attr(inode); |
| 2512 | 2515 | ||
| 2513 | out: | 2516 | out: |
| 2514 | if (mode & FALLOC_FL_PUNCH_HOLE) { | 2517 | if (lock_inode) { |
| 2515 | fuse_release_nowrite(inode); | 2518 | if (mode & FALLOC_FL_PUNCH_HOLE) |
| 2519 | fuse_release_nowrite(inode); | ||
| 2516 | mutex_unlock(&inode->i_mutex); | 2520 | mutex_unlock(&inode->i_mutex); |
| 2517 | } | 2521 | } |
| 2518 | 2522 | ||
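With the change above, fuse takes i_mutex whenever the request can change the file size and also for hole punching, while fuse_set_nowrite() remains specific to FALLOC_FL_PUNCH_HOLE. From userspace, punching a hole requires combining FALLOC_FL_PUNCH_HOLE with FALLOC_FL_KEEP_SIZE; a minimal test program (arbitrary path under /tmp) is shown below.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	int fd = open("/tmp/fallocate-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	memset(buf, 'x', sizeof(buf));
	write(fd, buf, sizeof(buf));

	/* Punch a hole over the first 4 KiB; KEEP_SIZE is mandatory here,
	 * so i_size stays at 8 KiB while the blocks are deallocated. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 4096) < 0)
		perror("punch hole");   /* EOPNOTSUPP if the fs lacks support */

	/* Plain preallocation (mode 0) extends the file to 16 KiB. */
	if (fallocate(fd, 0, 0, 16384) < 0)
		perror("preallocate");

	close(fd);
	return 0;
}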
diff --git a/fs/internal.h b/fs/internal.h index eaa75f75b625..68121584ae37 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
| @@ -132,6 +132,12 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); | |||
| 132 | extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); | 132 | extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); |
| 133 | 133 | ||
| 134 | /* | 134 | /* |
| 135 | * splice.c | ||
| 136 | */ | ||
| 137 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | ||
| 138 | loff_t *opos, size_t len, unsigned int flags); | ||
| 139 | |||
| 140 | /* | ||
| 135 | * pipe.c | 141 | * pipe.c |
| 136 | */ | 142 | */ |
| 137 | extern const struct file_operations pipefifo_fops; | 143 | extern const struct file_operations pipefifo_fops; |
diff --git a/fs/read_write.c b/fs/read_write.c index 03430008704e..2cefa417be34 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -1064,6 +1064,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, | |||
| 1064 | struct fd in, out; | 1064 | struct fd in, out; |
| 1065 | struct inode *in_inode, *out_inode; | 1065 | struct inode *in_inode, *out_inode; |
| 1066 | loff_t pos; | 1066 | loff_t pos; |
| 1067 | loff_t out_pos; | ||
| 1067 | ssize_t retval; | 1068 | ssize_t retval; |
| 1068 | int fl; | 1069 | int fl; |
| 1069 | 1070 | ||
| @@ -1077,12 +1078,14 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, | |||
| 1077 | if (!(in.file->f_mode & FMODE_READ)) | 1078 | if (!(in.file->f_mode & FMODE_READ)) |
| 1078 | goto fput_in; | 1079 | goto fput_in; |
| 1079 | retval = -ESPIPE; | 1080 | retval = -ESPIPE; |
| 1080 | if (!ppos) | 1081 | if (!ppos) { |
| 1081 | ppos = &in.file->f_pos; | 1082 | pos = in.file->f_pos; |
| 1082 | else | 1083 | } else { |
| 1084 | pos = *ppos; | ||
| 1083 | if (!(in.file->f_mode & FMODE_PREAD)) | 1085 | if (!(in.file->f_mode & FMODE_PREAD)) |
| 1084 | goto fput_in; | 1086 | goto fput_in; |
| 1085 | retval = rw_verify_area(READ, in.file, ppos, count); | 1087 | } |
| 1088 | retval = rw_verify_area(READ, in.file, &pos, count); | ||
| 1086 | if (retval < 0) | 1089 | if (retval < 0) |
| 1087 | goto fput_in; | 1090 | goto fput_in; |
| 1088 | count = retval; | 1091 | count = retval; |
| @@ -1099,7 +1102,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, | |||
| 1099 | retval = -EINVAL; | 1102 | retval = -EINVAL; |
| 1100 | in_inode = file_inode(in.file); | 1103 | in_inode = file_inode(in.file); |
| 1101 | out_inode = file_inode(out.file); | 1104 | out_inode = file_inode(out.file); |
| 1102 | retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count); | 1105 | out_pos = out.file->f_pos; |
| 1106 | retval = rw_verify_area(WRITE, out.file, &out_pos, count); | ||
| 1103 | if (retval < 0) | 1107 | if (retval < 0) |
| 1104 | goto fput_out; | 1108 | goto fput_out; |
| 1105 | count = retval; | 1109 | count = retval; |
| @@ -1107,7 +1111,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, | |||
| 1107 | if (!max) | 1111 | if (!max) |
| 1108 | max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); | 1112 | max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); |
| 1109 | 1113 | ||
| 1110 | pos = *ppos; | ||
| 1111 | if (unlikely(pos + count > max)) { | 1114 | if (unlikely(pos + count > max)) { |
| 1112 | retval = -EOVERFLOW; | 1115 | retval = -EOVERFLOW; |
| 1113 | if (pos >= max) | 1116 | if (pos >= max) |
| @@ -1126,18 +1129,23 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, | |||
| 1126 | if (in.file->f_flags & O_NONBLOCK) | 1129 | if (in.file->f_flags & O_NONBLOCK) |
| 1127 | fl = SPLICE_F_NONBLOCK; | 1130 | fl = SPLICE_F_NONBLOCK; |
| 1128 | #endif | 1131 | #endif |
| 1129 | retval = do_splice_direct(in.file, ppos, out.file, count, fl); | 1132 | retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl); |
| 1130 | 1133 | ||
| 1131 | if (retval > 0) { | 1134 | if (retval > 0) { |
| 1132 | add_rchar(current, retval); | 1135 | add_rchar(current, retval); |
| 1133 | add_wchar(current, retval); | 1136 | add_wchar(current, retval); |
| 1134 | fsnotify_access(in.file); | 1137 | fsnotify_access(in.file); |
| 1135 | fsnotify_modify(out.file); | 1138 | fsnotify_modify(out.file); |
| 1139 | out.file->f_pos = out_pos; | ||
| 1140 | if (ppos) | ||
| 1141 | *ppos = pos; | ||
| 1142 | else | ||
| 1143 | in.file->f_pos = pos; | ||
| 1136 | } | 1144 | } |
| 1137 | 1145 | ||
| 1138 | inc_syscr(current); | 1146 | inc_syscr(current); |
| 1139 | inc_syscw(current); | 1147 | inc_syscw(current); |
| 1140 | if (*ppos > max) | 1148 | if (pos > max) |
| 1141 | retval = -EOVERFLOW; | 1149 | retval = -EOVERFLOW; |
| 1142 | 1150 | ||
| 1143 | fput_out: | 1151 | fput_out: |
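do_sendfile() now carries both positions in local variables and copies them back to f_pos only after a successful transfer, instead of letting do_splice_direct() update out.file->f_pos directly. The syscall contract this preserves: with a non-NULL offset pointer, sendfile(2) reads from that offset and leaves the input file's own position alone; with NULL it uses and advances it. A small demonstration follows; the /etc/hostname and /tmp paths are arbitrary examples.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/sendfile.h>

int main(void)
{
	int in = open("/etc/hostname", O_RDONLY);   /* any readable file */
	int out = open("/tmp/sendfile-copy", O_WRONLY | O_CREAT | O_TRUNC, 0600);
	off_t offset = 0;

	if (in < 0 || out < 0)
		return 1;

	/* Explicit offset: the kernel reads from *offset, advances the local
	 * variable, and leaves the input file's own position untouched. */
	ssize_t n = sendfile(out, in, &offset, 4096);
	printf("copied %zd bytes, offset=%lld, in f_pos=%lld\n",
	       n, (long long)offset, (long long)lseek(in, 0, SEEK_CUR));

	/* NULL offset: the input file's position is used and updated. */
	n = sendfile(out, in, NULL, 4096);
	printf("copied %zd more bytes, in f_pos=%lld\n",
	       n, (long long)lseek(in, 0, SEEK_CUR));

	close(in);
	close(out);
	return 0;
}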
diff --git a/fs/splice.c b/fs/splice.c index e6b25598c8c4..d37431dd60a1 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, | |||
| 1274 | { | 1274 | { |
| 1275 | struct file *file = sd->u.file; | 1275 | struct file *file = sd->u.file; |
| 1276 | 1276 | ||
| 1277 | return do_splice_from(pipe, file, &file->f_pos, sd->total_len, | 1277 | return do_splice_from(pipe, file, sd->opos, sd->total_len, |
| 1278 | sd->flags); | 1278 | sd->flags); |
| 1279 | } | 1279 | } |
| 1280 | 1280 | ||
| @@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, | |||
| 1283 | * @in: file to splice from | 1283 | * @in: file to splice from |
| 1284 | * @ppos: input file offset | 1284 | * @ppos: input file offset |
| 1285 | * @out: file to splice to | 1285 | * @out: file to splice to |
| 1286 | * @opos: output file offset | ||
| 1286 | * @len: number of bytes to splice | 1287 | * @len: number of bytes to splice |
| 1287 | * @flags: splice modifier flags | 1288 | * @flags: splice modifier flags |
| 1288 | * | 1289 | * |
| @@ -1294,7 +1295,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe, | |||
| 1294 | * | 1295 | * |
| 1295 | */ | 1296 | */ |
| 1296 | long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | 1297 | long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, |
| 1297 | size_t len, unsigned int flags) | 1298 | loff_t *opos, size_t len, unsigned int flags) |
| 1298 | { | 1299 | { |
| 1299 | struct splice_desc sd = { | 1300 | struct splice_desc sd = { |
| 1300 | .len = len, | 1301 | .len = len, |
| @@ -1302,6 +1303,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | |||
| 1302 | .flags = flags, | 1303 | .flags = flags, |
| 1303 | .pos = *ppos, | 1304 | .pos = *ppos, |
| 1304 | .u.file = out, | 1305 | .u.file = out, |
| 1306 | .opos = opos, | ||
| 1305 | }; | 1307 | }; |
| 1306 | long ret; | 1308 | long ret; |
| 1307 | 1309 | ||
| @@ -1325,7 +1327,7 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
| 1325 | { | 1327 | { |
| 1326 | struct pipe_inode_info *ipipe; | 1328 | struct pipe_inode_info *ipipe; |
| 1327 | struct pipe_inode_info *opipe; | 1329 | struct pipe_inode_info *opipe; |
| 1328 | loff_t offset, *off; | 1330 | loff_t offset; |
| 1329 | long ret; | 1331 | long ret; |
| 1330 | 1332 | ||
| 1331 | ipipe = get_pipe_info(in); | 1333 | ipipe = get_pipe_info(in); |
| @@ -1356,13 +1358,15 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
| 1356 | return -EINVAL; | 1358 | return -EINVAL; |
| 1357 | if (copy_from_user(&offset, off_out, sizeof(loff_t))) | 1359 | if (copy_from_user(&offset, off_out, sizeof(loff_t))) |
| 1358 | return -EFAULT; | 1360 | return -EFAULT; |
| 1359 | off = &offset; | 1361 | } else { |
| 1360 | } else | 1362 | offset = out->f_pos; |
| 1361 | off = &out->f_pos; | 1363 | } |
| 1362 | 1364 | ||
| 1363 | ret = do_splice_from(ipipe, out, off, len, flags); | 1365 | ret = do_splice_from(ipipe, out, &offset, len, flags); |
| 1364 | 1366 | ||
| 1365 | if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) | 1367 | if (!off_out) |
| 1368 | out->f_pos = offset; | ||
| 1369 | else if (copy_to_user(off_out, &offset, sizeof(loff_t))) | ||
| 1366 | ret = -EFAULT; | 1370 | ret = -EFAULT; |
| 1367 | 1371 | ||
| 1368 | return ret; | 1372 | return ret; |
| @@ -1376,13 +1380,15 @@ static long do_splice(struct file *in, loff_t __user *off_in, | |||
| 1376 | return -EINVAL; | 1380 | return -EINVAL; |
| 1377 | if (copy_from_user(&offset, off_in, sizeof(loff_t))) | 1381 | if (copy_from_user(&offset, off_in, sizeof(loff_t))) |
| 1378 | return -EFAULT; | 1382 | return -EFAULT; |
| 1379 | off = &offset; | 1383 | } else { |
| 1380 | } else | 1384 | offset = in->f_pos; |
| 1381 | off = &in->f_pos; | 1385 | } |
| 1382 | 1386 | ||
| 1383 | ret = do_splice_to(in, off, opipe, len, flags); | 1387 | ret = do_splice_to(in, &offset, opipe, len, flags); |
| 1384 | 1388 | ||
| 1385 | if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) | 1389 | if (!off_in) |
| 1390 | in->f_pos = offset; | ||
| 1391 | else if (copy_to_user(off_in, &offset, sizeof(loff_t))) | ||
| 1386 | ret = -EFAULT; | 1392 | ret = -EFAULT; |
| 1387 | 1393 | ||
| 1388 | return ret; | 1394 | return ret; |
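do_splice() gets the same treatment: the offset is staged in a local loff_t and written back to ->f_pos only when the caller passed a NULL offset pointer. At the syscall level, splice(2) requires at least one end to be a pipe, and a NULL offset likewise means "use and update the file position"; the sketch below (arbitrary input path) splices from a file into a pipe with an explicit offset.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int in = open("/etc/hostname", O_RDONLY);   /* any readable file */
	loff_t off = 0;
	char buf[256];

	if (in < 0 || pipe(pipefd) < 0)
		return 1;

	/* File -> pipe with an explicit offset: 'off' advances, the file's
	 * own position does not. Passing NULL instead would update f_pos. */
	ssize_t n = splice(in, &off, pipefd[1], NULL, sizeof(buf), 0);
	if (n < 0) {
		perror("splice");
		return 1;
	}
	printf("spliced %zd bytes, off=%lld, in f_pos=%lld\n",
	       n, (long long)off, (long long)lseek(in, 0, SEEK_CUR));

	if (n > 0) {
		/* drain the pipe so the example is self-contained */
		n = read(pipefd[0], buf, sizeof(buf));
		printf("read back %zd bytes from the pipe\n", n);
	}

	close(in);
	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}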
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 636c59f2003a..c13c919ab99e 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
| @@ -382,6 +382,7 @@ const char *acpi_power_state_string(int state); | |||
| 382 | int acpi_device_get_power(struct acpi_device *device, int *state); | 382 | int acpi_device_get_power(struct acpi_device *device, int *state); |
| 383 | int acpi_device_set_power(struct acpi_device *device, int state); | 383 | int acpi_device_set_power(struct acpi_device *device, int state); |
| 384 | int acpi_bus_init_power(struct acpi_device *device); | 384 | int acpi_bus_init_power(struct acpi_device *device); |
| 385 | int acpi_device_fix_up_power(struct acpi_device *device); | ||
| 385 | int acpi_bus_update_power(acpi_handle handle, int *state_p); | 386 | int acpi_bus_update_power(acpi_handle handle, int *state_p); |
| 386 | bool acpi_bus_power_manageable(acpi_handle handle); | 387 | bool acpi_bus_power_manageable(acpi_handle handle); |
| 387 | 388 | ||
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 365f4a61bf04..fc09d7b0dacf 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/sched.h> | 4 | #include <linux/sched.h> |
| 5 | #include <linux/percpu.h> | 5 | #include <linux/percpu.h> |
| 6 | #include <linux/vtime.h> | ||
| 6 | #include <asm/ptrace.h> | 7 | #include <asm/ptrace.h> |
| 7 | 8 | ||
| 8 | struct context_tracking { | 9 | struct context_tracking { |
| @@ -19,6 +20,26 @@ struct context_tracking { | |||
| 19 | } state; | 20 | } state; |
| 20 | }; | 21 | }; |
| 21 | 22 | ||
| 23 | static inline void __guest_enter(void) | ||
| 24 | { | ||
| 25 | /* | ||
| 26 | * This is running in ioctl context so we can avoid | ||
| 27 | * the call to vtime_account() with its unnecessary idle check. | ||
| 28 | */ | ||
| 29 | vtime_account_system(current); | ||
| 30 | current->flags |= PF_VCPU; | ||
| 31 | } | ||
| 32 | |||
| 33 | static inline void __guest_exit(void) | ||
| 34 | { | ||
| 35 | /* | ||
| 36 | * This is running in ioctl context so we can avoid | ||
| 37 | * the call to vtime_account() with its unnecessary idle check. | ||
| 38 | */ | ||
| 39 | vtime_account_system(current); | ||
| 40 | current->flags &= ~PF_VCPU; | ||
| 41 | } | ||
| 42 | |||
| 22 | #ifdef CONFIG_CONTEXT_TRACKING | 43 | #ifdef CONFIG_CONTEXT_TRACKING |
| 23 | DECLARE_PER_CPU(struct context_tracking, context_tracking); | 44 | DECLARE_PER_CPU(struct context_tracking, context_tracking); |
| 24 | 45 | ||
| @@ -35,6 +56,9 @@ static inline bool context_tracking_active(void) | |||
| 35 | extern void user_enter(void); | 56 | extern void user_enter(void); |
| 36 | extern void user_exit(void); | 57 | extern void user_exit(void); |
| 37 | 58 | ||
| 59 | extern void guest_enter(void); | ||
| 60 | extern void guest_exit(void); | ||
| 61 | |||
| 38 | static inline enum ctx_state exception_enter(void) | 62 | static inline enum ctx_state exception_enter(void) |
| 39 | { | 63 | { |
| 40 | enum ctx_state prev_ctx; | 64 | enum ctx_state prev_ctx; |
| @@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev, | |||
| 57 | static inline bool context_tracking_in_user(void) { return false; } | 81 | static inline bool context_tracking_in_user(void) { return false; } |
| 58 | static inline void user_enter(void) { } | 82 | static inline void user_enter(void) { } |
| 59 | static inline void user_exit(void) { } | 83 | static inline void user_exit(void) { } |
| 84 | |||
| 85 | static inline void guest_enter(void) | ||
| 86 | { | ||
| 87 | __guest_enter(); | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline void guest_exit(void) | ||
| 91 | { | ||
| 92 | __guest_exit(); | ||
| 93 | } | ||
| 94 | |||
| 60 | static inline enum ctx_state exception_enter(void) { return 0; } | 95 | static inline enum ctx_state exception_enter(void) { return 0; } |
| 61 | static inline void exception_exit(enum ctx_state prev_ctx) { } | 96 | static inline void exception_exit(enum ctx_state prev_ctx) { } |
| 62 | static inline void context_tracking_task_switch(struct task_struct *prev, | 97 | static inline void context_tracking_task_switch(struct task_struct *prev, |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 43db02e9c9fa..65c2be22b601 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2414,8 +2414,6 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | |||
| 2414 | struct file *, loff_t *, size_t, unsigned int); | 2414 | struct file *, loff_t *, size_t, unsigned int); |
| 2415 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 2415 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
| 2416 | struct file *out, loff_t *, size_t len, unsigned int flags); | 2416 | struct file *out, loff_t *, size_t len, unsigned int flags); |
| 2417 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | ||
| 2418 | size_t len, unsigned int flags); | ||
| 2419 | 2417 | ||
| 2420 | extern void | 2418 | extern void |
| 2421 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); | 2419 | file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f0eea07d2c2b..8db53cfaccdb 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/ratelimit.h> | 23 | #include <linux/ratelimit.h> |
| 24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
| 25 | #include <linux/irqflags.h> | 25 | #include <linux/irqflags.h> |
| 26 | #include <linux/context_tracking.h> | ||
| 26 | #include <asm/signal.h> | 27 | #include <asm/signal.h> |
| 27 | 28 | ||
| 28 | #include <linux/kvm.h> | 29 | #include <linux/kvm.h> |
| @@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm) | |||
| 760 | } | 761 | } |
| 761 | #endif | 762 | #endif |
| 762 | 763 | ||
| 763 | static inline void __guest_enter(void) | ||
| 764 | { | ||
| 765 | /* | ||
| 766 | * This is running in ioctl context so we can avoid | ||
| 767 | * the call to vtime_account() with its unnecessary idle check. | ||
| 768 | */ | ||
| 769 | vtime_account_system(current); | ||
| 770 | current->flags |= PF_VCPU; | ||
| 771 | } | ||
| 772 | |||
| 773 | static inline void __guest_exit(void) | ||
| 774 | { | ||
| 775 | /* | ||
| 776 | * This is running in ioctl context so we can avoid | ||
| 777 | * the call to vtime_account() with its unnecessary idle check. | ||
| 778 | */ | ||
| 779 | vtime_account_system(current); | ||
| 780 | current->flags &= ~PF_VCPU; | ||
| 781 | } | ||
| 782 | |||
| 783 | #ifdef CONFIG_CONTEXT_TRACKING | ||
| 784 | extern void guest_enter(void); | ||
| 785 | extern void guest_exit(void); | ||
| 786 | |||
| 787 | #else /* !CONFIG_CONTEXT_TRACKING */ | ||
| 788 | static inline void guest_enter(void) | ||
| 789 | { | ||
| 790 | __guest_enter(); | ||
| 791 | } | ||
| 792 | |||
| 793 | static inline void guest_exit(void) | ||
| 794 | { | ||
| 795 | __guest_exit(); | ||
| 796 | } | ||
| 797 | #endif /* !CONFIG_CONTEXT_TRACKING */ | ||
| 798 | |||
| 799 | static inline void kvm_guest_enter(void) | 764 | static inline void kvm_guest_enter(void) |
| 800 | { | 765 | { |
| 801 | unsigned long flags; | 766 | unsigned long flags; |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index f463a46424e2..c5b6dbf9c2fc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -389,8 +389,7 @@ struct perf_event { | |||
| 389 | /* mmap bits */ | 389 | /* mmap bits */ |
| 390 | struct mutex mmap_mutex; | 390 | struct mutex mmap_mutex; |
| 391 | atomic_t mmap_count; | 391 | atomic_t mmap_count; |
| 392 | int mmap_locked; | 392 | |
| 393 | struct user_struct *mmap_user; | ||
| 394 | struct ring_buffer *rb; | 393 | struct ring_buffer *rb; |
| 395 | struct list_head rb_entry; | 394 | struct list_head rb_entry; |
| 396 | 395 | ||
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 87a03c746f17..f5d4723cdb3d 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -33,9 +33,25 @@ do { \ | |||
| 33 | preempt_schedule(); \ | 33 | preempt_schedule(); \ |
| 34 | } while (0) | 34 | } while (0) |
| 35 | 35 | ||
| 36 | #ifdef CONFIG_CONTEXT_TRACKING | ||
| 37 | |||
| 38 | void preempt_schedule_context(void); | ||
| 39 | |||
| 40 | #define preempt_check_resched_context() \ | ||
| 41 | do { \ | ||
| 42 | if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ | ||
| 43 | preempt_schedule_context(); \ | ||
| 44 | } while (0) | ||
| 45 | #else | ||
| 46 | |||
| 47 | #define preempt_check_resched_context() preempt_check_resched() | ||
| 48 | |||
| 49 | #endif /* CONFIG_CONTEXT_TRACKING */ | ||
| 50 | |||
| 36 | #else /* !CONFIG_PREEMPT */ | 51 | #else /* !CONFIG_PREEMPT */ |
| 37 | 52 | ||
| 38 | #define preempt_check_resched() do { } while (0) | 53 | #define preempt_check_resched() do { } while (0) |
| 54 | #define preempt_check_resched_context() do { } while (0) | ||
| 39 | 55 | ||
| 40 | #endif /* CONFIG_PREEMPT */ | 56 | #endif /* CONFIG_PREEMPT */ |
| 41 | 57 | ||
| @@ -88,7 +104,7 @@ do { \ | |||
| 88 | do { \ | 104 | do { \ |
| 89 | preempt_enable_no_resched_notrace(); \ | 105 | preempt_enable_no_resched_notrace(); \ |
| 90 | barrier(); \ | 106 | barrier(); \ |
| 91 | preempt_check_resched(); \ | 107 | preempt_check_resched_context(); \ |
| 92 | } while (0) | 108 | } while (0) |
| 93 | 109 | ||
| 94 | #else /* !CONFIG_PREEMPT_COUNT */ | 110 | #else /* !CONFIG_PREEMPT_COUNT */ |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 09a545a7dfa3..74575cbf2d6f 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
| @@ -35,6 +35,7 @@ struct splice_desc { | |||
| 35 | void *data; /* cookie */ | 35 | void *data; /* cookie */ |
| 36 | } u; | 36 | } u; |
| 37 | loff_t pos; /* file position */ | 37 | loff_t pos; /* file position */ |
| 38 | loff_t *opos; /* sendfile: output position */ | ||
| 38 | size_t num_spliced; /* number of bytes already spliced */ | 39 | size_t num_spliced; /* number of bytes already spliced */ |
| 39 | bool need_wakeup; /* need to wake up writer */ | 40 | bool need_wakeup; /* need to wake up writer */ |
| 40 | }; | 41 | }; |
diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 71a5782d8c59..b1dd2db80076 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h | |||
| @@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk) | |||
| 34 | } | 34 | } |
| 35 | extern void vtime_guest_enter(struct task_struct *tsk); | 35 | extern void vtime_guest_enter(struct task_struct *tsk); |
| 36 | extern void vtime_guest_exit(struct task_struct *tsk); | 36 | extern void vtime_guest_exit(struct task_struct *tsk); |
| 37 | extern void vtime_init_idle(struct task_struct *tsk); | 37 | extern void vtime_init_idle(struct task_struct *tsk, int cpu); |
| 38 | #else | 38 | #else |
| 39 | static inline void vtime_account_irq_exit(struct task_struct *tsk) | 39 | static inline void vtime_account_irq_exit(struct task_struct *tsk) |
| 40 | { | 40 | { |
| @@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { } | |||
| 45 | static inline void vtime_user_exit(struct task_struct *tsk) { } | 45 | static inline void vtime_user_exit(struct task_struct *tsk) { } |
| 46 | static inline void vtime_guest_enter(struct task_struct *tsk) { } | 46 | static inline void vtime_guest_enter(struct task_struct *tsk) { } |
| 47 | static inline void vtime_guest_exit(struct task_struct *tsk) { } | 47 | static inline void vtime_guest_exit(struct task_struct *tsk) { } |
| 48 | static inline void vtime_init_idle(struct task_struct *tsk) { } | 48 | static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } |
| 49 | #endif | 49 | #endif |
| 50 | 50 | ||
| 51 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | 51 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING |
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c index 65349f07b878..383f8231e436 100644 --- a/kernel/context_tracking.c +++ b/kernel/context_tracking.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/context_tracking.h> | 17 | #include <linux/context_tracking.h> |
| 18 | #include <linux/kvm_host.h> | ||
| 19 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
| 20 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 21 | #include <linux/hardirq.h> | 20 | #include <linux/hardirq.h> |
| @@ -71,6 +70,46 @@ void user_enter(void) | |||
| 71 | local_irq_restore(flags); | 70 | local_irq_restore(flags); |
| 72 | } | 71 | } |
| 73 | 72 | ||
| 73 | #ifdef CONFIG_PREEMPT | ||
| 74 | /** | ||
| 75 | * preempt_schedule_context - preempt_schedule called by tracing | ||
| 76 | * | ||
| 77 | * The tracing infrastructure uses preempt_enable_notrace to prevent | ||
| 78 | * recursion and tracing preempt enabling caused by the tracing | ||
| 79 | * infrastructure itself. But as tracing can happen in areas coming | ||
| 80 | * from userspace or just about to enter userspace, a preempt enable | ||
| 81 | * can occur before user_exit() is called. This will cause the scheduler | ||
| 82 | * to be called when the system is still in usermode. | ||
| 83 | * | ||
| 84 | * To prevent this, the preempt_enable_notrace will use this function | ||
| 85 | * instead of preempt_schedule() to exit user context if needed before | ||
| 86 | * calling the scheduler. | ||
| 87 | */ | ||
| 88 | void __sched notrace preempt_schedule_context(void) | ||
| 89 | { | ||
| 90 | struct thread_info *ti = current_thread_info(); | ||
| 91 | enum ctx_state prev_ctx; | ||
| 92 | |||
| 93 | if (likely(ti->preempt_count || irqs_disabled())) | ||
| 94 | return; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Need to disable preemption in case user_exit() is traced | ||
| 98 | * and the tracer calls preempt_enable_notrace() causing | ||
| 99 | * an infinite recursion. | ||
| 100 | */ | ||
| 101 | preempt_disable_notrace(); | ||
| 102 | prev_ctx = exception_enter(); | ||
| 103 | preempt_enable_no_resched_notrace(); | ||
| 104 | |||
| 105 | preempt_schedule(); | ||
| 106 | |||
| 107 | preempt_disable_notrace(); | ||
| 108 | exception_exit(prev_ctx); | ||
| 109 | preempt_enable_notrace(); | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL_GPL(preempt_schedule_context); | ||
| 112 | #endif /* CONFIG_PREEMPT */ | ||
| 74 | 113 | ||
| 75 | /** | 114 | /** |
| 76 | * user_exit - Inform the context tracking that the CPU is | 115 | * user_exit - Inform the context tracking that the CPU is |
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c index d5585f5e038e..e695c0a0bcb5 100644 --- a/kernel/cpu/idle.c +++ b/kernel/cpu/idle.c | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/cpu.h> | 5 | #include <linux/cpu.h> |
| 6 | #include <linux/tick.h> | 6 | #include <linux/tick.h> |
| 7 | #include <linux/mm.h> | 7 | #include <linux/mm.h> |
| 8 | #include <linux/stackprotector.h> | ||
| 8 | 9 | ||
| 9 | #include <asm/tlb.h> | 10 | #include <asm/tlb.h> |
| 10 | 11 | ||
| @@ -58,6 +59,7 @@ void __weak arch_cpu_idle_dead(void) { } | |||
| 58 | void __weak arch_cpu_idle(void) | 59 | void __weak arch_cpu_idle(void) |
| 59 | { | 60 | { |
| 60 | cpu_idle_force_poll = 1; | 61 | cpu_idle_force_poll = 1; |
| 62 | local_irq_enable(); | ||
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | /* | 65 | /* |
| @@ -112,6 +114,21 @@ static void cpu_idle_loop(void) | |||
| 112 | 114 | ||
| 113 | void cpu_startup_entry(enum cpuhp_state state) | 115 | void cpu_startup_entry(enum cpuhp_state state) |
| 114 | { | 116 | { |
| 117 | /* | ||
| 118 | * This #ifdef needs to die, but it's too late in the cycle to | ||
| 119 | * make this generic (arm and sh have never invoked the canary | ||
| 120 | * init for the non-boot CPUs!). Will be fixed in 3.11 | ||
| 121 | */ | ||
| 122 | #ifdef CONFIG_X86 | ||
| 123 | /* | ||
| 124 | * If we're the non-boot CPU, nothing set the stack canary up | ||
| 125 | * for us. The boot CPU already has it initialized but no harm | ||
| 126 | * in doing it again. This is a good place for updating it, as | ||
| 127 | * we won't ever return from this function (so the invalid | ||
| 128 | * canaries already on the stack won't ever trigger). | ||
| 129 | */ | ||
| 130 | boot_init_stack_canary(); | ||
| 131 | #endif | ||
| 115 | current_set_polling(); | 132 | current_set_polling(); |
| 116 | arch_cpu_idle_prepare(); | 133 | arch_cpu_idle_prepare(); |
| 117 | cpu_idle_loop(); | 134 | cpu_idle_loop(); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 9dc297faf7c0..b391907d5352 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -196,9 +196,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, | |||
| 196 | static void update_context_time(struct perf_event_context *ctx); | 196 | static void update_context_time(struct perf_event_context *ctx); |
| 197 | static u64 perf_event_time(struct perf_event *event); | 197 | static u64 perf_event_time(struct perf_event *event); |
| 198 | 198 | ||
| 199 | static void ring_buffer_attach(struct perf_event *event, | ||
| 200 | struct ring_buffer *rb); | ||
| 201 | |||
| 202 | void __weak perf_event_print_debug(void) { } | 199 | void __weak perf_event_print_debug(void) { } |
| 203 | 200 | ||
| 204 | extern __weak const char *perf_pmu_name(void) | 201 | extern __weak const char *perf_pmu_name(void) |
| @@ -2918,6 +2915,7 @@ static void free_event_rcu(struct rcu_head *head) | |||
| 2918 | } | 2915 | } |
| 2919 | 2916 | ||
| 2920 | static void ring_buffer_put(struct ring_buffer *rb); | 2917 | static void ring_buffer_put(struct ring_buffer *rb); |
| 2918 | static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb); | ||
| 2921 | 2919 | ||
| 2922 | static void free_event(struct perf_event *event) | 2920 | static void free_event(struct perf_event *event) |
| 2923 | { | 2921 | { |
| @@ -2942,15 +2940,30 @@ static void free_event(struct perf_event *event) | |||
| 2942 | if (has_branch_stack(event)) { | 2940 | if (has_branch_stack(event)) { |
| 2943 | static_key_slow_dec_deferred(&perf_sched_events); | 2941 | static_key_slow_dec_deferred(&perf_sched_events); |
| 2944 | /* is system-wide event */ | 2942 | /* is system-wide event */ |
| 2945 | if (!(event->attach_state & PERF_ATTACH_TASK)) | 2943 | if (!(event->attach_state & PERF_ATTACH_TASK)) { |
| 2946 | atomic_dec(&per_cpu(perf_branch_stack_events, | 2944 | atomic_dec(&per_cpu(perf_branch_stack_events, |
| 2947 | event->cpu)); | 2945 | event->cpu)); |
| 2946 | } | ||
| 2948 | } | 2947 | } |
| 2949 | } | 2948 | } |
| 2950 | 2949 | ||
| 2951 | if (event->rb) { | 2950 | if (event->rb) { |
| 2952 | ring_buffer_put(event->rb); | 2951 | struct ring_buffer *rb; |
| 2953 | event->rb = NULL; | 2952 | |
| 2953 | /* | ||
| 2954 | * Can happen when we close an event with re-directed output. | ||
| 2955 | * | ||
| 2956 | * Since we have a 0 refcount, perf_mmap_close() will skip | ||
| 2957 | * over us; possibly making our ring_buffer_put() the last. | ||
| 2958 | */ | ||
| 2959 | mutex_lock(&event->mmap_mutex); | ||
| 2960 | rb = event->rb; | ||
| 2961 | if (rb) { | ||
| 2962 | rcu_assign_pointer(event->rb, NULL); | ||
| 2963 | ring_buffer_detach(event, rb); | ||
| 2964 | ring_buffer_put(rb); /* could be last */ | ||
| 2965 | } | ||
| 2966 | mutex_unlock(&event->mmap_mutex); | ||
| 2954 | } | 2967 | } |
| 2955 | 2968 | ||
| 2956 | if (is_cgroup_event(event)) | 2969 | if (is_cgroup_event(event)) |
| @@ -3188,30 +3201,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) | |||
| 3188 | unsigned int events = POLL_HUP; | 3201 | unsigned int events = POLL_HUP; |
| 3189 | 3202 | ||
| 3190 | /* | 3203 | /* |
| 3191 | * Race between perf_event_set_output() and perf_poll(): perf_poll() | 3204 | * Pin the event->rb by taking event->mmap_mutex; otherwise |
| 3192 | * grabs the rb reference but perf_event_set_output() overrides it. | 3205 | * perf_event_set_output() can swizzle our rb and make us miss wakeups. |
| 3193 | * Here is the timeline for two threads T1, T2: | ||
| 3194 | * t0: T1, rb = rcu_dereference(event->rb) | ||
| 3195 | * t1: T2, old_rb = event->rb | ||
| 3196 | * t2: T2, event->rb = new rb | ||
| 3197 | * t3: T2, ring_buffer_detach(old_rb) | ||
| 3198 | * t4: T1, ring_buffer_attach(rb1) | ||
| 3199 | * t5: T1, poll_wait(event->waitq) | ||
| 3200 | * | ||
| 3201 | * To avoid this problem, we grab mmap_mutex in perf_poll() | ||
| 3202 | * thereby ensuring that the assignment of the new ring buffer | ||
| 3203 | * and the detachment of the old buffer appear atomic to perf_poll() | ||
| 3204 | */ | 3206 | */ |
| 3205 | mutex_lock(&event->mmap_mutex); | 3207 | mutex_lock(&event->mmap_mutex); |
| 3206 | 3208 | rb = event->rb; | |
| 3207 | rcu_read_lock(); | 3209 | if (rb) |
| 3208 | rb = rcu_dereference(event->rb); | ||
| 3209 | if (rb) { | ||
| 3210 | ring_buffer_attach(event, rb); | ||
| 3211 | events = atomic_xchg(&rb->poll, 0); | 3210 | events = atomic_xchg(&rb->poll, 0); |
| 3212 | } | ||
| 3213 | rcu_read_unlock(); | ||
| 3214 | |||
| 3215 | mutex_unlock(&event->mmap_mutex); | 3211 | mutex_unlock(&event->mmap_mutex); |
| 3216 | 3212 | ||
| 3217 | poll_wait(file, &event->waitq, wait); | 3213 | poll_wait(file, &event->waitq, wait); |
| @@ -3521,16 +3517,12 @@ static void ring_buffer_attach(struct perf_event *event, | |||
| 3521 | return; | 3517 | return; |
| 3522 | 3518 | ||
| 3523 | spin_lock_irqsave(&rb->event_lock, flags); | 3519 | spin_lock_irqsave(&rb->event_lock, flags); |
| 3524 | if (!list_empty(&event->rb_entry)) | 3520 | if (list_empty(&event->rb_entry)) |
| 3525 | goto unlock; | 3521 | list_add(&event->rb_entry, &rb->event_list); |
| 3526 | |||
| 3527 | list_add(&event->rb_entry, &rb->event_list); | ||
| 3528 | unlock: | ||
| 3529 | spin_unlock_irqrestore(&rb->event_lock, flags); | 3522 | spin_unlock_irqrestore(&rb->event_lock, flags); |
| 3530 | } | 3523 | } |
| 3531 | 3524 | ||
| 3532 | static void ring_buffer_detach(struct perf_event *event, | 3525 | static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb) |
| 3533 | struct ring_buffer *rb) | ||
| 3534 | { | 3526 | { |
| 3535 | unsigned long flags; | 3527 | unsigned long flags; |
| 3536 | 3528 | ||
| @@ -3549,13 +3541,10 @@ static void ring_buffer_wakeup(struct perf_event *event) | |||
| 3549 | 3541 | ||
| 3550 | rcu_read_lock(); | 3542 | rcu_read_lock(); |
| 3551 | rb = rcu_dereference(event->rb); | 3543 | rb = rcu_dereference(event->rb); |
| 3552 | if (!rb) | 3544 | if (rb) { |
| 3553 | goto unlock; | 3545 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) |
| 3554 | 3546 | wake_up_all(&event->waitq); | |
| 3555 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) | 3547 | } |
| 3556 | wake_up_all(&event->waitq); | ||
| 3557 | |||
| 3558 | unlock: | ||
| 3559 | rcu_read_unlock(); | 3548 | rcu_read_unlock(); |
| 3560 | } | 3549 | } |
| 3561 | 3550 | ||
| @@ -3584,18 +3573,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event) | |||
| 3584 | 3573 | ||
| 3585 | static void ring_buffer_put(struct ring_buffer *rb) | 3574 | static void ring_buffer_put(struct ring_buffer *rb) |
| 3586 | { | 3575 | { |
| 3587 | struct perf_event *event, *n; | ||
| 3588 | unsigned long flags; | ||
| 3589 | |||
| 3590 | if (!atomic_dec_and_test(&rb->refcount)) | 3576 | if (!atomic_dec_and_test(&rb->refcount)) |
| 3591 | return; | 3577 | return; |
| 3592 | 3578 | ||
| 3593 | spin_lock_irqsave(&rb->event_lock, flags); | 3579 | WARN_ON_ONCE(!list_empty(&rb->event_list)); |
| 3594 | list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) { | ||
| 3595 | list_del_init(&event->rb_entry); | ||
| 3596 | wake_up_all(&event->waitq); | ||
| 3597 | } | ||
| 3598 | spin_unlock_irqrestore(&rb->event_lock, flags); | ||
| 3599 | 3580 | ||
| 3600 | call_rcu(&rb->rcu_head, rb_free_rcu); | 3581 | call_rcu(&rb->rcu_head, rb_free_rcu); |
| 3601 | } | 3582 | } |
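With attach and detach now strictly paired with the buffer reference, the final ring_buffer_put() no longer needs to walk and empty the event list; the WARN_ON_ONCE() above just checks that it already is empty. A hedged userspace sketch of that invariant (illustrative names, not the kernel API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct buffer {
        atomic_int refcount;
        int nr_attached;            /* stands in for rb->event_list */
    };

    /* Returns true when this call dropped the last reference. */
    static bool buffer_put(struct buffer *b)
    {
        if (atomic_fetch_sub(&b->refcount, 1) != 1)
            return false;

        /* Every user must detach before dropping its last reference. */
        assert(b->nr_attached == 0);
        return true;                /* the kernel defers the free via RCU */
    }

    int main(void)
    {
        struct buffer b = { .refcount = 2, .nr_attached = 0 };

        printf("%d\n", buffer_put(&b));      /* 0: not the last reference */
        printf("%d\n", buffer_put(&b));      /* 1: last reference dropped */
        return 0;
    }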
| @@ -3605,26 +3586,100 @@ static void perf_mmap_open(struct vm_area_struct *vma) | |||
| 3605 | struct perf_event *event = vma->vm_file->private_data; | 3586 | struct perf_event *event = vma->vm_file->private_data; |
| 3606 | 3587 | ||
| 3607 | atomic_inc(&event->mmap_count); | 3588 | atomic_inc(&event->mmap_count); |
| 3589 | atomic_inc(&event->rb->mmap_count); | ||
| 3608 | } | 3590 | } |
| 3609 | 3591 | ||
| 3592 | /* | ||
| 3593 | * A buffer can be mmap()ed multiple times; either directly through the same | ||
| 3594 | * event, or through other events by use of perf_event_set_output(). | ||
| 3595 | * | ||
| 3596 | * In order to undo the VM accounting done by perf_mmap() we need to destroy | ||
| 3597 | * the buffer here, where we still have a VM context. This means we need | ||
| 3598 | * to detach all events redirecting to us. | ||
| 3599 | */ | ||
| 3610 | static void perf_mmap_close(struct vm_area_struct *vma) | 3600 | static void perf_mmap_close(struct vm_area_struct *vma) |
| 3611 | { | 3601 | { |
| 3612 | struct perf_event *event = vma->vm_file->private_data; | 3602 | struct perf_event *event = vma->vm_file->private_data; |
| 3613 | 3603 | ||
| 3614 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 3604 | struct ring_buffer *rb = event->rb; |
| 3615 | unsigned long size = perf_data_size(event->rb); | 3605 | struct user_struct *mmap_user = rb->mmap_user; |
| 3616 | struct user_struct *user = event->mmap_user; | 3606 | int mmap_locked = rb->mmap_locked; |
| 3617 | struct ring_buffer *rb = event->rb; | 3607 | unsigned long size = perf_data_size(rb); |
| 3608 | |||
| 3609 | atomic_dec(&rb->mmap_count); | ||
| 3610 | |||
| 3611 | if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) | ||
| 3612 | return; | ||
| 3618 | 3613 | ||
| 3619 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 3614 | /* Detach current event from the buffer. */ |
| 3620 | vma->vm_mm->pinned_vm -= event->mmap_locked; | 3615 | rcu_assign_pointer(event->rb, NULL); |
| 3621 | rcu_assign_pointer(event->rb, NULL); | 3616 | ring_buffer_detach(event, rb); |
| 3622 | ring_buffer_detach(event, rb); | 3617 | mutex_unlock(&event->mmap_mutex); |
| 3618 | |||
| 3619 | /* If there are still other mmap()s of this buffer, we're done. */ | ||
| 3620 | if (atomic_read(&rb->mmap_count)) { | ||
| 3621 | ring_buffer_put(rb); /* can't be last */ | ||
| 3622 | return; | ||
| 3623 | } | ||
| 3624 | |||
| 3625 | /* | ||
| 3626 | * No other mmap()s, detach from all other events that might redirect | ||
| 3627 | * into the now unreachable buffer. Somewhat complicated by the | ||
| 3628 | * fact that rb::event_lock otherwise nests inside mmap_mutex. | ||
| 3629 | */ | ||
| 3630 | again: | ||
| 3631 | rcu_read_lock(); | ||
| 3632 | list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { | ||
| 3633 | if (!atomic_long_inc_not_zero(&event->refcount)) { | ||
| 3634 | /* | ||
| 3635 | * This event is en-route to free_event() which will | ||
| 3636 | * detach it and remove it from the list. | ||
| 3637 | */ | ||
| 3638 | continue; | ||
| 3639 | } | ||
| 3640 | rcu_read_unlock(); | ||
| 3641 | |||
| 3642 | mutex_lock(&event->mmap_mutex); | ||
| 3643 | /* | ||
| 3644 | * Check we didn't race with perf_event_set_output() which can | ||
| 3645 | * swizzle the rb from under us while we were waiting to | ||
| 3646 | * acquire mmap_mutex. | ||
| 3647 | * | ||
| 3648 | * If we find a different rb, ignore this event; the next | ||
| 3649 | * iteration will no longer find it on the list. We still have | ||
| 3650 | * to restart the iteration to make sure we're not now | ||
| 3651 | * iterating the wrong list. | ||
| 3652 | */ | ||
| 3653 | if (event->rb == rb) { | ||
| 3654 | rcu_assign_pointer(event->rb, NULL); | ||
| 3655 | ring_buffer_detach(event, rb); | ||
| 3656 | ring_buffer_put(rb); /* can't be last, we still have one */ | ||
| 3657 | } | ||
| 3623 | mutex_unlock(&event->mmap_mutex); | 3658 | mutex_unlock(&event->mmap_mutex); |
| 3659 | put_event(event); | ||
| 3624 | 3660 | ||
| 3625 | ring_buffer_put(rb); | 3661 | /* |
| 3626 | free_uid(user); | 3662 | * Restart the iteration; either we're iterating the wrong list, |
| 3663 | * or we destroyed its integrity by doing a deletion. | ||
| 3664 | */ | ||
| 3665 | goto again; | ||
| 3627 | } | 3666 | } |
| 3667 | rcu_read_unlock(); | ||
| 3668 | |||
| 3669 | /* | ||
| 3670 | * It could be that there are still a few 0-ref events on the list; they'll | ||
| 3671 | * get cleaned up by free_event() -- they'll also still have their | ||
| 3672 | * ref on the rb and will free it whenever they are done with it. | ||
| 3673 | * | ||
| 3674 | * Aside from that, this buffer is 'fully' detached and unmapped, | ||
| 3675 | * undo the VM accounting. | ||
| 3676 | */ | ||
| 3677 | |||
| 3678 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); | ||
| 3679 | vma->vm_mm->pinned_vm -= mmap_locked; | ||
| 3680 | free_uid(mmap_user); | ||
| 3681 | |||
| 3682 | ring_buffer_put(rb); /* could be last */ | ||
| 3628 | } | 3683 | } |
| 3629 | 3684 | ||
| 3630 | static const struct vm_operations_struct perf_mmap_vmops = { | 3685 | static const struct vm_operations_struct perf_mmap_vmops = { |
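The detach-everything loop in perf_mmap_close() above is an instance of a common pattern: the per-object lock cannot be taken while the list lock is held (the ordering is the other way around elsewhere), so the walker pins the object, drops the outer protection, locks, rechecks, and restarts. Below is a stripped-down userspace analogue (plain C with pthreads, illustrative names). It deliberately omits RCU and the refcount pinning the kernel does with atomic_long_inc_not_zero(), so it simply assumes nodes are never freed.

    #include <pthread.h>
    #include <stdio.h>

    struct buffer { int id; };

    struct node {
        struct node *next;
        pthread_mutex_t lock;        /* normally taken before list_lock */
        struct buffer *buf;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Clear every node that still points at 'buf'.  Nodes are assumed to
     * never be freed in this sketch; the kernel pins each one with a
     * reference before dropping the list lock. */
    static void detach_all_users(struct node *head, struct buffer *buf)
    {
    again:
        pthread_mutex_lock(&list_lock);
        for (struct node *n = head; n; n = n->next) {
            if (n->buf != buf)
                continue;

            /* n->lock nests outside list_lock, so drop list_lock first. */
            pthread_mutex_unlock(&list_lock);

            pthread_mutex_lock(&n->lock);
            if (n->buf == buf)       /* recheck: it may have changed meanwhile */
                n->buf = NULL;
            pthread_mutex_unlock(&n->lock);

            /* The list may have changed while unlocked: walk it again. */
            goto again;
        }
        pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
        struct buffer b = { .id = 1 };
        struct node n2 = { .next = NULL, .lock = PTHREAD_MUTEX_INITIALIZER, .buf = &b };
        struct node n1 = { .next = &n2,  .lock = PTHREAD_MUTEX_INITIALIZER, .buf = &b };

        detach_all_users(&n1, &b);
        printf("%p %p\n", (void *)n1.buf, (void *)n2.buf);   /* both NULL */
        return 0;
    }

The loop terminates because each restart finds strictly fewer matching nodes, which mirrors the comment in the hunk about restarting after destroying the iteration's integrity.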
| @@ -3674,12 +3729,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 3674 | return -EINVAL; | 3729 | return -EINVAL; |
| 3675 | 3730 | ||
| 3676 | WARN_ON_ONCE(event->ctx->parent_ctx); | 3731 | WARN_ON_ONCE(event->ctx->parent_ctx); |
| 3732 | again: | ||
| 3677 | mutex_lock(&event->mmap_mutex); | 3733 | mutex_lock(&event->mmap_mutex); |
| 3678 | if (event->rb) { | 3734 | if (event->rb) { |
| 3679 | if (event->rb->nr_pages == nr_pages) | 3735 | if (event->rb->nr_pages != nr_pages) { |
| 3680 | atomic_inc(&event->rb->refcount); | ||
| 3681 | else | ||
| 3682 | ret = -EINVAL; | 3736 | ret = -EINVAL; |
| 3737 | goto unlock; | ||
| 3738 | } | ||
| 3739 | |||
| 3740 | if (!atomic_inc_not_zero(&event->rb->mmap_count)) { | ||
| 3741 | /* | ||
| 3742 | * Raced against perf_mmap_close() through | ||
| 3743 | * perf_event_set_output(). Try again, hope for better | ||
| 3744 | * luck. | ||
| 3745 | */ | ||
| 3746 | mutex_unlock(&event->mmap_mutex); | ||
| 3747 | goto again; | ||
| 3748 | } | ||
| 3749 | |||
| 3683 | goto unlock; | 3750 | goto unlock; |
| 3684 | } | 3751 | } |
| 3685 | 3752 | ||
| @@ -3720,12 +3787,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 3720 | ret = -ENOMEM; | 3787 | ret = -ENOMEM; |
| 3721 | goto unlock; | 3788 | goto unlock; |
| 3722 | } | 3789 | } |
| 3723 | rcu_assign_pointer(event->rb, rb); | 3790 | |
| 3791 | atomic_set(&rb->mmap_count, 1); | ||
| 3792 | rb->mmap_locked = extra; | ||
| 3793 | rb->mmap_user = get_current_user(); | ||
| 3724 | 3794 | ||
| 3725 | atomic_long_add(user_extra, &user->locked_vm); | 3795 | atomic_long_add(user_extra, &user->locked_vm); |
| 3726 | event->mmap_locked = extra; | 3796 | vma->vm_mm->pinned_vm += extra; |
| 3727 | event->mmap_user = get_current_user(); | 3797 | |
| 3728 | vma->vm_mm->pinned_vm += event->mmap_locked; | 3798 | ring_buffer_attach(event, rb); |
| 3799 | rcu_assign_pointer(event->rb, rb); | ||
| 3729 | 3800 | ||
| 3730 | perf_event_update_userpage(event); | 3801 | perf_event_update_userpage(event); |
| 3731 | 3802 | ||
| @@ -3734,7 +3805,11 @@ unlock: | |||
| 3734 | atomic_inc(&event->mmap_count); | 3805 | atomic_inc(&event->mmap_count); |
| 3735 | mutex_unlock(&event->mmap_mutex); | 3806 | mutex_unlock(&event->mmap_mutex); |
| 3736 | 3807 | ||
| 3737 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; | 3808 | /* |
| 3809 | * Since pinned accounting is per-vm, we cannot allow fork() to copy our | ||
| 3810 | * vma. | ||
| 3811 | */ | ||
| 3812 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP; | ||
| 3738 | vma->vm_ops = &perf_mmap_vmops; | 3813 | vma->vm_ops = &perf_mmap_vmops; |
| 3739 | 3814 | ||
| 3740 | return ret; | 3815 | return ret; |
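perf_mmap() now refuses to piggy-back on a buffer whose mmap_count has already hit zero and retries from the again: label instead. The primitive behind that test-and-increment is the usual compare-and-swap loop; a userspace sketch (C11 atomics, not the kernel's atomic_inc_not_zero()):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only if the count is still non-zero. */
    static bool inc_not_zero(atomic_int *count)
    {
        int old = atomic_load(count);

        while (old != 0) {
            /* On failure 'old' is reloaded with the current value. */
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;
        }
        return false;   /* already zero: a concurrent teardown won the race */
    }

    int main(void)
    {
        atomic_int live = 1, dead = 0;

        printf("%d %d\n", inc_not_zero(&live), inc_not_zero(&dead));  /* 1 0 */
        return 0;
    }

A false return in the hunk above corresponds to dropping mmap_mutex and looping back: a concurrent perf_mmap_close() is tearing that buffer down, so the mapping attempt has to start over.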
| @@ -6412,6 +6487,8 @@ set: | |||
| 6412 | if (atomic_read(&event->mmap_count)) | 6487 | if (atomic_read(&event->mmap_count)) |
| 6413 | goto unlock; | 6488 | goto unlock; |
| 6414 | 6489 | ||
| 6490 | old_rb = event->rb; | ||
| 6491 | |||
| 6415 | if (output_event) { | 6492 | if (output_event) { |
| 6416 | /* get the rb we want to redirect to */ | 6493 | /* get the rb we want to redirect to */ |
| 6417 | rb = ring_buffer_get(output_event); | 6494 | rb = ring_buffer_get(output_event); |
| @@ -6419,16 +6496,28 @@ set: | |||
| 6419 | goto unlock; | 6496 | goto unlock; |
| 6420 | } | 6497 | } |
| 6421 | 6498 | ||
| 6422 | old_rb = event->rb; | ||
| 6423 | rcu_assign_pointer(event->rb, rb); | ||
| 6424 | if (old_rb) | 6499 | if (old_rb) |
| 6425 | ring_buffer_detach(event, old_rb); | 6500 | ring_buffer_detach(event, old_rb); |
| 6501 | |||
| 6502 | if (rb) | ||
| 6503 | ring_buffer_attach(event, rb); | ||
| 6504 | |||
| 6505 | rcu_assign_pointer(event->rb, rb); | ||
| 6506 | |||
| 6507 | if (old_rb) { | ||
| 6508 | ring_buffer_put(old_rb); | ||
| 6509 | /* | ||
| 6510 | * Since we had to detach the old rb before attaching the new one, | ||
| 6511 | * we could have missed a wakeup while the event was on neither list. | ||
| 6512 | * Provide it now. | ||
| 6513 | */ | ||
| 6514 | wake_up_all(&event->waitq); | ||
| 6515 | } | ||
| 6516 | |||
| 6426 | ret = 0; | 6517 | ret = 0; |
| 6427 | unlock: | 6518 | unlock: |
| 6428 | mutex_unlock(&event->mmap_mutex); | 6519 | mutex_unlock(&event->mmap_mutex); |
| 6429 | 6520 | ||
| 6430 | if (old_rb) | ||
| 6431 | ring_buffer_put(old_rb); | ||
| 6432 | out: | 6521 | out: |
| 6433 | return ret; | 6522 | return ret; |
| 6434 | } | 6523 | } |
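The reordered perf_event_set_output() path above detaches from the old buffer, attaches to the new one, publishes the pointer, and only then delivers a catch-up wakeup. A compact sketch of that ordering (plain C, stub helpers standing in for ring_buffer_detach/attach and wake_up_all; none of this is the kernel API):

    #include <stdio.h>

    struct buffer { const char *name; };

    struct event {
        struct buffer *rb;
        int pending_wakeups;     /* stands in for the event's waitqueue */
    };

    static void detach(struct event *e, struct buffer *b) { (void)e; (void)b; }
    static void attach(struct event *e, struct buffer *b) { (void)e; (void)b; }
    static void wake_all(struct event *e) { e->pending_wakeups++; }

    static void set_output(struct event *e, struct buffer *new_rb)
    {
        struct buffer *old_rb = e->rb;

        if (old_rb)
            detach(e, old_rb);   /* no longer reachable via old_rb's list */
        if (new_rb)
            attach(e, new_rb);   /* reachable via new_rb's list from here on */

        e->rb = new_rb;          /* publish; rcu_assign_pointer() in the kernel */

        /* A wakeup issued while the event sat on neither list was lost. */
        if (old_rb)
            wake_all(e);
    }

    int main(void)
    {
        struct buffer a = { "a" }, b = { "b" };
        struct event e = { .rb = &a, .pending_wakeups = 0 };

        set_output(&e, &b);
        printf("%s %d\n", e.rb->name, e.pending_wakeups);   /* b 1 */
        return 0;
    }

Dropping the old buffer's reference now also happens while mmap_mutex is still held rather than after the unlock, as the hunk shows.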
diff --git a/kernel/events/internal.h b/kernel/events/internal.h index eb675c4d59df..ca6599723be5 100644 --- a/kernel/events/internal.h +++ b/kernel/events/internal.h | |||
| @@ -31,6 +31,10 @@ struct ring_buffer { | |||
| 31 | spinlock_t event_lock; | 31 | spinlock_t event_lock; |
| 32 | struct list_head event_list; | 32 | struct list_head event_list; |
| 33 | 33 | ||
| 34 | atomic_t mmap_count; | ||
| 35 | unsigned long mmap_locked; | ||
| 36 | struct user_struct *mmap_user; | ||
| 37 | |||
| 34 | struct perf_event_mmap_page *user_page; | 38 | struct perf_event_mmap_page *user_page; |
| 35 | void *data_pages[0]; | 39 | void *data_pages[0]; |
| 36 | }; | 40 | }; |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 3fed7f0cbcdf..bddf3b201a48 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
| @@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr) | |||
| 467 | /* Optimization staging list, protected by kprobe_mutex */ | 467 | /* Optimization staging list, protected by kprobe_mutex */ |
| 468 | static LIST_HEAD(optimizing_list); | 468 | static LIST_HEAD(optimizing_list); |
| 469 | static LIST_HEAD(unoptimizing_list); | 469 | static LIST_HEAD(unoptimizing_list); |
| 470 | static LIST_HEAD(freeing_list); | ||
| 470 | 471 | ||
| 471 | static void kprobe_optimizer(struct work_struct *work); | 472 | static void kprobe_optimizer(struct work_struct *work); |
| 472 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); | 473 | static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); |
| @@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void) | |||
| 504 | * Unoptimize (replace a jump with a breakpoint and remove the breakpoint | 505 | * Unoptimize (replace a jump with a breakpoint and remove the breakpoint |
| 505 | * if needed) kprobes listed on unoptimizing_list. | 506 | * if needed) kprobes listed on unoptimizing_list. |
| 506 | */ | 507 | */ |
| 507 | static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) | 508 | static __kprobes void do_unoptimize_kprobes(void) |
| 508 | { | 509 | { |
| 509 | struct optimized_kprobe *op, *tmp; | 510 | struct optimized_kprobe *op, *tmp; |
| 510 | 511 | ||
| @@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) | |||
| 515 | /* Ditto to do_optimize_kprobes */ | 516 | /* Ditto to do_optimize_kprobes */ |
| 516 | get_online_cpus(); | 517 | get_online_cpus(); |
| 517 | mutex_lock(&text_mutex); | 518 | mutex_lock(&text_mutex); |
| 518 | arch_unoptimize_kprobes(&unoptimizing_list, free_list); | 519 | arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); |
| 519 | /* Loop free_list for disarming */ | 520 | /* Loop free_list for disarming */ |
| 520 | list_for_each_entry_safe(op, tmp, free_list, list) { | 521 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
| 521 | /* Disarm probes if marked disabled */ | 522 | /* Disarm probes if marked disabled */ |
| 522 | if (kprobe_disabled(&op->kp)) | 523 | if (kprobe_disabled(&op->kp)) |
| 523 | arch_disarm_kprobe(&op->kp); | 524 | arch_disarm_kprobe(&op->kp); |
| @@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list) | |||
| 536 | } | 537 | } |
| 537 | 538 | ||
| 538 | /* Reclaim all kprobes on the free_list */ | 539 | /* Reclaim all kprobes on the free_list */ |
| 539 | static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list) | 540 | static __kprobes void do_free_cleaned_kprobes(void) |
| 540 | { | 541 | { |
| 541 | struct optimized_kprobe *op, *tmp; | 542 | struct optimized_kprobe *op, *tmp; |
| 542 | 543 | ||
| 543 | list_for_each_entry_safe(op, tmp, free_list, list) { | 544 | list_for_each_entry_safe(op, tmp, &freeing_list, list) { |
| 544 | BUG_ON(!kprobe_unused(&op->kp)); | 545 | BUG_ON(!kprobe_unused(&op->kp)); |
| 545 | list_del_init(&op->list); | 546 | list_del_init(&op->list); |
| 546 | free_aggr_kprobe(&op->kp); | 547 | free_aggr_kprobe(&op->kp); |
| @@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void) | |||
| 556 | /* Kprobe jump optimizer */ | 557 | /* Kprobe jump optimizer */ |
| 557 | static __kprobes void kprobe_optimizer(struct work_struct *work) | 558 | static __kprobes void kprobe_optimizer(struct work_struct *work) |
| 558 | { | 559 | { |
| 559 | LIST_HEAD(free_list); | ||
| 560 | |||
| 561 | mutex_lock(&kprobe_mutex); | 560 | mutex_lock(&kprobe_mutex); |
| 562 | /* Lock modules while optimizing kprobes */ | 561 | /* Lock modules while optimizing kprobes */ |
| 563 | mutex_lock(&module_mutex); | 562 | mutex_lock(&module_mutex); |
| @@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
| 566 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) | 565 | * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) |
| 567 | * kprobes before waiting for quiescence period. | 566 | * kprobes before waiting for quiescence period. |
| 568 | */ | 567 | */ |
| 569 | do_unoptimize_kprobes(&free_list); | 568 | do_unoptimize_kprobes(); |
| 570 | 569 | ||
| 571 | /* | 570 | /* |
| 572 | * Step 2: Wait for quiescence period to ensure all running interrupts | 571 | * Step 2: Wait for quiescence period to ensure all running interrupts |
| @@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) | |||
| 581 | do_optimize_kprobes(); | 580 | do_optimize_kprobes(); |
| 582 | 581 | ||
| 583 | /* Step 4: Free cleaned kprobes after quiescence period */ | 582 | /* Step 4: Free cleaned kprobes after quiescence period */ |
| 584 | do_free_cleaned_kprobes(&free_list); | 583 | do_free_cleaned_kprobes(); |
| 585 | 584 | ||
| 586 | mutex_unlock(&module_mutex); | 585 | mutex_unlock(&module_mutex); |
| 587 | mutex_unlock(&kprobe_mutex); | 586 | mutex_unlock(&kprobe_mutex); |
| @@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p) | |||
| 723 | if (!list_empty(&op->list)) | 722 | if (!list_empty(&op->list)) |
| 724 | /* Dequeue from the (un)optimization queue */ | 723 | /* Dequeue from the (un)optimization queue */ |
| 725 | list_del_init(&op->list); | 724 | list_del_init(&op->list); |
| 726 | |||
| 727 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; | 725 | op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; |
| 726 | |||
| 727 | if (kprobe_unused(p)) { | ||
| 728 | /* Enqueue if it is unused */ | ||
| 729 | list_add(&op->list, &freeing_list); | ||
| 730 | /* | ||
| 731 | * Remove unused probes from the hash list. After waiting | ||
| 732 | * for synchronization, this probe is reclaimed. | ||
| 733 | * (reclaiming is done by do_free_cleaned_kprobes().) | ||
| 734 | */ | ||
| 735 | hlist_del_rcu(&op->kp.hlist); | ||
| 736 | } | ||
| 737 | |||
| 728 | /* Don't touch the code, because it is already freed. */ | 738 | /* Don't touch the code, because it is already freed. */ |
| 729 | arch_remove_optimized_kprobe(op); | 739 | arch_remove_optimized_kprobe(op); |
| 730 | } | 740 | } |
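The kprobes change replaces a scratch list on the optimizer's stack with the file-scope freeing_list, so kill_optimized_kprobe() can queue unused probes for the same deferred reclaim pass. A hedged userspace analogue of that shape (a static list plus a mutex; names are illustrative, and the kernel protects its list with kprobe_mutex rather than a dedicated lock):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct probe {
        struct probe *next;
        int id;
    };

    static pthread_mutex_t probe_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct probe *freeing_list;          /* protected by probe_mutex */

    /* "Kill" path: queue an unused probe for the next cleanup pass. */
    static void queue_for_free(struct probe *p)
    {
        pthread_mutex_lock(&probe_mutex);
        p->next = freeing_list;
        freeing_list = p;
        pthread_mutex_unlock(&probe_mutex);
    }

    /* Deferred-work path: reclaim everything queued so far. */
    static void free_cleaned(void)
    {
        pthread_mutex_lock(&probe_mutex);
        while (freeing_list) {
            struct probe *p = freeing_list;

            freeing_list = p->next;
            printf("reclaiming probe %d\n", p->id);
            free(p);
        }
        pthread_mutex_unlock(&probe_mutex);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct probe *p = malloc(sizeof(*p));

            if (!p)
                return 1;
            p->id = i;
            queue_for_free(p);   /* several paths can feed the same list */
        }
        free_cleaned();
        return 0;
    }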
diff --git a/kernel/range.c b/kernel/range.c index eb911dbce267..322ea8e93e4b 100644 --- a/kernel/range.c +++ b/kernel/range.c | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 5 | #include <linux/init.h> | 5 | #include <linux/init.h> |
| 6 | #include <linux/sort.h> | 6 | #include <linux/sort.h> |
| 7 | 7 | #include <linux/string.h> | |
| 8 | #include <linux/range.h> | 8 | #include <linux/range.h> |
| 9 | 9 | ||
| 10 | int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) | 10 | int add_range(struct range *range, int az, int nr_range, u64 start, u64 end) |
| @@ -32,9 +32,8 @@ int add_range_with_merge(struct range *range, int az, int nr_range, | |||
| 32 | if (start >= end) | 32 | if (start >= end) |
| 33 | return nr_range; | 33 | return nr_range; |
| 34 | 34 | ||
| 35 | /* Try to merge it with old one: */ | 35 | /* get new start/end: */ |
| 36 | for (i = 0; i < nr_range; i++) { | 36 | for (i = 0; i < nr_range; i++) { |
| 37 | u64 final_start, final_end; | ||
| 38 | u64 common_start, common_end; | 37 | u64 common_start, common_end; |
| 39 | 38 | ||
| 40 | if (!range[i].end) | 39 | if (!range[i].end) |
| @@ -45,14 +44,16 @@ int add_range_with_merge(struct range *range, int az, int nr_range, | |||
| 45 | if (common_start > common_end) | 44 | if (common_start > common_end) |
| 46 | continue; | 45 | continue; |
| 47 | 46 | ||
| 48 | final_start = min(range[i].start, start); | 47 | /* new start/end, will add it back at the end */ |
| 49 | final_end = max(range[i].end, end); | 48 | start = min(range[i].start, start); |
| 49 | end = max(range[i].end, end); | ||
| 50 | 50 | ||
| 51 | /* clear it and add it back for further merge */ | 51 | memmove(&range[i], &range[i + 1], |
| 52 | range[i].start = 0; | 52 | (nr_range - (i + 1)) * sizeof(range[i])); |
| 53 | range[i].end = 0; | 53 | range[nr_range - 1].start = 0; |
| 54 | return add_range_with_merge(range, az, nr_range, | 54 | range[nr_range - 1].end = 0; |
| 55 | final_start, final_end); | 55 | nr_range--; |
| 56 | i--; | ||
| 56 | } | 57 | } |
| 57 | 58 | ||
| 58 | /* Need to add it: */ | 59 | /* Need to add it: */ |
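The range.c hunk converts the recursive merge into an in-place one: every overlapping entry is absorbed into the running start/end, the array is compacted with memmove(), and the merged range is appended once. A self-contained userspace version of the same loop, with add_range() folded in for brevity (a zero end still marks an empty slot, as in the kernel file):

    #include <stdio.h>
    #include <string.h>

    struct range { unsigned long long start, end; };

    /* Merge a new range into the array in place; returns the new count. */
    static int add_range_with_merge(struct range *range, int az, int nr_range,
                                    unsigned long long start,
                                    unsigned long long end)
    {
        int i;

        if (start >= end)
            return nr_range;

        /* Absorb every entry that overlaps or touches the new range. */
        for (i = 0; i < nr_range; i++) {
            unsigned long long common_start, common_end;

            if (!range[i].end)
                continue;

            common_start = range[i].start > start ? range[i].start : start;
            common_end = range[i].end < end ? range[i].end : end;
            if (common_start > common_end)
                continue;

            if (range[i].start < start)
                start = range[i].start;
            if (range[i].end > end)
                end = range[i].end;

            /* Drop the absorbed entry and keep the array compact. */
            memmove(&range[i], &range[i + 1],
                    (nr_range - (i + 1)) * sizeof(range[i]));
            range[nr_range - 1].start = 0;
            range[nr_range - 1].end = 0;
            nr_range--;
            i--;
        }

        /* Append the merged range (add_range() does this in the kernel). */
        if (nr_range < az) {
            range[nr_range].start = start;
            range[nr_range].end = end;
            nr_range++;
        }
        return nr_range;
    }

    int main(void)
    {
        struct range r[8] = { { 0, 10 }, { 20, 30 } };
        int n = 2;

        n = add_range_with_merge(r, 8, n, 5, 25);   /* bridges both entries */
        for (int i = 0; i < n; i++)
            printf("%llu-%llu\n", r[i].start, r[i].end);    /* 0-30 */
        return 0;
    }

Compared with the old recursion, the array stays compact: absorbed entries are removed immediately instead of being zeroed in place and skipped on later passes.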
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 58453b8272fd..e8b335016c52 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu) | |||
| 633 | static inline bool got_nohz_idle_kick(void) | 633 | static inline bool got_nohz_idle_kick(void) |
| 634 | { | 634 | { |
| 635 | int cpu = smp_processor_id(); | 635 | int cpu = smp_processor_id(); |
| 636 | return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); | 636 | |
| 637 | if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu))) | ||
| 638 | return false; | ||
| 639 | |||
| 640 | if (idle_cpu(cpu) && !need_resched()) | ||
| 641 | return true; | ||
| 642 | |||
| 643 | /* | ||
| 644 | * We can't run the idle load balance on this CPU at this time, so | ||
| 645 | * cancel the kick and clear NOHZ_BALANCE_KICK. | ||
| 646 | */ | ||
| 647 | clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)); | ||
| 648 | return false; | ||
| 637 | } | 649 | } |
| 638 | 650 | ||
| 639 | #else /* CONFIG_NO_HZ_COMMON */ | 651 | #else /* CONFIG_NO_HZ_COMMON */ |
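got_nohz_idle_kick() now consumes the kick when it cannot be honoured, and scheduler_ipi() evaluates it last in its early-return test, after the cheaper checks. A small userspace sketch of the consume-on-failure shape (C11 atomics; names and values are illustrative, not the scheduler's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { BALANCE_KICK = 1 << 0 };

    struct cpu_state {
        atomic_int nohz_flags;
        bool idle;
        bool need_resched;
    };

    static bool got_idle_kick(struct cpu_state *cpu)
    {
        if (!(atomic_load(&cpu->nohz_flags) & BALANCE_KICK))
            return false;

        if (cpu->idle && !cpu->need_resched)
            return true;            /* we can run the idle balance now */

        /* Can't service the kick this time: clear it so it isn't left set. */
        atomic_fetch_and(&cpu->nohz_flags, ~BALANCE_KICK);
        return false;
    }

    int main(void)
    {
        struct cpu_state cpu = { .nohz_flags = BALANCE_KICK,
                                 .idle = false, .need_resched = false };

        printf("%d\n", got_idle_kick(&cpu));                     /* 0: not idle */
        printf("%#x\n", (unsigned)atomic_load(&cpu.nohz_flags)); /* 0: cleared */
        return 0;
    }

Without the clear, a kick that arrives while the CPU is no longer idle would leave NOHZ_BALANCE_KICK set with nothing acting on it.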
| @@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void) | |||
| 1393 | 1405 | ||
| 1394 | void scheduler_ipi(void) | 1406 | void scheduler_ipi(void) |
| 1395 | { | 1407 | { |
| 1396 | if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() | 1408 | if (llist_empty(&this_rq()->wake_list) |
| 1397 | && !tick_nohz_full_cpu(smp_processor_id())) | 1409 | && !tick_nohz_full_cpu(smp_processor_id()) |
| 1410 | && !got_nohz_idle_kick()) | ||
| 1398 | return; | 1411 | return; |
| 1399 | 1412 | ||
| 1400 | /* | 1413 | /* |
| @@ -1417,7 +1430,7 @@ void scheduler_ipi(void) | |||
| 1417 | /* | 1430 | /* |
| 1418 | * Check if someone kicked us for doing the nohz idle load balance. | 1431 | * Check if someone kicked us for doing the nohz idle load balance. |
| 1419 | */ | 1432 | */ |
| 1420 | if (unlikely(got_nohz_idle_kick() && !need_resched())) { | 1433 | if (unlikely(got_nohz_idle_kick())) { |
| 1421 | this_rq()->idle_balance = 1; | 1434 | this_rq()->idle_balance = 1; |
| 1422 | raise_softirq_irqoff(SCHED_SOFTIRQ); | 1435 | raise_softirq_irqoff(SCHED_SOFTIRQ); |
| 1423 | } | 1436 | } |
| @@ -4745,7 +4758,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
| 4745 | */ | 4758 | */ |
| 4746 | idle->sched_class = &idle_sched_class; | 4759 | idle->sched_class = &idle_sched_class; |
| 4747 | ftrace_graph_init_idle_task(idle, cpu); | 4760 | ftrace_graph_init_idle_task(idle, cpu); |
| 4748 | vtime_init_idle(idle); | 4761 | vtime_init_idle(idle, cpu); |
| 4749 | #if defined(CONFIG_SMP) | 4762 | #if defined(CONFIG_SMP) |
| 4750 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); | 4763 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
| 4751 | #endif | 4764 | #endif |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index cc2dc3eea8a3..b5ccba22603b 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
| @@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev) | |||
| 747 | 747 | ||
| 748 | write_seqlock(¤t->vtime_seqlock); | 748 | write_seqlock(¤t->vtime_seqlock); |
| 749 | current->vtime_snap_whence = VTIME_SYS; | 749 | current->vtime_snap_whence = VTIME_SYS; |
| 750 | current->vtime_snap = sched_clock(); | 750 | current->vtime_snap = sched_clock_cpu(smp_processor_id()); |
| 751 | write_sequnlock(¤t->vtime_seqlock); | 751 | write_sequnlock(¤t->vtime_seqlock); |
| 752 | } | 752 | } |
| 753 | 753 | ||
| 754 | void vtime_init_idle(struct task_struct *t) | 754 | void vtime_init_idle(struct task_struct *t, int cpu) |
| 755 | { | 755 | { |
| 756 | unsigned long flags; | 756 | unsigned long flags; |
| 757 | 757 | ||
| 758 | write_seqlock_irqsave(&t->vtime_seqlock, flags); | 758 | write_seqlock_irqsave(&t->vtime_seqlock, flags); |
| 759 | t->vtime_snap_whence = VTIME_SYS; | 759 | t->vtime_snap_whence = VTIME_SYS; |
| 760 | t->vtime_snap = sched_clock(); | 760 | t->vtime_snap = sched_clock_cpu(cpu); |
| 761 | write_sequnlock_irqrestore(&t->vtime_seqlock, flags); | 761 | write_sequnlock_irqrestore(&t->vtime_seqlock, flags); |
| 762 | } | 762 | } |
| 763 | 763 | ||
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 0c739423b0f9..b4c245580b79 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
| @@ -698,10 +698,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
| 698 | 698 | ||
| 699 | bc->event_handler = tick_handle_oneshot_broadcast; | 699 | bc->event_handler = tick_handle_oneshot_broadcast; |
| 700 | 700 | ||
| 701 | /* Take the do_timer update */ | ||
| 702 | if (!tick_nohz_full_cpu(cpu)) | ||
| 703 | tick_do_timer_cpu = cpu; | ||
| 704 | |||
| 705 | /* | 701 | /* |
| 706 | * We must be careful here. There might be other CPUs | 702 | * We must be careful here. There might be other CPUs |
| 707 | * waiting for periodic broadcast. We need to set the | 703 | * waiting for periodic broadcast. We need to set the |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f4208138fbf4..0cf1c1453181 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, | |||
| 306 | * we can't safely shutdown that CPU. | 306 | * we can't safely shutdown that CPU. |
| 307 | */ | 307 | */ |
| 308 | if (have_nohz_full_mask && tick_do_timer_cpu == cpu) | 308 | if (have_nohz_full_mask && tick_do_timer_cpu == cpu) |
| 309 | return -EINVAL; | 309 | return NOTIFY_BAD; |
| 310 | break; | 310 | break; |
| 311 | } | 311 | } |
| 312 | return NOTIFY_OK; | 312 | return NOTIFY_OK; |
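The tick-sched fix is about return-value domains: a CPU hotplug notifier must answer with NOTIFY_* status codes, and a raw errno such as -EINVAL is not recognised as a veto (the kernel also provides notifier_from_errno() for wrapping an errno when one is all you have). A minimal illustration of the distinction (the numeric values mirror include/linux/notifier.h; the callback shape and action constant are only illustrative):

    #include <stdio.h>

    enum {
        NOTIFY_DONE      = 0x0000,
        NOTIFY_OK        = 0x0001,
        NOTIFY_STOP_MASK = 0x8000,
        NOTIFY_BAD       = NOTIFY_STOP_MASK | 0x0002,
    };

    enum { CPU_DOWN_PREPARE = 0x0005 };

    /* Veto the hotplug with a notifier status, not an errno: the notifier
     * core decodes the return value with notifier_to_errno(), and a raw
     * -EINVAL is not recognised as a veto. */
    static int cpu_down_callback(unsigned long action, int cpu, int timer_cpu)
    {
        switch (action) {
        case CPU_DOWN_PREPARE:
            if (cpu == timer_cpu)
                return NOTIFY_BAD;
            break;
        }
        return NOTIFY_OK;
    }

    int main(void)
    {
        printf("%#x\n", (unsigned)cpu_down_callback(CPU_DOWN_PREPARE, 3, 3)); /* 0x8002 */
        printf("%#x\n", (unsigned)cpu_down_callback(CPU_DOWN_PREPARE, 2, 3)); /* 0x1 */
        return 0;
    }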
