271 files changed, 3794 insertions(+), 2213 deletions(-)
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d658827f..c0d8788e75d3 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -587,7 +587,7 @@ used to control it:
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@ is the amount of seconds before the reset that the pre-timeout panic will
 occur (if pretimeout is zero, then pretimeout will not be enabled). Note
 that the pretimeout is the time before the final timeout. So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 second (10 seconds before the timeout). The panic_wdt_timeout
+is the value of timeout which is set on kernel panic, in order to let actions
+such as kdump to occur during panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@ for configuring the watchdog:
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
 
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
index 2f6c6ff7161d..d8880ca30af4 100644
--- a/Documentation/block/null_blk.txt
+++ b/Documentation/block/null_blk.txt
@@ -70,3 +70,6 @@ use_per_node_hctx=[0/1]: Default: 0
     parameter.
   1: The multi-queue block layer is instantiated with a hardware dispatch
      queue for each CPU node in the system.
+
+use_lightnvm=[0/1]: Default: 0
+  Register device with LightNVM. Requires blk-mq to be used.
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6a4b1af724f8..1bba38dd2637 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -32,6 +32,7 @@ Supported adapters:
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8aae632f02f..742f69d18fc8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
-		no_acpi
-			Don't use ACPI processor performance control objects
-			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index b16bffabe70a..3f92804f5dac 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2449,7 +2449,9 @@ F: drivers/firmware/broadcom/*
 
 BROADCOM STB NAND FLASH DRIVER
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Kamal Dasu <kdasu.kdev@gmail.com>
 L:	linux-mtd@lists.infradead.org
+L:	bcm-kernel-feedback-list@broadcom.com
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
@@ -2929,10 +2931,9 @@ S: Maintained
 F:	drivers/platform/x86/compal-laptop.c
 
 CONEXANT ACCESSRUNNER USB DRIVER
-M:	Simon Arlott <cxacru@fire.lp0.eu>
 L:	accessrunner-general@lists.sourceforge.net
 W:	http://accessrunner.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
@@ -4409,6 +4410,7 @@ K: fmc_d.*register
 
 FPGA MANAGER FRAMEWORK
 M:	Alan Tull <atull@opensource.altera.com>
+R:	Moritz Fischer <moritz.fischer@ettus.com>
 S:	Maintained
 F:	drivers/fpga/
 F:	include/linux/fpga/fpga-mgr.h
@@ -6364,6 +6366,7 @@ F: arch/*/include/asm/pmem.h
 LIGHTNVM PLATFORM SUPPORT
 M:	Matias Bjorling <mb@lightnvm.io>
 W:	http://github/OpenChannelSSD
+L:	linux-block@vger.kernel.org
 S:	Maintained
 F:	drivers/lightnvm/
 F:	include/linux/lightnvm.h
@@ -7902,6 +7905,18 @@ S: Maintained
 F:	net/openvswitch/
 F:	include/uapi/linux/openvswitch.h
 
+OPERATING PERFORMANCE POINTS (OPP)
+M:	Viresh Kumar <vireshk@kernel.org>
+M:	Nishanth Menon <nm@ti.com>
+M:	Stephen Boyd <sboyd@codeaurora.org>
+L:	linux-pm@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
+F:	drivers/base/power/opp/
+F:	include/linux/pm_opp.h
+F:	Documentation/power/opp.txt
+F:	Documentation/devicetree/bindings/opp/
+
 OPL4 DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7077e4..936bc8f89a67 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
 
 	/* The small sections were sorted to the end of the segment.
 	   The following should definitely cover them. */
-	gp = (u64)me->module_core + me->core_size - 0x8000;
+	gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000;
 	got = sechdrs[me->arch.gotsecindex].sh_addr;
 
 	for (i = 0; i < n; i++) {
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..e0034a6656ef 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -372,8 +372,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 		return NULL;
 
 	init_unwind_table(table, module->name,
-			  module->module_core, module->core_size,
-			  module->module_init, module->init_size,
+			  module->core_layout.base, module->core_layout.size,
+			  module->init_layout.base, module->init_layout.size,
 			  table_start, table_size,
 			  NULL, 0);
 
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index feb9d34b239c..f818ea483aeb 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -486,7 +486,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024000 0x200>;
 				interrupts = <56>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 0>;
 				status = "disabled";
 			};
@@ -495,7 +498,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024200 0x200>;
 				interrupts = <54>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 1>;
 				dr_mode = "host";
 				status = "disabled";
@@ -505,7 +511,10 @@
 				compatible = "fsl,imx27-usb";
 				reg = <0x10024400 0x200>;
 				interrupts = <55>;
-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+					 <&clks IMX27_CLK_USB_AHB_GATE>,
+					 <&clks IMX27_CLK_USB_DIV>;
+				clock-names = "ipg", "ahb", "per";
 				fsl,usbmisc = <&usbmisc 2>;
 				dr_mode = "host";
 				status = "disabled";
@@ -515,7 +524,6 @@
 				#index-cells = <1>;
 				compatible = "fsl,imx27-usbmisc";
 				reg = <0x10024600 0x200>;
-				clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
 			};
 
 			sahara2: sahara@10025000 {
diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
index 097e2e201b9f..0c7efc3446c0 100644
--- a/arch/arm/kernel/module-plts.c
+++ b/arch/arm/kernel/module-plts.c
@@ -32,7 +32,7 @@ struct plt_entries {
 
 static bool in_init(const struct module *mod, u32 addr)
 {
-	return addr - (u32)mod->module_init < mod->init_size;
+	return addr - (u32)mod->init_layout.base < mod->init_layout.size;
 }
 
 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
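The rewritten in_init() keeps the kernel's single-comparison range-check idiom: because the arithmetic is unsigned, an address below the base wraps around to a huge value and fails the size test, so one compare covers both bounds. A minimal standalone sketch of the idiom (illustrative names, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One compare checks base <= addr < base + size: if addr < base, the
 * unsigned subtraction wraps to a huge value and fails the "< size" test. */
static bool in_region(uint32_t addr, uint32_t base, uint32_t size)
{
	return addr - base < size;
}

int main(void)
{
	printf("%d\n", in_region(0x1004, 0x1000, 0x100)); /* 1: inside */
	printf("%d\n", in_region(0x0fff, 0x1000, 0x100)); /* 0: below base, wraps */
	printf("%d\n", in_region(0x1100, 0x1000, 0x100)); /* 0: past the end */
	return 0;
}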
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			vcpu_sleep(vcpu);
 
 		/*
-		 * Disarming the background timer must be done in a
-		 * preemptible context, as this call may sleep.
-		 */
-		kvm_timer_flush_hwstate(vcpu);
-
-		/*
 		 * Preparing the interrupts to be injected also
 		 * involves poking the GIC, which must be done in a
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..7dace909d5cf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..e55848c1edf4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
 static struct crypto_alg aes_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-ce",
-	.cra_priority		= 300,
+	.cra_priority		= 250,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do {								\
 
 #define smp_load_acquire(p)					\
 ({								\
-	typeof(*p) ___p1;					\
+	union { typeof(*p) __val; char __c[1]; } __u;		\
 	compiletime_assert_atomic_type(*p);			\
 	switch (sizeof(*p)) {					\
 	case 1:							\
 		asm volatile ("ldarb %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u8 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 2:							\
 		asm volatile ("ldarh %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u16 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 4:							\
 		asm volatile ("ldar %w0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u32 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	case 8:							\
 		asm volatile ("ldar %0, %1"			\
-			: "=r" (___p1) : "Q" (*p) : "memory");	\
+			: "=r" (*(__u64 *)__u.__c)		\
+			: "Q" (*p) : "memory");			\
 		break;						\
 	}							\
-	___p1;							\
+	__u.__val;						\
 })
 
 #define read_barrier_depends()		do { } while(0)
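The union in the new smp_load_acquire() mirrors the READ_ONCE()-style pattern: the inline asm writes the loaded value through a char-array alias of exactly the right width, while the macro as a whole still evaluates to a value of typeof(*p). A rough user-space sketch of that shape, with memcpy standing in for the ldar instruction (an assumption-laden illustration of the pattern only, not the kernel macro; requires GNU C typeof and statement expressions):

#include <stdio.h>
#include <string.h>

/* The asm (memcpy here) fills __u.__c; the statement expression then
 * yields __u.__val, which has the pointee's exact type, qualifiers
 * and all -- this is what lets the macro accept e.g. const objects. */
#define load_exact(p) ({					\
	union { __typeof__(*(p)) __val; char __c[1]; } __u;	\
	memcpy(__u.__c, (const void *)(p), sizeof(*(p)));	\
	__u.__val;						\
})

int main(void)
{
	const unsigned long v = 42;	/* works even though v is const */
	printf("%lu\n", load_exact(&v));
	return 0;
}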
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..52722ee73dba 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -26,22 +25,16 @@
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev))
-		return dma_ops;
-	else if (dev->archdata.dma_ops)
+	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	else if (acpi_disabled)
-		return dma_ops;
 
 	/*
-	 * When ACPI is enabled, if arch_set_dma_ops is not called,
-	 * we will disable device DMA capability by setting it
-	 * to dummy_dma_ops.
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
	 */
 	return &dummy_dma_ops;
 }
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3ca894ecf699 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
-
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
 * This is called when "tsk" is about to enter lazy TLB mode.
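The extra parentheses around mm matter because callers may pass an expression rather than a plain identifier: the efi.c hunk further down invokes init_new_context(NULL, &efi_mm), and without the parentheses the expansion would contain "&&efi_mm->context.id", which does not parse. A small standalone illustration of the macro-hygiene point (hypothetical names):

#include <stdio.h>

struct ctx { long id; };
struct mm  { struct ctx context; };

/* Unhygienic: the argument is substituted textually, so passing
 * '&some_mm' yields '&&some_mm->context.id', a syntax error. */
#define CTX_ID_BAD(mm)   (&mm->context.id)
/* Hygienic: parentheses make '->' bind to the whole argument. */
#define CTX_ID_GOOD(mm)  (&(mm)->context.id)

int main(void)
{
	struct mm m = { .context = { .id = 7 } };
	/* CTX_ID_BAD(&m) would not compile; the fixed form does: */
	printf("%ld\n", *CTX_ID_GOOD(&m));
	return 0;
}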
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..7e074f93f383 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
 * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
 
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   loops_per_jiffy / (500000UL/HZ),
+			   loops_per_jiffy / (5000UL/HZ) % 100);
+
 		/*
 		 * Dump out the common processor features in a single line.
 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
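The two divisions in the added seq_printf() split loops_per_jiffy into the integer and two-digit fractional parts of BogoMIPS (loops_per_jiffy * HZ / 500000). A quick user-space check of the arithmetic, with hypothetical HZ and loops_per_jiffy values:

#include <stdio.h>

#define HZ 100UL	/* illustrative; a kernel config choice */

int main(void)
{
	unsigned long loops_per_jiffy = 2400000;	/* hypothetical */
	/* 2400000 * 100 / 500000 = 480, so this prints "480.00" */
	printf("BogoMIPS\t: %lu.%02lu\n",
	       loops_per_jiffy / (500000UL / HZ),
	       loops_per_jiffy / (5000UL / HZ) % 100);
	return 0;
}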
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..fc5508e0df57 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
 		u64 paddr, npages, size;
 		pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets incosistent when the kernel
+	 * calls functions that never return (aka suspend finishers) hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+	// Stash PAR_EL1 before corrupting it in __restore_sysregs
+	mrs	x0, par_el1
+	push	x0, xzr
+
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
 	// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
 	mrs	x3, esr_el2
 	mrs	x4, far_el2
 	mrs	x5, hpfar_el2
-	mrs	x6, par_el1
+	pop	x6, xzr		// active context PAR_EL1
 	mrs	x7, tpidr_el2
 
 	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
 	.align	2
 
@@ -1015,9 +1019,15 @@ el1_trap:
 	b.ne	1f		// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
 	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f		// Not a permission fault
+alternative_else
+	nop			// Use the permission fault path to
+	nop			// check for a valid S1 translation,
+	nop			// regardless of the ESR value.
+alternative_endif
 
 	/*
 	 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 	/* Note: These now point to the banked copies */
 	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	/* Branch to exception vector */
 	if (sctlr & (1 << 13))
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..abb66f84d4ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							 SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 			       __pa(_stext) - aligned_start,
 			       PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-						     SECTION_SIZE);
+						     SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 			       aligned_end - __pa(__init_end),
 			       PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
 				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				PAGE_KERNEL_ROX);
 
 }
 #endif
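round_down()/round_up() here clamp the kernel text boundaries to SWAPPER_BLOCK_SIZE rather than SECTION_SIZE. For power-of-two alignments these reduce to simple mask arithmetic; a sketch of the semantics (simplified versions of the kernel macros, valid only for power-of-two alignment):

#include <stdio.h>

/* Clear the low bits to round down; bias then round down to round up. */
#define round_down(x, align)	((x) & ~((align) - 1))
#define round_up(x, align)	round_down((x) + (align) - 1, (align))

int main(void)
{
	unsigned long block = 0x200000;		/* e.g. a 2 MiB block */
	unsigned long addr  = 0x40123456;

	printf("down: %#lx\n", round_down(addr, block)); /* 0x40000000 */
	printf("up:   %#lx\n", round_up(addr, block));   /* 0x40200000 */
	return 0;
}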
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 164efa009e5b..2b4c54c04cb6 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -118,9 +118,9 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 	 * Increase core size to make room for GOT and set start
 	 * offset for GOT.
 	 */
-	module->core_size = ALIGN(module->core_size, 4);
-	module->arch.got_offset = module->core_size;
-	module->core_size += module->arch.got_size;
+	module->core_layout.size = ALIGN(module->core_layout.size, 4);
+	module->arch.got_offset = module->core_layout.size;
+	module->core_layout.size += module->arch.got_size;
 
 	return 0;
 
@@ -177,7 +177,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 		if (!info->got_initialized) {
 			Elf32_Addr *gotent;
 
-			gotent = (module->module_core
+			gotent = (module->core_layout.base
 				  + module->arch.got_offset
 				  + info->got_offset);
 			*gotent = relocation;
@@ -255,8 +255,8 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
 		 */
 		pr_debug("GOTPC: PC=0x%x, got_offset=0x%lx, core=0x%p\n",
 			 relocation, module->arch.got_offset,
-			 module->module_core);
-		relocation -= ((unsigned long)module->module_core
+			 module->core_layout.base);
+		relocation -= ((unsigned long)module->core_layout.base
 			       + module->arch.got_offset);
 		*location = relocation;
 		break;
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index b15933c31b2f..6ab0ae7d6535 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -486,13 +486,13 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 static inline int
 in_init (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_init < mod->init_size;
+	return addr - (uint64_t) mod->init_layout.base < mod->init_layout.size;
 }
 
 static inline int
 in_core (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_core < mod->core_size;
+	return addr - (uint64_t) mod->core_layout.base < mod->core_layout.size;
 }
 
 static inline int
@@ -675,7 +675,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		break;
 
 	      case RV_BDREL:
-		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
+		val -= (uint64_t) (in_init(mod, val) ? mod->init_layout.base : mod->core_layout.base);
 		break;
 
 	      case RV_LTV:
@@ -810,15 +810,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * addresses have been selected...
 		 */
 		uint64_t gp;
-		if (mod->core_size > MAX_LTOFF)
+		if (mod->core_layout.size > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_layout.size - MAX_LTOFF / 2;
 		else
-			gp = mod->core_size / 2;
-		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
+			gp = mod->core_layout.size / 2;
+		gp = (uint64_t) mod->core_layout.base + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
diff --git a/arch/metag/kernel/module.c b/arch/metag/kernel/module.c
index 986331cd0a52..bb8dfba9a763 100644
--- a/arch/metag/kernel/module.c
+++ b/arch/metag/kernel/module.c
@@ -176,8 +176,8 @@ static uint32_t do_plt_call(void *location, Elf32_Addr val,
 	tramp[1] = 0xac000001 | ((val & 0x0000ffff) << 3);
 
 	/* Init, or core PLT? */
-	if (location >= mod->module_core
-	    && location < mod->module_core + mod->core_size)
+	if (location >= mod->core_layout.base
+	    && location < mod->core_layout.base + mod->core_layout.size)
 		entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
 	else
 		entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 1ba21204ebe0..8755d618e116 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
 					 AR71XX_RESET_SIZE);
 	ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
 					 AR71XX_PLL_SIZE);
+	ath79_detect_sys_type();
 	ath79_ddr_ctrl_init();
 
-	ath79_detect_sys_type();
 	if (mips_machtype != ATH79_MACH_GENERIC_OF)
 		detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
 
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
 	     "Generic",
 	     "Generic AR71XX/AR724X/AR913X based board",
 	     ath79_generic_init);
+
+MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
+	     "DTB",
+	     "Generic AR71XX/AR724X/AR913X based board (DT)",
+	     NULL);
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index fb7734eadbf0..13d0439496a9 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -107,7 +107,7 @@
 		miscintc: interrupt-controller@18060010 {
 			compatible = "qca,ar9132-misc-intc",
 				     "qca,ar7100-misc-intc";
-			reg = <0x18060010 0x4>;
+			reg = <0x18060010 0x8>;
 
 			interrupt-parent = <&cpuintc>;
 			interrupts = <6>;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index ad1fccdb8d13..2046c0230224 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h | |||
| @@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn) | |||
| 200 | { | 200 | { |
| 201 | /* avoid <linux/mm.h> include hell */ | 201 | /* avoid <linux/mm.h> include hell */ |
| 202 | extern unsigned long max_mapnr; | 202 | extern unsigned long max_mapnr; |
| 203 | unsigned long pfn_offset = ARCH_PFN_OFFSET; | ||
| 203 | 204 | ||
| 204 | return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; | 205 | return pfn >= pfn_offset && pfn < max_mapnr; |
| 205 | } | 206 | } |
| 206 | 207 | ||
| 207 | #elif defined(CONFIG_SPARSEMEM) | 208 | #elif defined(CONFIG_SPARSEMEM) |
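
The pfn_valid() tweak keeps the same range test but routes ARCH_PFN_OFFSET through a local variable. The likely motivation (an assumption, not stated in the diff): when ARCH_PFN_OFFSET expands to the literal 0, "pfn >= 0" on an unsigned value is always true and GCC can warn about it, while a variable hides the constant from the comparison without changing the generated check. A compilable illustration:

#include <stdio.h>

#define ARCH_PFN_OFFSET 0UL   /* typical FLATMEM value */

static unsigned long max_mapnr = 1UL << 20;

static int pfn_valid(unsigned long pfn)
{
        unsigned long pfn_offset = ARCH_PFN_OFFSET;

        /* With the macro used directly, "pfn >= 0UL" can trigger
         * -Wtype-limits ("comparison is always true"). */
        return pfn >= pfn_offset && pfn < max_mapnr;
}

int main(void)
{
        printf("%d %d\n", pfn_valid(42), pfn_valid(1UL << 21));
        return 0;
}
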
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 9067b651c7a2..544ea21bfef9 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
| @@ -205,11 +205,11 @@ static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, | |||
| 205 | || s->sh_entsize != ~0UL) | 205 | || s->sh_entsize != ~0UL) |
| 206 | continue; | 206 | continue; |
| 207 | s->sh_entsize = | 207 | s->sh_entsize = |
| 208 | get_offset((unsigned long *)&mod->core_size, s); | 208 | get_offset((unsigned long *)&mod->core_layout.size, s); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | if (m == 0) | 211 | if (m == 0) |
| 212 | mod->core_text_size = mod->core_size; | 212 | mod->core_layout.text_size = mod->core_layout.size; |
| 213 | 213 | ||
| 214 | } | 214 | } |
| 215 | } | 215 | } |
| @@ -641,7 +641,7 @@ static int vpe_elfload(struct vpe *v) | |||
| 641 | layout_sections(&mod, hdr, sechdrs, secstrings); | 641 | layout_sections(&mod, hdr, sechdrs, secstrings); |
| 642 | } | 642 | } |
| 643 | 643 | ||
| 644 | v->load_addr = alloc_progmem(mod.core_size); | 644 | v->load_addr = alloc_progmem(mod.core_layout.size); |
| 645 | if (!v->load_addr) | 645 | if (!v->load_addr) |
| 646 | return -ENOMEM; | 646 | return -ENOMEM; |
| 647 | 647 | ||
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index d5fa3eaf39a1..41b1b090f56f 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
| @@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | |||
| 1581 | 1581 | ||
| 1582 | base = (inst >> 21) & 0x1f; | 1582 | base = (inst >> 21) & 0x1f; |
| 1583 | op_inst = (inst >> 16) & 0x1f; | 1583 | op_inst = (inst >> 16) & 0x1f; |
| 1584 | offset = inst & 0xffff; | 1584 | offset = (int16_t)inst; |
| 1585 | cache = (inst >> 16) & 0x3; | 1585 | cache = (inst >> 16) & 0x3; |
| 1586 | op = (inst >> 18) & 0x7; | 1586 | op = (inst >> 18) & 0x7; |
| 1587 | 1587 | ||
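
The one-liner in kvm_mips_emulate_cache() is a sign-extension fix: the 16-bit offset field of a MIPS CACHE instruction is a signed immediate, and "inst & 0xffff" zero-extends it, turning a negative displacement into a large positive one. Casting the low halfword through int16_t sign-extends correctly. A standalone demonstration (the encoding value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t inst = 0xbc90fffc;        /* hypothetical CACHE, offset -4 */
        int32_t zero_ext = inst & 0xffff;  /* old code: 65532 */
        int32_t sign_ext = (int16_t)inst;  /* new code: -4 */

        printf("zero-extended: %d, sign-extended: %d\n", zero_ext, sign_ext);
        return 0;
}
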
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 7bab3a4e8f7d..7e2210846b8b 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S | |||
| @@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
| 157 | 157 | ||
| 158 | FEXPORT(__kvm_mips_load_asid) | 158 | FEXPORT(__kvm_mips_load_asid) |
| 159 | /* Set the ASID for the Guest Kernel */ | 159 | /* Set the ASID for the Guest Kernel */ |
| 160 | INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ | 160 | PTR_L t0, VCPU_COP0(k1) |
| 161 | /* addresses shift to 0x80000000 */ | 161 | LONG_L t0, COP0_STATUS(t0) |
| 162 | bltz t0, 1f /* If kernel */ | 162 | andi t0, KSU_USER | ST0_ERL | ST0_EXL |
| 163 | xori t0, KSU_USER | ||
| 164 | bnez t0, 1f /* If kernel */ | ||
| 163 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 165 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
| 164 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 166 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
| 165 | 1: | 167 | 1: |
| @@ -474,9 +476,11 @@ __kvm_mips_return_to_guest: | |||
| 474 | mtc0 t0, CP0_EPC | 476 | mtc0 t0, CP0_EPC |
| 475 | 477 | ||
| 476 | /* Set the ASID for the Guest Kernel */ | 478 | /* Set the ASID for the Guest Kernel */ |
| 477 | INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ | 479 | PTR_L t0, VCPU_COP0(k1) |
| 478 | /* addresses shift to 0x80000000 */ | 480 | LONG_L t0, COP0_STATUS(t0) |
| 479 | bltz t0, 1f /* If kernel */ | 481 | andi t0, KSU_USER | ST0_ERL | ST0_EXL |
| 482 | xori t0, KSU_USER | ||
| 483 | bnez t0, 1f /* If kernel */ | ||
| 480 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 484 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
| 481 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 485 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
| 482 | 1: | 486 | 1: |
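
Both locore.S hunks replace an address-based heuristic (shift the value and test the sign bit, which assumed guest kseg0 at 0x40000000) with a real privilege check on the guest's CP0 Status register. In C terms the new andi/xori/bnez sequence reads as follows (bit positions per the standard MIPS Status layout; treat the constants as assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ST0_EXL  (1u << 1)    /* exception level */
#define ST0_ERL  (1u << 2)    /* error level */
#define KSU_USER (2u << 3)    /* KSU field == user mode */

static bool guest_in_kernel_mode(uint32_t status)
{
        /* Kernel unless KSU says user AND neither EXL nor ERL is set. */
        return (status & (KSU_USER | ST0_ERL | ST0_EXL)) != KSU_USER;
}

int main(void)
{
        printf("%d %d\n",
               guest_in_kernel_mode(KSU_USER),            /* 0: pure user */
               guest_in_kernel_mode(KSU_USER | ST0_EXL)); /* 1: in exception */
        return 0;
}
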
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index 49ff3bfc007e..b9b803facdbf 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
| @@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
| 279 | 279 | ||
| 280 | if (!gebase) { | 280 | if (!gebase) { |
| 281 | err = -ENOMEM; | 281 | err = -ENOMEM; |
| 282 | goto out_free_cpu; | 282 | goto out_uninit_cpu; |
| 283 | } | 283 | } |
| 284 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", | 284 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", |
| 285 | ALIGN(size, PAGE_SIZE), gebase); | 285 | ALIGN(size, PAGE_SIZE), gebase); |
| @@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
| 343 | out_free_gebase: | 343 | out_free_gebase: |
| 344 | kfree(gebase); | 344 | kfree(gebase); |
| 345 | 345 | ||
| 346 | out_uninit_cpu: | ||
| 347 | kvm_vcpu_uninit(vcpu); | ||
| 348 | |||
| 346 | out_free_cpu: | 349 | out_free_cpu: |
| 347 | kfree(vcpu); | 350 | kfree(vcpu); |
| 348 | 351 | ||
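
The kvm_arch_vcpu_create() fix applies the classic goto-unwind ordering rule: once kvm_vcpu_init() has succeeded, any later failure must call kvm_vcpu_uninit() before kfree(), so the gebase allocation failure now jumps to a new label sitting between the two teardown steps. A generic, runnable rendering of the pattern (resource names are stand-ins):

#include <stdlib.h>

static int vcpu_init(int *v)    { *v = 1; return 0; }
static void vcpu_uninit(int *v) { *v = 0; }

static int create(int fail_gebase)
{
        int err, inited = 0;
        int *vcpu = malloc(sizeof(*vcpu));

        if (!vcpu)
                return -1;

        err = vcpu_init(&inited);
        if (err)
                goto out_free_cpu;

        if (fail_gebase) {              /* models the kzalloc() failure */
                err = -1;
                goto out_uninit_cpu;    /* new: undo vcpu_init() first */
        }

        free(vcpu);
        return 0;

out_uninit_cpu:
        vcpu_uninit(&inited);
out_free_cpu:
        free(vcpu);
        return err;
}

int main(void) { return create(1) == -1 ? 0 : 1; }
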
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index c36546959e86..729f89163bc3 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
| @@ -108,6 +108,9 @@ config PGTABLE_LEVELS | |||
| 108 | default 3 if 64BIT && PARISC_PAGE_SIZE_4KB | 108 | default 3 if 64BIT && PARISC_PAGE_SIZE_4KB |
| 109 | default 2 | 109 | default 2 |
| 110 | 110 | ||
| 111 | config SYS_SUPPORTS_HUGETLBFS | ||
| 112 | def_bool y if PA20 | ||
| 113 | |||
| 111 | source "init/Kconfig" | 114 | source "init/Kconfig" |
| 112 | 115 | ||
| 113 | source "kernel/Kconfig.freezer" | 116 | source "kernel/Kconfig.freezer" |
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h new file mode 100644 index 000000000000..7d56a9ccb752 --- /dev/null +++ b/arch/parisc/include/asm/hugetlb.h | |||
| @@ -0,0 +1,85 @@ | |||
| 1 | #ifndef _ASM_PARISC64_HUGETLB_H | ||
| 2 | #define _ASM_PARISC64_HUGETLB_H | ||
| 3 | |||
| 4 | #include <asm/page.h> | ||
| 5 | #include <asm-generic/hugetlb.h> | ||
| 6 | |||
| 7 | |||
| 8 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
| 9 | pte_t *ptep, pte_t pte); | ||
| 10 | |||
| 11 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
| 12 | pte_t *ptep); | ||
| 13 | |||
| 14 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
| 15 | unsigned long addr, | ||
| 16 | unsigned long len) { | ||
| 17 | return 0; | ||
| 18 | } | ||
| 19 | |||
| 20 | /* | ||
| 21 | * If the arch doesn't supply something else, assume that hugepage | ||
| 22 | * size aligned regions are ok without further preparation. | ||
| 23 | */ | ||
| 24 | static inline int prepare_hugepage_range(struct file *file, | ||
| 25 | unsigned long addr, unsigned long len) | ||
| 26 | { | ||
| 27 | if (len & ~HPAGE_MASK) | ||
| 28 | return -EINVAL; | ||
| 29 | if (addr & ~HPAGE_MASK) | ||
| 30 | return -EINVAL; | ||
| 31 | return 0; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
| 35 | unsigned long addr, unsigned long end, | ||
| 36 | unsigned long floor, | ||
| 37 | unsigned long ceiling) | ||
| 38 | { | ||
| 39 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
| 43 | unsigned long addr, pte_t *ptep) | ||
| 44 | { | ||
| 45 | } | ||
| 46 | |||
| 47 | static inline int huge_pte_none(pte_t pte) | ||
| 48 | { | ||
| 49 | return pte_none(pte); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
| 53 | { | ||
| 54 | return pte_wrprotect(pte); | ||
| 55 | } | ||
| 56 | |||
| 57 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
| 58 | unsigned long addr, pte_t *ptep) | ||
| 59 | { | ||
| 60 | pte_t old_pte = *ptep; | ||
| 61 | set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
| 65 | unsigned long addr, pte_t *ptep, | ||
| 66 | pte_t pte, int dirty) | ||
| 67 | { | ||
| 68 | int changed = !pte_same(*ptep, pte); | ||
| 69 | if (changed) { | ||
| 70 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); | ||
| 71 | flush_tlb_page(vma, addr); | ||
| 72 | } | ||
| 73 | return changed; | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
| 77 | { | ||
| 78 | return *ptep; | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline void arch_clear_hugepage_flags(struct page *page) | ||
| 82 | { | ||
| 83 | } | ||
| 84 | |||
| 85 | #endif /* _ASM_PARISC64_HUGETLB_H */ | ||
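
With the new asm/hugetlb.h in place (and SYS_SUPPORTS_HUGETLBFS selected above for PA2.0), ordinary hugetlb userspace should start working. A quick smoke test, assuming huge pages have been reserved first (e.g. echo 20 > /proc/sys/vm/nr_hugepages) and that the mapping length matches the system's reported default huge page size:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2 * 1024 * 1024;   /* adjust to Hugepagesize in /proc/meminfo */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap(MAP_HUGETLB)");
                return 1;
        }
        memset(p, 0xaa, len);           /* fault the huge page in */
        printf("huge mapping at %p OK\n", (void *)p);
        munmap(p, len);
        return 0;
}
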
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 60d5d174dfe4..80e742a1c162 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h | |||
| @@ -145,11 +145,22 @@ extern int npmem_ranges; | |||
| 145 | #endif /* CONFIG_DISCONTIGMEM */ | 145 | #endif /* CONFIG_DISCONTIGMEM */ |
| 146 | 146 | ||
| 147 | #ifdef CONFIG_HUGETLB_PAGE | 147 | #ifdef CONFIG_HUGETLB_PAGE |
| 148 | #define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */ | 148 | #define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */ |
| 149 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) | 149 | #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT) |
| 150 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | 150 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) |
| 151 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | 151 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) |
| 152 | |||
| 153 | #if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
| 154 | # define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */ | ||
| 155 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M | ||
| 156 | #elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB) | ||
| 157 | # define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */ | ||
| 158 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M | ||
| 159 | #else | ||
| 160 | # define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */ | ||
| 161 | # define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M | ||
| 152 | #endif | 162 | #endif |
| 163 | #endif /* CONFIG_HUGETLB_PAGE */ | ||
| 153 | 164 | ||
| 154 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 165 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| 155 | 166 | ||
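
The page.h hunk decouples the Linux-visible huge page size (HPAGE_SHIFT, now pinned to PMD_SHIFT) from the size the hardware TLB actually inserts (REAL_HPAGE_SHIFT). The hugetlbpage.c code later in this patch loops over exactly the two ratios computed below; the concrete shifts are the 64-bit/4 KB case from the #if ladder above, with PMD_SHIFT assumed to be 21:

#include <stdio.h>

#define PAGE_SHIFT       12   /* 4 KB base pages */
#define HPAGE_SHIFT      21   /* assumed PMD_SHIFT: 2 MB Linux huge page */
#define REAL_HPAGE_SHIFT 20   /* hardware inserts 1 MB translations */

int main(void)
{
        /* loop count in purge_tlb_entries_huge() */
        printf("hardware TLB entries per huge page: %d\n",
               1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT));          /* 2 */
        /* loop count in set_huge_pte_at() (HUGETLB_PAGE_ORDER) */
        printf("base PTEs written per huge page:    %d\n",
               1 << (HPAGE_SHIFT - PAGE_SHIFT));                /* 512 */
        return 0;
}
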
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 3edbb9fc91b4..f2fd327dce2e 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
| @@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
| 35 | PxD_FLAG_VALID | | 35 | PxD_FLAG_VALID | |
| 36 | PxD_FLAG_ATTACHED) | 36 | PxD_FLAG_ATTACHED) |
| 37 | + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); | 37 | + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); |
| 38 | /* The first pmd entry also is marked with _PAGE_GATEWAY as | 38 | /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as |
| 39 | * a signal that this pmd may not be freed */ | 39 | * a signal that this pmd may not be freed */ |
| 40 | __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); | 40 | __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); |
| 41 | #endif | 41 | #endif |
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index f93c4a4e6580..d8534f95915a 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h | |||
| @@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 83 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) | 83 | printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) |
| 84 | 84 | ||
| 85 | /* This is the size of the initially mapped kernel memory */ | 85 | /* This is the size of the initially mapped kernel memory */ |
| 86 | #define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */ | 86 | #ifdef CONFIG_64BIT |
| 87 | #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ | ||
| 88 | #else | ||
| 89 | #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ | ||
| 90 | #endif | ||
| 87 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) | 91 | #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) |
| 88 | 92 | ||
| 89 | #if CONFIG_PGTABLE_LEVELS == 3 | 93 | #if CONFIG_PGTABLE_LEVELS == 3 |
| @@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 167 | #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ | 171 | #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ |
| 168 | #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ | 172 | #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ |
| 169 | #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ | 173 | #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ |
| 170 | /* bit 21 was formerly the FLUSH bit but is now unused */ | 174 | #define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */ |
| 171 | #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ | 175 | #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ |
| 172 | 176 | ||
| 173 | /* N.B. The bits are defined in terms of a 32 bit word above, so the */ | 177 | /* N.B. The bits are defined in terms of a 32 bit word above, so the */ |
| @@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 194 | #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) | 198 | #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) |
| 195 | #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) | 199 | #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) |
| 196 | #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) | 200 | #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) |
| 201 | #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) | ||
| 197 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) | 202 | #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) |
| 198 | 203 | ||
| 199 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) | 204 | #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) |
| @@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 217 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) | 222 | #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) |
| 218 | #define PxD_FLAG_MASK (0xf) | 223 | #define PxD_FLAG_MASK (0xf) |
| 219 | #define PxD_FLAG_SHIFT (4) | 224 | #define PxD_FLAG_SHIFT (4) |
| 220 | #define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */ | 225 | #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) |
| 221 | 226 | ||
| 222 | #ifndef __ASSEMBLY__ | 227 | #ifndef __ASSEMBLY__ |
| 223 | 228 | ||
| @@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return | |||
| 363 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 368 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
| 364 | 369 | ||
| 365 | /* | 370 | /* |
| 371 | * Huge pte definitions. | ||
| 372 | */ | ||
| 373 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 374 | #define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) | ||
| 375 | #define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE)) | ||
| 376 | #else | ||
| 377 | #define pte_huge(pte) (0) | ||
| 378 | #define pte_mkhuge(pte) (pte) | ||
| 379 | #endif | ||
| 380 | |||
| 381 | |||
| 382 | /* | ||
| 366 | * Conversion functions: convert a page and protection to a page entry, | 383 | * Conversion functions: convert a page and protection to a page entry, |
| 367 | * and a page entry and page directory to the page they refer to. | 384 | * and a page entry and page directory to the page they refer to. |
| 368 | */ | 385 | */ |
| @@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
| 410 | /* Find an entry in the second-level page table.. */ | 427 | /* Find an entry in the second-level page table.. */ |
| 411 | 428 | ||
| 412 | #if CONFIG_PGTABLE_LEVELS == 3 | 429 | #if CONFIG_PGTABLE_LEVELS == 3 |
| 430 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | ||
| 413 | #define pmd_offset(dir,address) \ | 431 | #define pmd_offset(dir,address) \ |
| 414 | ((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1))) | 432 | ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) |
| 415 | #else | 433 | #else |
| 416 | #define pmd_offset(dir,addr) ((pmd_t *) dir) | 434 | #define pmd_offset(dir,addr) ((pmd_t *) dir) |
| 417 | #endif | 435 | #endif |
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 54adb60c0a42..7e759ecb1343 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
| @@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack); | |||
| 192 | */ | 192 | */ |
| 193 | typedef unsigned int elf_caddr_t; | 193 | typedef unsigned int elf_caddr_t; |
| 194 | 194 | ||
| 195 | #define start_thread_som(regs, new_pc, new_sp) do { \ | ||
| 196 | unsigned long *sp = (unsigned long *)new_sp; \ | ||
| 197 | __u32 spaceid = (__u32)current->mm->context; \ | ||
| 198 | unsigned long pc = (unsigned long)new_pc; \ | ||
| 199 | /* offset pc for priv. level */ \ | ||
| 200 | pc |= 3; \ | ||
| 201 | \ | ||
| 202 | regs->iasq[0] = spaceid; \ | ||
| 203 | regs->iasq[1] = spaceid; \ | ||
| 204 | regs->iaoq[0] = pc; \ | ||
| 205 | regs->iaoq[1] = pc + 4; \ | ||
| 206 | regs->sr[2] = LINUX_GATEWAY_SPACE; \ | ||
| 207 | regs->sr[3] = 0xffff; \ | ||
| 208 | regs->sr[4] = spaceid; \ | ||
| 209 | regs->sr[5] = spaceid; \ | ||
| 210 | regs->sr[6] = spaceid; \ | ||
| 211 | regs->sr[7] = spaceid; \ | ||
| 212 | regs->gr[ 0] = USER_PSW; \ | ||
| 213 | regs->gr[30] = ((new_sp)+63)&~63; \ | ||
| 214 | regs->gr[31] = pc; \ | ||
| 215 | \ | ||
| 216 | get_user(regs->gr[26],&sp[0]); \ | ||
| 217 | get_user(regs->gr[25],&sp[-1]); \ | ||
| 218 | get_user(regs->gr[24],&sp[-2]); \ | ||
| 219 | get_user(regs->gr[23],&sp[-3]); \ | ||
| 220 | } while(0) | ||
| 221 | |||
| 222 | /* The ELF abi wants things done a "wee bit" differently than | 195 | /* The ELF abi wants things done a "wee bit" differently than |
| 223 | * som does. Supporting this behavior here avoids | 196 | * som does. Supporting this behavior here avoids |
| 224 | * having our own version of create_elf_tables. | 197 | * having our own version of create_elf_tables. |
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index ecc3ae1ca28e..dd4d1876a020 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h | |||
| @@ -49,16 +49,6 @@ | |||
| 49 | #define MADV_DONTFORK 10 /* don't inherit across fork */ | 49 | #define MADV_DONTFORK 10 /* don't inherit across fork */ |
| 50 | #define MADV_DOFORK 11 /* do inherit across fork */ | 50 | #define MADV_DOFORK 11 /* do inherit across fork */ |
| 51 | 51 | ||
| 52 | /* The range 12-64 is reserved for page size specification. */ | ||
| 53 | #define MADV_4K_PAGES 12 /* Use 4K pages */ | ||
| 54 | #define MADV_16K_PAGES 14 /* Use 16K pages */ | ||
| 55 | #define MADV_64K_PAGES 16 /* Use 64K pages */ | ||
| 56 | #define MADV_256K_PAGES 18 /* Use 256K pages */ | ||
| 57 | #define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */ | ||
| 58 | #define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */ | ||
| 59 | #define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */ | ||
| 60 | #define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */ | ||
| 61 | |||
| 62 | #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ | 52 | #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ |
| 63 | #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ | 53 | #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ |
| 64 | 54 | ||
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index 59001cea13f9..d2f62570a7b1 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
| @@ -290,6 +290,14 @@ int main(void) | |||
| 290 | DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); | 290 | DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT); |
| 291 | DEFINE(ASM_PT_INITIAL, PT_INITIAL); | 291 | DEFINE(ASM_PT_INITIAL, PT_INITIAL); |
| 292 | BLANK(); | 292 | BLANK(); |
| 293 | /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text | ||
| 294 | * and kernel data on physical huge pages */ | ||
| 295 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 296 | DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT); | ||
| 297 | #else | ||
| 298 | DEFINE(HUGEPAGE_SIZE, PAGE_SIZE); | ||
| 299 | #endif | ||
| 300 | BLANK(); | ||
| 293 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); | 301 | DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); |
| 294 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); | 302 | DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); |
| 295 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); | 303 | DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index c5ef4081b01d..623496c11756 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
| @@ -502,21 +502,38 @@ | |||
| 502 | STREG \pte,0(\ptp) | 502 | STREG \pte,0(\ptp) |
| 503 | .endm | 503 | .endm |
| 504 | 504 | ||
| 505 | /* We have (depending on the page size): | ||
| 506 | * - 38 to 52-bit Physical Page Number | ||
| 507 | * - 12 to 26-bit page offset | ||
| 508 | */ | ||
| 505 | /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) | 509 | /* bitshift difference between a PFN (based on kernel's PAGE_SIZE) |
| 506 | * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ | 510 | * to a CPU TLB 4k PFN (4k => 12 bits to shift) */ |
| 507 | #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) | 511 | #define PAGE_ADD_SHIFT (PAGE_SHIFT-12) |
| 512 | #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12) | ||
| 508 | 513 | ||
| 509 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ | 514 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
| 510 | .macro convert_for_tlb_insert20 pte | 515 | .macro convert_for_tlb_insert20 pte,tmp |
| 516 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 517 | copy \pte,\tmp | ||
| 518 | extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ | ||
| 519 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte | ||
| 520 | |||
| 521 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ | ||
| 522 | (63-58)+PAGE_ADD_SHIFT,\pte | ||
| 523 | extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0 | ||
| 524 | depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\ | ||
| 525 | (63-58)+PAGE_ADD_HUGE_SHIFT,\pte | ||
| 526 | #else /* Huge pages disabled */ | ||
| 511 | extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ | 527 | extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\ |
| 512 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte | 528 | 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte |
| 513 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ | 529 | depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\ |
| 514 | (63-58)+PAGE_ADD_SHIFT,\pte | 530 | (63-58)+PAGE_ADD_SHIFT,\pte |
| 531 | #endif | ||
| 515 | .endm | 532 | .endm |
| 516 | 533 | ||
| 517 | /* Convert the pte and prot to tlb insertion values. How | 534 | /* Convert the pte and prot to tlb insertion values. How |
| 518 | * this happens is quite subtle, read below */ | 535 | * this happens is quite subtle, read below */ |
| 519 | .macro make_insert_tlb spc,pte,prot | 536 | .macro make_insert_tlb spc,pte,prot,tmp |
| 520 | space_to_prot \spc \prot /* create prot id from space */ | 537 | space_to_prot \spc \prot /* create prot id from space */ |
| 521 | /* The following is the real subtlety. This is depositing | 538 | /* The following is the real subtlety. This is depositing |
| 522 | * T <-> _PAGE_REFTRAP | 539 | * T <-> _PAGE_REFTRAP |
| @@ -553,7 +570,7 @@ | |||
| 553 | depdi 1,12,1,\prot | 570 | depdi 1,12,1,\prot |
| 554 | 571 | ||
| 555 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ | 572 | /* Drop prot bits and convert to page addr for iitlbt and idtlbt */ |
| 556 | convert_for_tlb_insert20 \pte | 573 | convert_for_tlb_insert20 \pte \tmp |
| 557 | .endm | 574 | .endm |
| 558 | 575 | ||
| 559 | /* Identical macro to make_insert_tlb above, except it | 576 | /* Identical macro to make_insert_tlb above, except it |
| @@ -646,17 +663,12 @@ | |||
| 646 | 663 | ||
| 647 | 664 | ||
| 648 | /* | 665 | /* |
| 649 | * Align fault_vector_20 on 4K boundary so that both | 666 | * Fault_vectors are architecturally required to be aligned on a 2K |
| 650 | * fault_vector_11 and fault_vector_20 are on the | 667 | * boundary |
| 651 | * same page. This is only necessary as long as we | ||
| 652 | * write protect the kernel text, which we may stop | ||
| 653 | * doing once we use large page translations to cover | ||
| 654 | * the static part of the kernel address space. | ||
| 655 | */ | 668 | */ |
| 656 | 669 | ||
| 657 | .text | 670 | .text |
| 658 | 671 | .align 2048 | |
| 659 | .align 4096 | ||
| 660 | 672 | ||
| 661 | ENTRY(fault_vector_20) | 673 | ENTRY(fault_vector_20) |
| 662 | /* First vector is invalid (0) */ | 674 | /* First vector is invalid (0) */ |
| @@ -1147,7 +1159,7 @@ dtlb_miss_20w: | |||
| 1147 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w | 1159 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w |
| 1148 | update_accessed ptp,pte,t0,t1 | 1160 | update_accessed ptp,pte,t0,t1 |
| 1149 | 1161 | ||
| 1150 | make_insert_tlb spc,pte,prot | 1162 | make_insert_tlb spc,pte,prot,t1 |
| 1151 | 1163 | ||
| 1152 | idtlbt pte,prot | 1164 | idtlbt pte,prot |
| 1153 | 1165 | ||
| @@ -1173,7 +1185,7 @@ nadtlb_miss_20w: | |||
| 1173 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w | 1185 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w |
| 1174 | update_accessed ptp,pte,t0,t1 | 1186 | update_accessed ptp,pte,t0,t1 |
| 1175 | 1187 | ||
| 1176 | make_insert_tlb spc,pte,prot | 1188 | make_insert_tlb spc,pte,prot,t1 |
| 1177 | 1189 | ||
| 1178 | idtlbt pte,prot | 1190 | idtlbt pte,prot |
| 1179 | 1191 | ||
| @@ -1267,7 +1279,7 @@ dtlb_miss_20: | |||
| 1267 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 | 1279 | tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20 |
| 1268 | update_accessed ptp,pte,t0,t1 | 1280 | update_accessed ptp,pte,t0,t1 |
| 1269 | 1281 | ||
| 1270 | make_insert_tlb spc,pte,prot | 1282 | make_insert_tlb spc,pte,prot,t1 |
| 1271 | 1283 | ||
| 1272 | f_extend pte,t1 | 1284 | f_extend pte,t1 |
| 1273 | 1285 | ||
| @@ -1295,7 +1307,7 @@ nadtlb_miss_20: | |||
| 1295 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 | 1307 | tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20 |
| 1296 | update_accessed ptp,pte,t0,t1 | 1308 | update_accessed ptp,pte,t0,t1 |
| 1297 | 1309 | ||
| 1298 | make_insert_tlb spc,pte,prot | 1310 | make_insert_tlb spc,pte,prot,t1 |
| 1299 | 1311 | ||
| 1300 | f_extend pte,t1 | 1312 | f_extend pte,t1 |
| 1301 | 1313 | ||
| @@ -1404,7 +1416,7 @@ itlb_miss_20w: | |||
| 1404 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault | 1416 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault |
| 1405 | update_accessed ptp,pte,t0,t1 | 1417 | update_accessed ptp,pte,t0,t1 |
| 1406 | 1418 | ||
| 1407 | make_insert_tlb spc,pte,prot | 1419 | make_insert_tlb spc,pte,prot,t1 |
| 1408 | 1420 | ||
| 1409 | iitlbt pte,prot | 1421 | iitlbt pte,prot |
| 1410 | 1422 | ||
| @@ -1428,7 +1440,7 @@ naitlb_miss_20w: | |||
| 1428 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w | 1440 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w |
| 1429 | update_accessed ptp,pte,t0,t1 | 1441 | update_accessed ptp,pte,t0,t1 |
| 1430 | 1442 | ||
| 1431 | make_insert_tlb spc,pte,prot | 1443 | make_insert_tlb spc,pte,prot,t1 |
| 1432 | 1444 | ||
| 1433 | iitlbt pte,prot | 1445 | iitlbt pte,prot |
| 1434 | 1446 | ||
| @@ -1514,7 +1526,7 @@ itlb_miss_20: | |||
| 1514 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault | 1526 | tlb_lock spc,ptp,pte,t0,t1,itlb_fault |
| 1515 | update_accessed ptp,pte,t0,t1 | 1527 | update_accessed ptp,pte,t0,t1 |
| 1516 | 1528 | ||
| 1517 | make_insert_tlb spc,pte,prot | 1529 | make_insert_tlb spc,pte,prot,t1 |
| 1518 | 1530 | ||
| 1519 | f_extend pte,t1 | 1531 | f_extend pte,t1 |
| 1520 | 1532 | ||
| @@ -1534,7 +1546,7 @@ naitlb_miss_20: | |||
| 1534 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 | 1546 | tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20 |
| 1535 | update_accessed ptp,pte,t0,t1 | 1547 | update_accessed ptp,pte,t0,t1 |
| 1536 | 1548 | ||
| 1537 | make_insert_tlb spc,pte,prot | 1549 | make_insert_tlb spc,pte,prot,t1 |
| 1538 | 1550 | ||
| 1539 | f_extend pte,t1 | 1551 | f_extend pte,t1 |
| 1540 | 1552 | ||
| @@ -1566,7 +1578,7 @@ dbit_trap_20w: | |||
| 1566 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault | 1578 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault |
| 1567 | update_dirty ptp,pte,t1 | 1579 | update_dirty ptp,pte,t1 |
| 1568 | 1580 | ||
| 1569 | make_insert_tlb spc,pte,prot | 1581 | make_insert_tlb spc,pte,prot,t1 |
| 1570 | 1582 | ||
| 1571 | idtlbt pte,prot | 1583 | idtlbt pte,prot |
| 1572 | 1584 | ||
| @@ -1610,7 +1622,7 @@ dbit_trap_20: | |||
| 1610 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault | 1622 | tlb_lock spc,ptp,pte,t0,t1,dbit_fault |
| 1611 | update_dirty ptp,pte,t1 | 1623 | update_dirty ptp,pte,t1 |
| 1612 | 1624 | ||
| 1613 | make_insert_tlb spc,pte,prot | 1625 | make_insert_tlb spc,pte,prot,t1 |
| 1614 | 1626 | ||
| 1615 | f_extend pte,t1 | 1627 | f_extend pte,t1 |
| 1616 | 1628 | ||
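
The heart of the entry.S change is that convert_for_tlb_insert20 now needs a scratch register: extracting the PFN destroys the software _PAGE_HUGE bit, so the macro keeps a copy of the original pte and consults the copy to decide whether to overwrite the default page-size encoding with the huge one. A deliberately simplified C model of that control flow (field positions and encodings here are placeholders, not the real PA-RISC layout):

#include <stdint.h>
#include <stdio.h>

#define PAGE_HUGE  (1ull << 21)   /* software huge-page bit (placeholder) */
#define ENC_MASK   0x3ull         /* pretend 2-bit size-encoding field */
#define ENC_4K     0x0ull         /* _PAGE_SIZE_ENCODING_DEFAULT */
#define ENC_HUGE   0x1ull         /* _HUGE_PAGE_SIZE_ENCODING_DEFAULT */

static uint64_t convert_for_tlb_insert20(uint64_t pte)
{
        uint64_t tmp = pte;             /* copy first: the extraction
                                           below drops the software bits */

        pte = (pte >> 12) << 2;         /* crude "extrd,u": PFN into place */
        pte |= ENC_4K;                  /* "depdi": assume a 4 KB page */
        if (tmp & PAGE_HUGE)            /* conditional "depdi" on the copy */
                pte = (pte & ~ENC_MASK) | ENC_HUGE;
        return pte;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)
               convert_for_tlb_insert20(0x12345000ull | PAGE_HUGE));
        return 0;
}
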
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S index e7d64527aff9..75aa0db9f69e 100644 --- a/arch/parisc/kernel/head.S +++ b/arch/parisc/kernel/head.S | |||
| @@ -69,7 +69,7 @@ $bss_loop: | |||
| 69 | stw,ma %arg2,4(%r1) | 69 | stw,ma %arg2,4(%r1) |
| 70 | stw,ma %arg3,4(%r1) | 70 | stw,ma %arg3,4(%r1) |
| 71 | 71 | ||
| 72 | /* Initialize startup VM. Just map first 8/16 MB of memory */ | 72 | /* Initialize startup VM. Just map first 16/32 MB of memory */ |
| 73 | load32 PA(swapper_pg_dir),%r4 | 73 | load32 PA(swapper_pg_dir),%r4 |
| 74 | mtctl %r4,%cr24 /* Initialize kernel root pointer */ | 74 | mtctl %r4,%cr24 /* Initialize kernel root pointer */ |
| 75 | mtctl %r4,%cr25 /* Initialize user root pointer */ | 75 | mtctl %r4,%cr25 /* Initialize user root pointer */ |
| @@ -107,7 +107,7 @@ $bss_loop: | |||
| 107 | /* Now initialize the PTEs themselves. We use RWX for | 107 | /* Now initialize the PTEs themselves. We use RWX for |
| 108 | * everything ... it will get remapped correctly later */ | 108 | * everything ... it will get remapped correctly later */ |
| 109 | ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ | 109 | ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */ |
| 110 | ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ | 110 | load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */ |
| 111 | load32 PA(pg0),%r1 | 111 | load32 PA(pg0),%r1 |
| 112 | 112 | ||
| 113 | $pgt_fill_loop: | 113 | $pgt_fill_loop: |
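
The head.S change from ldi to load32 follows from the KERNEL_INITIAL_ORDER bump earlier in this patch: ldi is an ldo off(%r0) form with a signed 14-bit immediate, so it can only materialize constants up to 8191, while a 32 MB initial mapping of 4 KB pages needs a PFN count of 8192. A quick check of the arithmetic (the immediate width is per the PA-RISC manuals; verify against your assembler):

#include <stdio.h>

int main(void)
{
        int kernel_initial_order = 25;  /* 32 MB, the new 64-bit value */
        int page_shift = 12;            /* 4 KB pages */
        long pfn_count = 1L << (kernel_initial_order - page_shift);
        long ldi_max = (1L << 13) - 1;  /* signed 14-bit immediate */

        printf("PFN count %ld %s ldi's +%ld limit\n", pfn_count,
               pfn_count > ldi_max ? "exceeds" : "fits within", ldi_max);
        return 0;
}
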
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index 3c63a820fcda..b9d75d9fa9ac 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
| @@ -42,9 +42,9 @@ | |||
| 42 | * We are not doing SEGREL32 handling correctly. According to the ABI, we | 42 | * We are not doing SEGREL32 handling correctly. According to the ABI, we |
| 43 | * should do a value offset, like this: | 43 | * should do a value offset, like this: |
| 44 | * if (in_init(me, (void *)val)) | 44 | * if (in_init(me, (void *)val)) |
| 45 | * val -= (uint32_t)me->module_init; | 45 | * val -= (uint32_t)me->init_layout.base; |
| 46 | * else | 46 | * else |
| 47 | * val -= (uint32_t)me->module_core; | 47 | * val -= (uint32_t)me->core_layout.base; |
| 48 | * However, SEGREL32 is used only for PARISC unwind entries, and we want | 48 | * However, SEGREL32 is used only for PARISC unwind entries, and we want |
| 49 | * those entries to have an absolute address, and not just an offset. | 49 | * those entries to have an absolute address, and not just an offset. |
| 50 | * | 50 | * |
| @@ -100,14 +100,14 @@ | |||
| 100 | * or init pieces the location is */ | 100 | * or init pieces the location is */ |
| 101 | static inline int in_init(struct module *me, void *loc) | 101 | static inline int in_init(struct module *me, void *loc) |
| 102 | { | 102 | { |
| 103 | return (loc >= me->module_init && | 103 | return (loc >= me->init_layout.base && |
| 104 | loc <= (me->module_init + me->init_size)); | 104 | loc <= (me->init_layout.base + me->init_layout.size)); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline int in_core(struct module *me, void *loc) | 107 | static inline int in_core(struct module *me, void *loc) |
| 108 | { | 108 | { |
| 109 | return (loc >= me->module_core && | 109 | return (loc >= me->core_layout.base && |
| 110 | loc <= (me->module_core + me->core_size)); | 110 | loc <= (me->core_layout.base + me->core_layout.size)); |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static inline int in_local(struct module *me, void *loc) | 113 | static inline int in_local(struct module *me, void *loc) |
| @@ -367,13 +367,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr, | |||
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | /* align things a bit */ | 369 | /* align things a bit */ |
| 370 | me->core_size = ALIGN(me->core_size, 16); | 370 | me->core_layout.size = ALIGN(me->core_layout.size, 16); |
| 371 | me->arch.got_offset = me->core_size; | 371 | me->arch.got_offset = me->core_layout.size; |
| 372 | me->core_size += gots * sizeof(struct got_entry); | 372 | me->core_layout.size += gots * sizeof(struct got_entry); |
| 373 | 373 | ||
| 374 | me->core_size = ALIGN(me->core_size, 16); | 374 | me->core_layout.size = ALIGN(me->core_layout.size, 16); |
| 375 | me->arch.fdesc_offset = me->core_size; | 375 | me->arch.fdesc_offset = me->core_layout.size; |
| 376 | me->core_size += fdescs * sizeof(Elf_Fdesc); | 376 | me->core_layout.size += fdescs * sizeof(Elf_Fdesc); |
| 377 | 377 | ||
| 378 | me->arch.got_max = gots; | 378 | me->arch.got_max = gots; |
| 379 | me->arch.fdesc_max = fdescs; | 379 | me->arch.fdesc_max = fdescs; |
| @@ -391,7 +391,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) | |||
| 391 | 391 | ||
| 392 | BUG_ON(value == 0); | 392 | BUG_ON(value == 0); |
| 393 | 393 | ||
| 394 | got = me->module_core + me->arch.got_offset; | 394 | got = me->core_layout.base + me->arch.got_offset; |
| 395 | for (i = 0; got[i].addr; i++) | 395 | for (i = 0; got[i].addr; i++) |
| 396 | if (got[i].addr == value) | 396 | if (got[i].addr == value) |
| 397 | goto out; | 397 | goto out; |
| @@ -409,7 +409,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend) | |||
| 409 | #ifdef CONFIG_64BIT | 409 | #ifdef CONFIG_64BIT |
| 410 | static Elf_Addr get_fdesc(struct module *me, unsigned long value) | 410 | static Elf_Addr get_fdesc(struct module *me, unsigned long value) |
| 411 | { | 411 | { |
| 412 | Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; | 412 | Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset; |
| 413 | 413 | ||
| 414 | if (!value) { | 414 | if (!value) { |
| 415 | printk(KERN_ERR "%s: zero OPD requested!\n", me->name); | 415 | printk(KERN_ERR "%s: zero OPD requested!\n", me->name); |
| @@ -427,7 +427,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value) | |||
| 427 | 427 | ||
| 428 | /* Create new one */ | 428 | /* Create new one */ |
| 429 | fdesc->addr = value; | 429 | fdesc->addr = value; |
| 430 | fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; | 430 | fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; |
| 431 | return (Elf_Addr)fdesc; | 431 | return (Elf_Addr)fdesc; |
| 432 | } | 432 | } |
| 433 | #endif /* CONFIG_64BIT */ | 433 | #endif /* CONFIG_64BIT */ |
| @@ -839,7 +839,7 @@ register_unwind_table(struct module *me, | |||
| 839 | 839 | ||
| 840 | table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; | 840 | table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; |
| 841 | end = table + sechdrs[me->arch.unwind_section].sh_size; | 841 | end = table + sechdrs[me->arch.unwind_section].sh_size; |
| 842 | gp = (Elf_Addr)me->module_core + me->arch.got_offset; | 842 | gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset; |
| 843 | 843 | ||
| 844 | DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", | 844 | DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", |
| 845 | me->arch.unwind_section, table, end, gp); | 845 | me->arch.unwind_section, table, end, gp); |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 72a3c658ad7b..f7ea626e29c9 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
| @@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p) | |||
| 130 | printk(KERN_INFO "The 32-bit Kernel has started...\n"); | 130 | printk(KERN_INFO "The 32-bit Kernel has started...\n"); |
| 131 | #endif | 131 | #endif |
| 132 | 132 | ||
| 133 | printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024)); | 133 | printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ", |
| 134 | (int)(PAGE_SIZE / 1024)); | ||
| 135 | #ifdef CONFIG_HUGETLB_PAGE | ||
| 136 | printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size", | ||
| 137 | 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20)); | ||
| 138 | #else | ||
| 139 | printk(KERN_CONT "disabled"); | ||
| 140 | #endif | ||
| 141 | printk(KERN_CONT ".\n"); | ||
| 142 | |||
| 134 | 143 | ||
| 135 | pdc_console_init(); | 144 | pdc_console_init(); |
| 136 | 145 | ||
| @@ -377,6 +386,7 @@ arch_initcall(parisc_init); | |||
| 377 | void start_parisc(void) | 386 | void start_parisc(void) |
| 378 | { | 387 | { |
| 379 | extern void start_kernel(void); | 388 | extern void start_kernel(void); |
| 389 | extern void early_trap_init(void); | ||
| 380 | 390 | ||
| 381 | int ret, cpunum; | 391 | int ret, cpunum; |
| 382 | struct pdc_coproc_cfg coproc_cfg; | 392 | struct pdc_coproc_cfg coproc_cfg; |
| @@ -397,6 +407,8 @@ void start_parisc(void) | |||
| 397 | panic("must have an fpu to boot linux"); | 407 | panic("must have an fpu to boot linux"); |
| 398 | } | 408 | } |
| 399 | 409 | ||
| 410 | early_trap_init(); /* initialize checksum of fault_vector */ | ||
| 411 | |||
| 400 | start_kernel(); | 412 | start_kernel(); |
| 401 | // not reached | 413 | // not reached |
| 402 | } | 414 | } |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 0b8d26d3ba43..3fbd7252a4b2 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -369,7 +369,7 @@ tracesys_exit: | |||
| 369 | ldo -16(%r30),%r29 /* Reference param save area */ | 369 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 370 | #endif | 370 | #endif |
| 371 | ldo TASK_REGS(%r1),%r26 | 371 | ldo TASK_REGS(%r1),%r26 |
| 372 | bl do_syscall_trace_exit,%r2 | 372 | BL do_syscall_trace_exit,%r2 |
| 373 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ | 373 | STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ |
| 374 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ | 374 | ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ |
| 375 | LDREG TI_TASK(%r1), %r1 | 375 | LDREG TI_TASK(%r1), %r1 |
| @@ -390,7 +390,7 @@ tracesys_sigexit: | |||
| 390 | #ifdef CONFIG_64BIT | 390 | #ifdef CONFIG_64BIT |
| 391 | ldo -16(%r30),%r29 /* Reference param save area */ | 391 | ldo -16(%r30),%r29 /* Reference param save area */ |
| 392 | #endif | 392 | #endif |
| 393 | bl do_syscall_trace_exit,%r2 | 393 | BL do_syscall_trace_exit,%r2 |
| 394 | ldo TASK_REGS(%r1),%r26 | 394 | ldo TASK_REGS(%r1),%r26 |
| 395 | 395 | ||
| 396 | ldil L%syscall_exit_rfi,%r1 | 396 | ldil L%syscall_exit_rfi,%r1 |
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index b99b39f1da02..553b09855cfd 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c | |||
| @@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs) | |||
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | 809 | ||
| 810 | int __init check_ivt(void *iva) | 810 | void __init initialize_ivt(const void *iva) |
| 811 | { | 811 | { |
| 812 | extern u32 os_hpmc_size; | 812 | extern u32 os_hpmc_size; |
| 813 | extern const u32 os_hpmc[]; | 813 | extern const u32 os_hpmc[]; |
| @@ -818,8 +818,8 @@ int __init check_ivt(void *iva) | |||
| 818 | u32 *hpmcp; | 818 | u32 *hpmcp; |
| 819 | u32 length; | 819 | u32 length; |
| 820 | 820 | ||
| 821 | if (strcmp((char *)iva, "cows can fly")) | 821 | if (strcmp((const char *)iva, "cows can fly")) |
| 822 | return -1; | 822 | panic("IVT invalid"); |
| 823 | 823 | ||
| 824 | ivap = (u32 *)iva; | 824 | ivap = (u32 *)iva; |
| 825 | 825 | ||
| @@ -839,28 +839,23 @@ int __init check_ivt(void *iva) | |||
| 839 | check += ivap[i]; | 839 | check += ivap[i]; |
| 840 | 840 | ||
| 841 | ivap[5] = -check; | 841 | ivap[5] = -check; |
| 842 | |||
| 843 | return 0; | ||
| 844 | } | 842 | } |
| 845 | 843 | ||
| 846 | #ifndef CONFIG_64BIT | ||
| 847 | extern const void fault_vector_11; | ||
| 848 | #endif | ||
| 849 | extern const void fault_vector_20; | ||
| 850 | 844 | ||
| 851 | void __init trap_init(void) | 845 | /* early_trap_init() is called before we set up kernel mappings and |
| 846 | * write-protect the kernel */ | ||
| 847 | void __init early_trap_init(void) | ||
| 852 | { | 848 | { |
| 853 | void *iva; | 849 | extern const void fault_vector_20; |
| 854 | 850 | ||
| 855 | if (boot_cpu_data.cpu_type >= pcxu) | 851 | #ifndef CONFIG_64BIT |
| 856 | iva = (void *) &fault_vector_20; | 852 | extern const void fault_vector_11; |
| 857 | else | 853 | initialize_ivt(&fault_vector_11); |
| 858 | #ifdef CONFIG_64BIT | ||
| 859 | panic("Can't boot 64-bit OS on PA1.1 processor!"); | ||
| 860 | #else | ||
| 861 | iva = (void *) &fault_vector_11; | ||
| 862 | #endif | 854 | #endif |
| 863 | 855 | ||
| 864 | if (check_ivt(iva)) | 856 | initialize_ivt(&fault_vector_20); |
| 865 | panic("IVT invalid"); | 857 | } |
| 858 | |||
| 859 | void __init trap_init(void) | ||
| 860 | { | ||
| 866 | } | 861 | } |
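
The traps.c restructuring exists because the checksum patch-up must happen while the vectors are still writable: initialize_ivt() is now reached from early_trap_init(), which start_parisc() calls before start_kernel() gets a chance to write-protect (and now huge-map) the kernel text. As far as the visible lines show, the scheme stores the negated running sum in word 5 so the whole block sums to zero; a toy model (the loop bounds and the firmware's validation rule are assumptions):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ivt[8] = { 0x100, 0x200, 0x300, 0x400, 0x500,
                            0 /* slot 5: checksum */, 0x600, 0x700 };
        uint32_t check = 0;

        for (int i = 0; i < 8; i++)
                check += ivt[i];
        ivt[5] = -check;                /* as in: ivap[5] = -check; */

        check = 0;
        for (int i = 0; i < 8; i++)
                check += ivt[i];
        printf("sum after fixup: %u\n", check);   /* 0 */
        return 0;
}
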
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 0dacc5ca555a..308f29081d46 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S | |||
| @@ -60,7 +60,7 @@ SECTIONS | |||
| 60 | EXIT_DATA | 60 | EXIT_DATA |
| 61 | } | 61 | } |
| 62 | PERCPU_SECTION(8) | 62 | PERCPU_SECTION(8) |
| 63 | . = ALIGN(PAGE_SIZE); | 63 | . = ALIGN(HUGEPAGE_SIZE); |
| 64 | __init_end = .; | 64 | __init_end = .; |
| 65 | /* freed after init ends here */ | 65 | /* freed after init ends here */ |
| 66 | 66 | ||
| @@ -116,7 +116,7 @@ SECTIONS | |||
| 116 | * that we can properly leave these | 116 | * that we can properly leave these |
| 117 | * as writable | 117 | * as writable |
| 118 | */ | 118 | */ |
| 119 | . = ALIGN(PAGE_SIZE); | 119 | . = ALIGN(HUGEPAGE_SIZE); |
| 120 | data_start = .; | 120 | data_start = .; |
| 121 | 121 | ||
| 122 | EXCEPTION_TABLE(8) | 122 | EXCEPTION_TABLE(8) |
| @@ -135,8 +135,11 @@ SECTIONS | |||
| 135 | _edata = .; | 135 | _edata = .; |
| 136 | 136 | ||
| 137 | /* BSS */ | 137 | /* BSS */ |
| 138 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8) | 138 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE) |
| 139 | |||
| 140 | /* bootmap is allocated in setup_bootmem() directly behind bss. */ | ||
| 139 | 141 | ||
| 142 | . = ALIGN(HUGEPAGE_SIZE); | ||
| 140 | _end = . ; | 143 | _end = . ; |
| 141 | 144 | ||
| 142 | STABS_DEBUG | 145 | STABS_DEBUG |
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile index 758ceefb373a..134393de69d2 100644 --- a/arch/parisc/mm/Makefile +++ b/arch/parisc/mm/Makefile | |||
| @@ -3,3 +3,4 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | obj-y := init.o fault.o ioremap.o | 5 | obj-y := init.o fault.o ioremap.o |
| 6 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c new file mode 100644 index 000000000000..f6fdc77a72bd --- /dev/null +++ b/arch/parisc/mm/hugetlbpage.c | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * PARISC64 Huge TLB page support. | ||
| 3 | * | ||
| 4 | * This parisc implementation is heavily based on the SPARC and x86 code. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2015 Helge Deller <deller@gmx.de> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/fs.h> | ||
| 10 | #include <linux/mm.h> | ||
| 11 | #include <linux/hugetlb.h> | ||
| 12 | #include <linux/pagemap.h> | ||
| 13 | #include <linux/sysctl.h> | ||
| 14 | |||
| 15 | #include <asm/mman.h> | ||
| 16 | #include <asm/pgalloc.h> | ||
| 17 | #include <asm/tlb.h> | ||
| 18 | #include <asm/tlbflush.h> | ||
| 19 | #include <asm/cacheflush.h> | ||
| 20 | #include <asm/mmu_context.h> | ||
| 21 | |||
| 22 | |||
| 23 | unsigned long | ||
| 24 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
| 25 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
| 26 | { | ||
| 27 | struct hstate *h = hstate_file(file); | ||
| 28 | |||
| 29 | if (len & ~huge_page_mask(h)) | ||
| 30 | return -EINVAL; | ||
| 31 | if (len > TASK_SIZE) | ||
| 32 | return -ENOMEM; | ||
| 33 | |||
| 34 | if (flags & MAP_FIXED) | ||
| 35 | if (prepare_hugepage_range(file, addr, len)) | ||
| 36 | return -EINVAL; | ||
| 37 | |||
| 38 | if (addr) | ||
| 39 | addr = ALIGN(addr, huge_page_size(h)); | ||
| 40 | |||
| 41 | /* we need to make sure the colouring is OK */ | ||
| 42 | return arch_get_unmapped_area(file, addr, len, pgoff, flags); | ||
| 43 | } | ||
| 44 | |||
| 45 | |||
| 46 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
| 47 | unsigned long addr, unsigned long sz) | ||
| 48 | { | ||
| 49 | pgd_t *pgd; | ||
| 50 | pud_t *pud; | ||
| 51 | pmd_t *pmd; | ||
| 52 | pte_t *pte = NULL; | ||
| 53 | |||
| 54 | /* We must align the address, because our caller will run | ||
| 55 | * set_huge_pte_at() on whatever we return, which writes out | ||
| 56 | * all of the sub-ptes for the hugepage range. So we have | ||
| 57 | * to give it the first such sub-pte. | ||
| 58 | */ | ||
| 59 | addr &= HPAGE_MASK; | ||
| 60 | |||
| 61 | pgd = pgd_offset(mm, addr); | ||
| 62 | pud = pud_alloc(mm, pgd, addr); | ||
| 63 | if (pud) { | ||
| 64 | pmd = pmd_alloc(mm, pud, addr); | ||
| 65 | if (pmd) | ||
| 66 | pte = pte_alloc_map(mm, NULL, pmd, addr); | ||
| 67 | } | ||
| 68 | return pte; | ||
| 69 | } | ||
| 70 | |||
| 71 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
| 72 | { | ||
| 73 | pgd_t *pgd; | ||
| 74 | pud_t *pud; | ||
| 75 | pmd_t *pmd; | ||
| 76 | pte_t *pte = NULL; | ||
| 77 | |||
| 78 | addr &= HPAGE_MASK; | ||
| 79 | |||
| 80 | pgd = pgd_offset(mm, addr); | ||
| 81 | if (!pgd_none(*pgd)) { | ||
| 82 | pud = pud_offset(pgd, addr); | ||
| 83 | if (!pud_none(*pud)) { | ||
| 84 | pmd = pmd_offset(pud, addr); | ||
| 85 | if (!pmd_none(*pmd)) | ||
| 86 | pte = pte_offset_map(pmd, addr); | ||
| 87 | } | ||
| 88 | } | ||
| 89 | return pte; | ||
| 90 | } | ||
| 91 | |||
| 92 | /* Purge data and instruction TLB entries. Must be called holding | ||
| 93 | * the pa_tlb_lock. The TLB purge instructions are slow on SMP | ||
| 94 | * machines since the purge must be broadcast to all CPUs. | ||
| 95 | */ | ||
| 96 | static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr) | ||
| 97 | { | ||
| 98 | int i; | ||
| 99 | |||
| 100 | /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate | ||
| 101 | * Linux standard huge pages (e.g. 2 MB) */ | ||
| 102 | BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT); | ||
| 103 | |||
| 104 | addr &= HPAGE_MASK; | ||
| 105 | addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT; | ||
| 106 | |||
| 107 | for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) { | ||
| 108 | mtsp(mm->context, 1); | ||
| 109 | pdtlb(addr); | ||
| 110 | if (unlikely(split_tlb)) | ||
| 111 | pitlb(addr); | ||
| 112 | addr += (1UL << REAL_HPAGE_SHIFT); | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 116 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
| 117 | pte_t *ptep, pte_t entry) | ||
| 118 | { | ||
| 119 | unsigned long addr_start; | ||
| 120 | int i; | ||
| 121 | |||
| 122 | addr &= HPAGE_MASK; | ||
| 123 | addr_start = addr; | ||
| 124 | |||
| 125 | for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { | ||
| 126 | /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry) | ||
| 127 | * instead, but then we get double locking on pa_tlb_lock. */ | ||
| 128 | *ptep = entry; | ||
| 129 | ptep++; | ||
| 130 | |||
| 131 | /* Drop the PAGE_SIZE/non-huge tlb entry */ | ||
| 132 | purge_tlb_entries(mm, addr); | ||
| 133 | |||
| 134 | addr += PAGE_SIZE; | ||
| 135 | pte_val(entry) += PAGE_SIZE; | ||
| 136 | } | ||
| 137 | |||
| 138 | purge_tlb_entries_huge(mm, addr_start); | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, | ||
| 143 | pte_t *ptep) | ||
| 144 | { | ||
| 145 | pte_t entry; | ||
| 146 | |||
| 147 | entry = *ptep; | ||
| 148 | set_huge_pte_at(mm, addr, ptep, __pte(0)); | ||
| 149 | |||
| 150 | return entry; | ||
| 151 | } | ||
| 152 | |||
| 153 | int pmd_huge(pmd_t pmd) | ||
| 154 | { | ||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | int pud_huge(pud_t pud) | ||
| 159 | { | ||
| 160 | return 0; | ||
| 161 | } | ||
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index c5fec4890fdf..1b366c477687 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
| @@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr, | |||
| 409 | unsigned long vaddr; | 409 | unsigned long vaddr; |
| 410 | unsigned long ro_start; | 410 | unsigned long ro_start; |
| 411 | unsigned long ro_end; | 411 | unsigned long ro_end; |
| 412 | unsigned long fv_addr; | 412 | unsigned long kernel_end; |
| 413 | unsigned long gw_addr; | ||
| 414 | extern const unsigned long fault_vector_20; | ||
| 415 | extern void * const linux_gateway_page; | ||
| 416 | 413 | ||
| 417 | ro_start = __pa((unsigned long)_text); | 414 | ro_start = __pa((unsigned long)_text); |
| 418 | ro_end = __pa((unsigned long)&data_start); | 415 | ro_end = __pa((unsigned long)&data_start); |
| 419 | fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; | 416 | kernel_end = __pa((unsigned long)&_end); |
| 420 | gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; | ||
| 421 | 417 | ||
| 422 | end_paddr = start_paddr + size; | 418 | end_paddr = start_paddr + size; |
| 423 | 419 | ||
| @@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr, | |||
| 475 | for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { | 471 | for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { |
| 476 | pte_t pte; | 472 | pte_t pte; |
| 477 | 473 | ||
| 478 | /* | ||
| 479 | * Map the fault vector writable so we can | ||
| 480 | * write the HPMC checksum. | ||
| 481 | */ | ||
| 482 | if (force) | 474 | if (force) |
| 483 | pte = __mk_pte(address, pgprot); | 475 | pte = __mk_pte(address, pgprot); |
| 484 | else if (parisc_text_address(vaddr) && | 476 | else if (parisc_text_address(vaddr)) { |
| 485 | address != fv_addr) | ||
| 486 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); | 477 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); |
| 478 | if (address >= ro_start && address < kernel_end) | ||
| 479 | pte = pte_mkhuge(pte); | ||
| 480 | } | ||
| 487 | else | 481 | else |
| 488 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) | 482 | #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) |
| 489 | if (address >= ro_start && address < ro_end | 483 | if (address >= ro_start && address < ro_end) { |
| 490 | && address != fv_addr | 484 | pte = __mk_pte(address, PAGE_KERNEL_EXEC); |
| 491 | && address != gw_addr) | 485 | pte = pte_mkhuge(pte); |
| 492 | pte = __mk_pte(address, PAGE_KERNEL_RO); | 486 | } else |
| 493 | else | ||
| 494 | #endif | 487 | #endif |
| 488 | { | ||
| 495 | pte = __mk_pte(address, pgprot); | 489 | pte = __mk_pte(address, pgprot); |
| 490 | if (address >= ro_start && address < kernel_end) | ||
| 491 | pte = pte_mkhuge(pte); | ||
| 492 | } | ||
| 496 | 493 | ||
| 497 | if (address >= end_paddr) { | 494 | if (address >= end_paddr) { |
| 498 | if (force) | 495 | if (force) |
| @@ -536,15 +533,12 @@ void free_initmem(void) | |||
| 536 | 533 | ||
| 537 | /* force the kernel to see the new TLB entries */ | 534 | /* force the kernel to see the new TLB entries */ |
| 538 | __flush_tlb_range(0, init_begin, init_end); | 535 | __flush_tlb_range(0, init_begin, init_end); |
| 539 | /* Attempt to catch anyone trying to execute code here | 536 | |
| 540 | * by filling the page with BRK insns. | ||
| 541 | */ | ||
| 542 | memset((void *)init_begin, 0x00, init_end - init_begin); | ||
| 543 | /* finally dump all the instructions which were cached, since the | 537 | /* finally dump all the instructions which were cached, since the |
| 544 | * pages are no-longer executable */ | 538 | * pages are no-longer executable */ |
| 545 | flush_icache_range(init_begin, init_end); | 539 | flush_icache_range(init_begin, init_end); |
| 546 | 540 | ||
| 547 | free_initmem_default(-1); | 541 | free_initmem_default(POISON_FREE_INITMEM); |
| 548 | 542 | ||
| 549 | /* set up a new led state on systems shipped LED State panel */ | 543 | /* set up a new led state on systems shipped LED State panel */ |
| 550 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); | 544 | pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); |
| @@ -728,8 +722,8 @@ static void __init pagetable_init(void) | |||
| 728 | unsigned long size; | 722 | unsigned long size; |
| 729 | 723 | ||
| 730 | start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; | 724 | start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT; |
| 731 | end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT); | ||
| 732 | size = pmem_ranges[range].pages << PAGE_SHIFT; | 725 | size = pmem_ranges[range].pages << PAGE_SHIFT; |
| 726 | end_paddr = start_paddr + size; | ||
| 733 | 727 | ||
| 734 | map_pages((unsigned long)__va(start_paddr), start_paddr, | 728 | map_pages((unsigned long)__va(start_paddr), start_paddr, |
| 735 | size, PAGE_KERNEL, 0); | 729 | size, PAGE_KERNEL, 0); |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index c9e26cb264f4..f2b0b1b0c72a 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
| @@ -382,3 +382,4 @@ COMPAT_SYS(shmat) | |||
| 382 | SYSCALL(shmdt) | 382 | SYSCALL(shmdt) |
| 383 | SYSCALL(shmget) | 383 | SYSCALL(shmget) |
| 384 | COMPAT_SYS(shmctl) | 384 | COMPAT_SYS(shmctl) |
| 385 | SYSCALL(mlock2) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 6d8f8023ac27..4b6b8ace18e0 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
| 13 | 13 | ||
| 14 | 14 | ||
| 15 | #define __NR_syscalls 378 | 15 | #define __NR_syscalls 379 |
| 16 | 16 | ||
| 17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
| 18 | #define NR_syscalls __NR_syscalls | 18 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 81579e93c659..1effea5193d6 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
| @@ -400,5 +400,6 @@ | |||
| 400 | #define __NR_shmdt 375 | 400 | #define __NR_shmdt 375 |
| 401 | #define __NR_shmget 376 | 401 | #define __NR_shmget 376 |
| 402 | #define __NR_shmctl 377 | 402 | #define __NR_shmctl 377 |
| 403 | #define __NR_mlock2 378 | ||
| 403 | 404 | ||
| 404 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 405 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
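
The three powerpc hunks above wire up the new mlock2() system call as number 378. Until libc grows a wrapper it can be exercised through syscall(2); a sketch (MLOCK_ONFAULT's value is taken from the mlock2 series and may not be in your headers yet, and the 378 fallback is the powerpc number assigned by this patch):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MLOCK_ONFAULT
#define MLOCK_ONFAULT 0x01   /* lock pages as they are faulted in */
#endif
#ifndef SYS_mlock2
#define SYS_mlock2 378       /* powerpc number added by this patch */
#endif

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED ||
            syscall(SYS_mlock2, buf, page, MLOCK_ONFAULT) != 0) {
                perror("mlock2");
                return 1;
        }
        buf[0] = 1;          /* this fault locks the page */
        puts("mlock2(MLOCK_ONFAULT) OK");
        return 0;
}
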
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c index c94d2e018d84..2c01665eb410 100644 --- a/arch/powerpc/kernel/module_32.c +++ b/arch/powerpc/kernel/module_32.c | |||
| @@ -188,8 +188,8 @@ static uint32_t do_plt_call(void *location, | |||
| 188 | 188 | ||
| 189 | pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); | 189 | pr_debug("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); |
| 190 | /* Init, or core PLT? */ | 190 | /* Init, or core PLT? */ |
| 191 | if (location >= mod->module_core | 191 | if (location >= mod->core_layout.base |
| 192 | && location < mod->module_core + mod->core_size) | 192 | && location < mod->core_layout.base + mod->core_layout.size) |
| 193 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; | 193 | entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; |
| 194 | else | 194 | else |
| 195 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; | 195 | entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; |
| @@ -296,7 +296,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
| 296 | } | 296 | } |
| 297 | #ifdef CONFIG_DYNAMIC_FTRACE | 297 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 298 | module->arch.tramp = | 298 | module->arch.tramp = |
| 299 | do_plt_call(module->module_core, | 299 | do_plt_call(module->core_layout.base, |
| 300 | (unsigned long)ftrace_caller, | 300 | (unsigned long)ftrace_caller, |
| 301 | sechdrs, module); | 301 | sechdrs, module); |
| 302 | #endif | 302 | #endif |
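These powerpc hunks (and the s390, livepatch, and x86 ones below) are part of a tree-wide rename: the loose module_core/core_size/core_ro_size fields of struct module were folded into a struct module_layout, of which struct module now embeds a core_layout and an init_layout instance. Roughly, as a sketch from memory (field comments mine):

	struct module_layout {
		void *base;		/* was mod->module_core */
		unsigned int size;	/* was mod->core_size */
		unsigned int text_size;	/* executable portion */
		unsigned int ro_size;	/* was mod->core_ro_size (text + rodata) */
	};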
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 0c1a679314dd..7873e171457c 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c | |||
| @@ -159,11 +159,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, | |||
| 159 | 159 | ||
| 160 | /* Increase core size by size of got & plt and set start | 160 | /* Increase core size by size of got & plt and set start |
| 161 | offsets for got and plt. */ | 161 | offsets for got and plt. */ |
| 162 | me->core_size = ALIGN(me->core_size, 4); | 162 | me->core_layout.size = ALIGN(me->core_layout.size, 4); |
| 163 | me->arch.got_offset = me->core_size; | 163 | me->arch.got_offset = me->core_layout.size; |
| 164 | me->core_size += me->arch.got_size; | 164 | me->core_layout.size += me->arch.got_size; |
| 165 | me->arch.plt_offset = me->core_size; | 165 | me->arch.plt_offset = me->core_layout.size; |
| 166 | me->core_size += me->arch.plt_size; | 166 | me->core_layout.size += me->arch.plt_size; |
| 167 | return 0; | 167 | return 0; |
| 168 | } | 168 | } |
| 169 | 169 | ||
| @@ -279,7 +279,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 279 | if (info->got_initialized == 0) { | 279 | if (info->got_initialized == 0) { |
| 280 | Elf_Addr *gotent; | 280 | Elf_Addr *gotent; |
| 281 | 281 | ||
| 282 | gotent = me->module_core + me->arch.got_offset + | 282 | gotent = me->core_layout.base + me->arch.got_offset + |
| 283 | info->got_offset; | 283 | info->got_offset; |
| 284 | *gotent = val; | 284 | *gotent = val; |
| 285 | info->got_initialized = 1; | 285 | info->got_initialized = 1; |
| @@ -302,7 +302,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 302 | rc = apply_rela_bits(loc, val, 0, 64, 0); | 302 | rc = apply_rela_bits(loc, val, 0, 64, 0); |
| 303 | else if (r_type == R_390_GOTENT || | 303 | else if (r_type == R_390_GOTENT || |
| 304 | r_type == R_390_GOTPLTENT) { | 304 | r_type == R_390_GOTPLTENT) { |
| 305 | val += (Elf_Addr) me->module_core - loc; | 305 | val += (Elf_Addr) me->core_layout.base - loc; |
| 306 | rc = apply_rela_bits(loc, val, 1, 32, 1); | 306 | rc = apply_rela_bits(loc, val, 1, 32, 1); |
| 307 | } | 307 | } |
| 308 | break; | 308 | break; |
| @@ -315,7 +315,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 315 | case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ | 315 | case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ |
| 316 | if (info->plt_initialized == 0) { | 316 | if (info->plt_initialized == 0) { |
| 317 | unsigned int *ip; | 317 | unsigned int *ip; |
| 318 | ip = me->module_core + me->arch.plt_offset + | 318 | ip = me->core_layout.base + me->arch.plt_offset + |
| 319 | info->plt_offset; | 319 | info->plt_offset; |
| 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ | 320 | ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */ |
| 321 | ip[1] = 0x100a0004; | 321 | ip[1] = 0x100a0004; |
| @@ -334,7 +334,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 334 | val - loc + 0xffffUL < 0x1ffffeUL) || | 334 | val - loc + 0xffffUL < 0x1ffffeUL) || |
| 335 | (r_type == R_390_PLT32DBL && | 335 | (r_type == R_390_PLT32DBL && |
| 336 | val - loc + 0xffffffffULL < 0x1fffffffeULL))) | 336 | val - loc + 0xffffffffULL < 0x1fffffffeULL))) |
| 337 | val = (Elf_Addr) me->module_core + | 337 | val = (Elf_Addr) me->core_layout.base + |
| 338 | me->arch.plt_offset + | 338 | me->arch.plt_offset + |
| 339 | info->plt_offset; | 339 | info->plt_offset; |
| 340 | val += rela->r_addend - loc; | 340 | val += rela->r_addend - loc; |
| @@ -356,7 +356,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 356 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ | 356 | case R_390_GOTOFF32: /* 32 bit offset to GOT. */ |
| 357 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ | 357 | case R_390_GOTOFF64: /* 64 bit offset to GOT. */ |
| 358 | val = val + rela->r_addend - | 358 | val = val + rela->r_addend - |
| 359 | ((Elf_Addr) me->module_core + me->arch.got_offset); | 359 | ((Elf_Addr) me->core_layout.base + me->arch.got_offset); |
| 360 | if (r_type == R_390_GOTOFF16) | 360 | if (r_type == R_390_GOTOFF16) |
| 361 | rc = apply_rela_bits(loc, val, 0, 16, 0); | 361 | rc = apply_rela_bits(loc, val, 0, 16, 0); |
| 362 | else if (r_type == R_390_GOTOFF32) | 362 | else if (r_type == R_390_GOTOFF32) |
| @@ -366,7 +366,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab, | |||
| 366 | break; | 366 | break; |
| 367 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ | 367 | case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ |
| 368 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ | 368 | case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */ |
| 369 | val = (Elf_Addr) me->module_core + me->arch.got_offset + | 369 | val = (Elf_Addr) me->core_layout.base + me->arch.got_offset + |
| 370 | rela->r_addend - loc; | 370 | rela->r_addend - loc; |
| 371 | if (r_type == R_390_GOTPC) | 371 | if (r_type == R_390_GOTPC) |
| 372 | rc = apply_rela_bits(loc, val, 1, 32, 0); | 372 | rc = apply_rela_bits(loc, val, 1, 32, 0); |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 373e32346d68..6a75352f453c 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
| @@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq) | |||
| 1030 | src_id, 0); | 1030 | src_id, 0); |
| 1031 | 1031 | ||
| 1032 | /* sending vcpu invalid */ | 1032 | /* sending vcpu invalid */ |
| 1033 | if (src_id >= KVM_MAX_VCPUS || | 1033 | if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL) |
| 1034 | kvm_get_vcpu(vcpu->kvm, src_id) == NULL) | ||
| 1035 | return -EINVAL; | 1034 | return -EINVAL; |
| 1036 | 1035 | ||
| 1037 | if (sclp.has_sigpif) | 1036 | if (sclp.has_sigpif) |
| @@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu, | |||
| 1110 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, | 1109 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, |
| 1111 | irq->u.emerg.code, 0); | 1110 | irq->u.emerg.code, 0); |
| 1112 | 1111 | ||
| 1112 | /* sending vcpu invalid */ | ||
| 1113 | if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL) | ||
| 1114 | return -EINVAL; | ||
| 1115 | |||
| 1113 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); | 1116 | set_bit(irq->u.emerg.code, li->sigp_emerg_pending); |
| 1114 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); | 1117 | set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); |
| 1115 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); | 1118 | atomic_or(CPUSTAT_EXT_INT, li->cpuflags); |
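kvm_get_vcpu() indexes the vcpus array by creation order, so the old code needed a separate KVM_MAX_VCPUS bound check and still conflated array position with the architectural CPU address. The replacement looks the vcpu up by its id and returns NULL when there is no match; roughly, from memory of include/linux/kvm_host.h:

	static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
	{
		struct kvm_vcpu *vcpu;
		int i;

		/* linear scan over online vcpus; NULL if no vcpu has this id */
		kvm_for_each_vcpu(i, vcpu, kvm)
			if (vcpu->vcpu_id == id)
				return vcpu;
		return NULL;
	}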
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 8fe2f1c722dc..846589281b04 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) | |||
| 342 | r = 0; | 342 | r = 0; |
| 343 | break; | 343 | break; |
| 344 | case KVM_CAP_S390_VECTOR_REGISTERS: | 344 | case KVM_CAP_S390_VECTOR_REGISTERS: |
| 345 | if (MACHINE_HAS_VX) { | 345 | mutex_lock(&kvm->lock); |
| 346 | if (atomic_read(&kvm->online_vcpus)) { | ||
| 347 | r = -EBUSY; | ||
| 348 | } else if (MACHINE_HAS_VX) { | ||
| 346 | set_kvm_facility(kvm->arch.model.fac->mask, 129); | 349 | set_kvm_facility(kvm->arch.model.fac->mask, 129); |
| 347 | set_kvm_facility(kvm->arch.model.fac->list, 129); | 350 | set_kvm_facility(kvm->arch.model.fac->list, 129); |
| 348 | r = 0; | 351 | r = 0; |
| 349 | } else | 352 | } else |
| 350 | r = -EINVAL; | 353 | r = -EINVAL; |
| 354 | mutex_unlock(&kvm->lock); | ||
| 351 | VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", | 355 | VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", |
| 352 | r ? "(not available)" : "(success)"); | 356 | r ? "(not available)" : "(success)"); |
| 353 | break; | 357 | break; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 77191b85ea7a..d76b51cb4b62 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
| @@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
| 660 | 660 | ||
| 661 | kvm_s390_get_regs_rre(vcpu, ®1, ®2); | 661 | kvm_s390_get_regs_rre(vcpu, ®1, ®2); |
| 662 | 662 | ||
| 663 | if (!MACHINE_HAS_PFMF) | 663 | if (!test_kvm_facility(vcpu->kvm, 8)) |
| 664 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); | 664 | return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); |
| 665 | 665 | ||
| 666 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) | 666 | if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index da690b69f9fe..77c22d685c7a 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
| @@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code, | |||
| 291 | u16 cpu_addr, u32 parameter, u64 *status_reg) | 291 | u16 cpu_addr, u32 parameter, u64 *status_reg) |
| 292 | { | 292 | { |
| 293 | int rc; | 293 | int rc; |
| 294 | struct kvm_vcpu *dst_vcpu; | 294 | struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); |
| 295 | 295 | ||
| 296 | if (cpu_addr >= KVM_MAX_VCPUS) | ||
| 297 | return SIGP_CC_NOT_OPERATIONAL; | ||
| 298 | |||
| 299 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | ||
| 300 | if (!dst_vcpu) | 296 | if (!dst_vcpu) |
| 301 | return SIGP_CC_NOT_OPERATIONAL; | 297 | return SIGP_CC_NOT_OPERATIONAL; |
| 302 | 298 | ||
| @@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
| 478 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); | 474 | trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); |
| 479 | 475 | ||
| 480 | if (order_code == SIGP_EXTERNAL_CALL) { | 476 | if (order_code == SIGP_EXTERNAL_CALL) { |
| 481 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 477 | dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr); |
| 482 | BUG_ON(dest_vcpu == NULL); | 478 | BUG_ON(dest_vcpu == NULL); |
| 483 | 479 | ||
| 484 | kvm_s390_vcpu_wakeup(dest_vcpu); | 480 | kvm_s390_vcpu_wakeup(dest_vcpu); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 9f3905697f12..690b4027e17c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #define MSR_IA32_PERFCTR0 0x000000c1 | 35 | #define MSR_IA32_PERFCTR0 0x000000c1 |
| 36 | #define MSR_IA32_PERFCTR1 0x000000c2 | 36 | #define MSR_IA32_PERFCTR1 0x000000c2 |
| 37 | #define MSR_FSB_FREQ 0x000000cd | 37 | #define MSR_FSB_FREQ 0x000000cd |
| 38 | #define MSR_NHM_PLATFORM_INFO 0x000000ce | 38 | #define MSR_PLATFORM_INFO 0x000000ce |
| 39 | 39 | ||
| 40 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | 40 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
| 41 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) | 41 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) |
| @@ -44,7 +44,6 @@ | |||
| 44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) | 44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
| 45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) | 45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
| 46 | 46 | ||
| 47 | #define MSR_PLATFORM_INFO 0x000000ce | ||
| 48 | #define MSR_MTRRcap 0x000000fe | 47 | #define MSR_MTRRcap 0x000000fe |
| 49 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 48 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
| 50 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e | 49 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4ddd780aeac9..c2b7522cbf35 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
| @@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap); | |||
| 273 | 273 | ||
| 274 | static __always_inline void setup_smap(struct cpuinfo_x86 *c) | 274 | static __always_inline void setup_smap(struct cpuinfo_x86 *c) |
| 275 | { | 275 | { |
| 276 | unsigned long eflags; | 276 | unsigned long eflags = native_save_fl(); |
| 277 | 277 | ||
| 278 | /* This should have been cleared long ago */ | 278 | /* This should have been cleared long ago */ |
| 279 | raw_local_save_flags(eflags); | ||
| 280 | BUG_ON(eflags & X86_EFLAGS_AC); | 279 | BUG_ON(eflags & X86_EFLAGS_AC); |
| 281 | 280 | ||
| 282 | if (cpu_has(c, X86_FEATURE_SMAP)) { | 281 | if (cpu_has(c, X86_FEATURE_SMAP)) { |
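The SMAP sanity check now reads EFLAGS with native_save_fl() instead of raw_local_save_flags(). The likely motivation (my assumption, not stated in the hunk) is that the paravirtualized save_fl hook is only guaranteed to report the interrupt flag, so the AC bit must come from a real pushf. A sketch of the native helper:

	static inline unsigned long native_save_fl(void)
	{
		unsigned long flags;

		/* the pushf/pop pair reads the full EFLAGS, AC bit included */
		asm volatile("pushf ; pop %0"
			     : "=rm" (flags)
			     : /* no input */
			     : "memory");
		return flags;
	}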
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index ef29b742cea7..31c6a60505e6 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c | |||
| @@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame, | |||
| 385 | */ | 385 | */ |
| 386 | void fpu__init_prepare_fx_sw_frame(void) | 386 | void fpu__init_prepare_fx_sw_frame(void) |
| 387 | { | 387 | { |
| 388 | int fsave_header_size = sizeof(struct fregs_state); | ||
| 389 | int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; | 388 | int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; |
| 390 | 389 | ||
| 391 | if (config_enabled(CONFIG_X86_32)) | ||
| 392 | size += fsave_header_size; | ||
| 393 | |||
| 394 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; | 390 | fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; |
| 395 | fx_sw_reserved.extended_size = size; | 391 | fx_sw_reserved.extended_size = size; |
| 396 | fx_sw_reserved.xfeatures = xfeatures_mask; | 392 | fx_sw_reserved.xfeatures = xfeatures_mask; |
| 397 | fx_sw_reserved.xstate_size = xstate_size; | 393 | fx_sw_reserved.xstate_size = xstate_size; |
| 398 | 394 | ||
| 399 | if (config_enabled(CONFIG_IA32_EMULATION)) { | 395 | if (config_enabled(CONFIG_IA32_EMULATION) || |
| 396 | config_enabled(CONFIG_X86_32)) { | ||
| 397 | int fsave_header_size = sizeof(struct fregs_state); | ||
| 398 | |||
| 400 | fx_sw_reserved_ia32 = fx_sw_reserved; | 399 | fx_sw_reserved_ia32 = fx_sw_reserved; |
| 401 | fx_sw_reserved_ia32.extended_size += fsave_header_size; | 400 | fx_sw_reserved_ia32.extended_size = size + fsave_header_size; |
| 402 | } | 401 | } |
| 403 | } | 402 | } |
| 404 | 403 | ||
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 6454f2731b56..70fc312221fc 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c | |||
| @@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature) | |||
| 694 | if (!boot_cpu_has(X86_FEATURE_XSAVE)) | 694 | if (!boot_cpu_has(X86_FEATURE_XSAVE)) |
| 695 | return NULL; | 695 | return NULL; |
| 696 | 696 | ||
| 697 | xsave = ¤t->thread.fpu.state.xsave; | ||
| 698 | /* | 697 | /* |
| 699 | * We should not ever be requesting features that we | 698 | * We should not ever be requesting features that we |
| 700 | * have not enabled. Remember that pcntxt_mask is | 699 | * have not enabled. Remember that pcntxt_mask is |
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index d1d35ccffed3..bcc06e82a593 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c | |||
| @@ -41,8 +41,8 @@ int klp_write_module_reloc(struct module *mod, unsigned long type, | |||
| 41 | int ret, numpages, size = 4; | 41 | int ret, numpages, size = 4; |
| 42 | bool readonly; | 42 | bool readonly; |
| 43 | unsigned long val; | 43 | unsigned long val; |
| 44 | unsigned long core = (unsigned long)mod->module_core; | 44 | unsigned long core = (unsigned long)mod->core_layout.base; |
| 45 | unsigned long core_size = mod->core_size; | 45 | unsigned long core_size = mod->core_layout.size; |
| 46 | 46 | ||
| 47 | switch (type) { | 47 | switch (type) { |
| 48 | case R_X86_64_NONE: | 48 | case R_X86_64_NONE: |
| @@ -72,7 +72,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type, | |||
| 72 | readonly = false; | 72 | readonly = false; |
| 73 | 73 | ||
| 74 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | 74 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX |
| 75 | if (loc < core + mod->core_ro_size) | 75 | if (loc < core + mod->core_layout.ro_size) |
| 76 | readonly = true; | 76 | readonly = true; |
| 77 | #endif | 77 | #endif |
| 78 | 78 | ||
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index 94ea120fa21f..87e1762e2bca 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
| @@ -278,6 +278,12 @@ trace: | |||
| 278 | /* save_mcount_regs fills in first two parameters */ | 278 | /* save_mcount_regs fills in first two parameters */ |
| 279 | save_mcount_regs | 279 | save_mcount_regs |
| 280 | 280 | ||
| 281 | /* | ||
| 282 | * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not | ||
| 283 | * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the | ||
| 284 | * ip and parent ip are used and the list function is called when | ||
| 285 | * function tracing is enabled. | ||
| 286 | */ | ||
| 281 | call *ftrace_trace_function | 287 | call *ftrace_trace_function |
| 282 | 288 | ||
| 283 | restore_mcount_regs | 289 | restore_mcount_regs |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 87acc5221740..af823a388c19 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) | |||
| 7394 | 7394 | ||
| 7395 | switch (type) { | 7395 | switch (type) { |
| 7396 | case VMX_VPID_EXTENT_ALL_CONTEXT: | 7396 | case VMX_VPID_EXTENT_ALL_CONTEXT: |
| 7397 | if (get_vmcs12(vcpu)->virtual_processor_id == 0) { | ||
| 7398 | nested_vmx_failValid(vcpu, | ||
| 7399 | VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); | ||
| 7400 | return 1; | ||
| 7401 | } | ||
| 7402 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); | 7397 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); |
| 7403 | nested_vmx_succeed(vcpu); | 7398 | nested_vmx_succeed(vcpu); |
| 7404 | break; | 7399 | break; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 00462bd63129..eed32283d22c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, | |||
| 2763 | return 0; | 2763 | return 0; |
| 2764 | } | 2764 | } |
| 2765 | 2765 | ||
| 2766 | static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) | ||
| 2767 | { | ||
| 2768 | return (!lapic_in_kernel(vcpu) || | ||
| 2769 | kvm_apic_accept_pic_intr(vcpu)); | ||
| 2770 | } | ||
| 2771 | |||
| 2772 | /* | ||
| 2773 | * If userspace requested an interrupt window, check that the | ||
| 2774 | * interrupt window is open. | ||
| 2775 | * | ||
| 2776 | * No need to exit to userspace if we already have an interrupt queued. | ||
| 2777 | */ | ||
| 2778 | static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) | ||
| 2779 | { | ||
| 2780 | return kvm_arch_interrupt_allowed(vcpu) && | ||
| 2781 | !kvm_cpu_has_interrupt(vcpu) && | ||
| 2782 | !kvm_event_needs_reinjection(vcpu) && | ||
| 2783 | kvm_cpu_accept_dm_intr(vcpu); | ||
| 2784 | } | ||
| 2785 | |||
| 2766 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | 2786 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
| 2767 | struct kvm_interrupt *irq) | 2787 | struct kvm_interrupt *irq) |
| 2768 | { | 2788 | { |
| @@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | |||
| 2786 | return -EEXIST; | 2806 | return -EEXIST; |
| 2787 | 2807 | ||
| 2788 | vcpu->arch.pending_external_vector = irq->irq; | 2808 | vcpu->arch.pending_external_vector = irq->irq; |
| 2809 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
| 2789 | return 0; | 2810 | return 0; |
| 2790 | } | 2811 | } |
| 2791 | 2812 | ||
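The added kvm_make_request(KVM_REQ_EVENT, vcpu) ensures the vcpu loop re-evaluates pending events after userspace queues an interrupt, rather than relying on some later request to flush it. For context, the request mechanism is just a per-vcpu bitmask; roughly:

	static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
	{
		/* checked (and cleared) by vcpu_enter_guest() before each entry */
		set_bit(req, &vcpu->requests);
	}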
| @@ -5910,23 +5931,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) | |||
| 5910 | return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); | 5931 | return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); |
| 5911 | } | 5932 | } |
| 5912 | 5933 | ||
| 5913 | /* | ||
| 5914 | * Check if userspace requested an interrupt window, and that the | ||
| 5915 | * interrupt window is open. | ||
| 5916 | * | ||
| 5917 | * No need to exit to userspace if we already have an interrupt queued. | ||
| 5918 | */ | ||
| 5919 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) | 5934 | static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) |
| 5920 | { | 5935 | { |
| 5921 | if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm)) | 5936 | return vcpu->run->request_interrupt_window && |
| 5922 | return false; | 5937 | likely(!pic_in_kernel(vcpu->kvm)); |
| 5923 | |||
| 5924 | if (kvm_cpu_has_interrupt(vcpu)) | ||
| 5925 | return false; | ||
| 5926 | |||
| 5927 | return (irqchip_split(vcpu->kvm) | ||
| 5928 | ? kvm_apic_accept_pic_intr(vcpu) | ||
| 5929 | : kvm_arch_interrupt_allowed(vcpu)); | ||
| 5930 | } | 5938 | } |
| 5931 | 5939 | ||
| 5932 | static void post_kvm_run_save(struct kvm_vcpu *vcpu) | 5940 | static void post_kvm_run_save(struct kvm_vcpu *vcpu) |
| @@ -5937,17 +5945,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) | |||
| 5937 | kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; | 5945 | kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; |
| 5938 | kvm_run->cr8 = kvm_get_cr8(vcpu); | 5946 | kvm_run->cr8 = kvm_get_cr8(vcpu); |
| 5939 | kvm_run->apic_base = kvm_get_apic_base(vcpu); | 5947 | kvm_run->apic_base = kvm_get_apic_base(vcpu); |
| 5940 | if (!irqchip_in_kernel(vcpu->kvm)) | 5948 | kvm_run->ready_for_interrupt_injection = |
| 5941 | kvm_run->ready_for_interrupt_injection = | 5949 | pic_in_kernel(vcpu->kvm) || |
| 5942 | kvm_arch_interrupt_allowed(vcpu) && | 5950 | kvm_vcpu_ready_for_interrupt_injection(vcpu); |
| 5943 | !kvm_cpu_has_interrupt(vcpu) && | ||
| 5944 | !kvm_event_needs_reinjection(vcpu); | ||
| 5945 | else if (!pic_in_kernel(vcpu->kvm)) | ||
| 5946 | kvm_run->ready_for_interrupt_injection = | ||
| 5947 | kvm_apic_accept_pic_intr(vcpu) && | ||
| 5948 | !kvm_cpu_has_interrupt(vcpu); | ||
| 5949 | else | ||
| 5950 | kvm_run->ready_for_interrupt_injection = 1; | ||
| 5951 | } | 5951 | } |
| 5952 | 5952 | ||
| 5953 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) | 5953 | static void update_cr8_intercept(struct kvm_vcpu *vcpu) |
| @@ -6360,8 +6360,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm, | |||
| 6360 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | 6360 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu) |
| 6361 | { | 6361 | { |
| 6362 | int r; | 6362 | int r; |
| 6363 | bool req_int_win = !lapic_in_kernel(vcpu) && | 6363 | bool req_int_win = |
| 6364 | vcpu->run->request_interrupt_window; | 6364 | dm_request_for_irq_injection(vcpu) && |
| 6365 | kvm_cpu_accept_dm_intr(vcpu); | ||
| 6366 | |||
| 6365 | bool req_immediate_exit = false; | 6367 | bool req_immediate_exit = false; |
| 6366 | 6368 | ||
| 6367 | if (vcpu->requests) { | 6369 | if (vcpu->requests) { |
| @@ -6663,7 +6665,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu) | |||
| 6663 | if (kvm_cpu_has_pending_timer(vcpu)) | 6665 | if (kvm_cpu_has_pending_timer(vcpu)) |
| 6664 | kvm_inject_pending_timer_irqs(vcpu); | 6666 | kvm_inject_pending_timer_irqs(vcpu); |
| 6665 | 6667 | ||
| 6666 | if (dm_request_for_irq_injection(vcpu)) { | 6668 | if (dm_request_for_irq_injection(vcpu) && |
| 6669 | kvm_vcpu_ready_for_interrupt_injection(vcpu)) { | ||
| 6667 | r = 0; | 6670 | r = 0; |
| 6668 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | 6671 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
| 6669 | ++vcpu->stat.request_irq_exits; | 6672 | ++vcpu->stat.request_irq_exits; |
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index b0ae85f90f10..1202d5ca2fb5 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
| @@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm, | |||
| 586 | } | 586 | } |
| 587 | 587 | ||
| 588 | /* | 588 | /* |
| 589 | * We only want to do a 4-byte get_user() on 32-bit. Otherwise, | ||
| 590 | * we might run off the end of the bounds table if we are on | ||
| 591 | * a 64-bit kernel and try to get 8 bytes. | ||
| 592 | */ | ||
| 593 | int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, | ||
| 594 | long __user *bd_entry_ptr) | ||
| 595 | { | ||
| 596 | u32 bd_entry_32; | ||
| 597 | int ret; | ||
| 598 | |||
| 599 | if (is_64bit_mm(mm)) | ||
| 600 | return get_user(*bd_entry_ret, bd_entry_ptr); | ||
| 601 | |||
| 602 | /* | ||
| 603 | * Note that get_user() uses the type of the *pointer* to | ||
| 604 | * establish the size of the get, not the destination. | ||
| 605 | */ | ||
| 606 | ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr); | ||
| 607 | *bd_entry_ret = bd_entry_32; | ||
| 608 | return ret; | ||
| 609 | } | ||
| 610 | |||
| 611 | /* | ||
| 589 | * Get the base of bounds tables pointed by specific bounds | 612 | * Get the base of bounds tables pointed by specific bounds |
| 590 | * directory entry. | 613 | * directory entry. |
| 591 | */ | 614 | */ |
| @@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm, | |||
| 605 | int need_write = 0; | 628 | int need_write = 0; |
| 606 | 629 | ||
| 607 | pagefault_disable(); | 630 | pagefault_disable(); |
| 608 | ret = get_user(bd_entry, bd_entry_ptr); | 631 | ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr); |
| 609 | pagefault_enable(); | 632 | pagefault_enable(); |
| 610 | if (!ret) | 633 | if (!ret) |
| 611 | break; | 634 | break; |
| @@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm, | |||
| 700 | */ | 723 | */ |
| 701 | static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) | 724 | static inline unsigned long bd_entry_virt_space(struct mm_struct *mm) |
| 702 | { | 725 | { |
| 703 | unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits); | 726 | unsigned long long virt_space; |
| 704 | if (is_64bit_mm(mm)) | 727 | unsigned long long GB = (1ULL << 30); |
| 705 | return virt_space / MPX_BD_NR_ENTRIES_64; | 728 | |
| 706 | else | 729 | /* |
| 707 | return virt_space / MPX_BD_NR_ENTRIES_32; | 730 | * This covers 32-bit emulation as well as 32-bit kernels |
| 731 | * running on 64-bit hardware. | ||
| 732 | */ | ||
| 733 | if (!is_64bit_mm(mm)) | ||
| 734 | return (4ULL * GB) / MPX_BD_NR_ENTRIES_32; | ||
| 735 | |||
| 736 | /* | ||
| 737 | * 'x86_virt_bits' returns what the hardware is capable | ||
| 738 | * of, and returns the full >32-bit address space when | ||
| 739 | * running 32-bit kernels on 64-bit hardware. | ||
| 740 | */ | ||
| 741 | virt_space = (1ULL << boot_cpu_data.x86_virt_bits); | ||
| 742 | return virt_space / MPX_BD_NR_ENTRIES_64; | ||
| 708 | } | 743 | } |
| 709 | 744 | ||
| 710 | /* | 745 | /* |
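The new get_user_bd_entry() leans on the subtle property its comment spells out: get_user() derives its access width from the pointer's type, not from the destination variable. A minimal kernel-context sketch (sizing_demo and uaddr are hypothetical):

	static int sizing_demo(void __user *uaddr)
	{
		u64 dst;
		u32 dst32;
		int err;

		err = get_user(dst, (u64 __user *)uaddr);   /* 8-byte load */
		if (err)
			return err;
		err = get_user(dst32, (u32 __user *)uaddr); /* 4-byte load */
		return err;
	}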
diff --git a/block/blk-merge.c b/block/blk-merge.c index de5716d8e525..41a55ba0d78e 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -76,6 +76,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 76 | struct bio_vec bv, bvprv, *bvprvp = NULL; | 76 | struct bio_vec bv, bvprv, *bvprvp = NULL; |
| 77 | struct bvec_iter iter; | 77 | struct bvec_iter iter; |
| 78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; | 78 | unsigned seg_size = 0, nsegs = 0, sectors = 0; |
| 79 | unsigned front_seg_size = bio->bi_seg_front_size; | ||
| 80 | bool do_split = true; | ||
| 81 | struct bio *new = NULL; | ||
| 79 | 82 | ||
| 80 | bio_for_each_segment(bv, bio, iter) { | 83 | bio_for_each_segment(bv, bio, iter) { |
| 81 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) | 84 | if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q)) |
| @@ -98,7 +101,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 98 | 101 | ||
| 99 | seg_size += bv.bv_len; | 102 | seg_size += bv.bv_len; |
| 100 | bvprv = bv; | 103 | bvprv = bv; |
| 101 | bvprvp = &bv; | 104 | bvprvp = &bvprv; |
| 102 | sectors += bv.bv_len >> 9; | 105 | sectors += bv.bv_len >> 9; |
| 103 | continue; | 106 | continue; |
| 104 | } | 107 | } |
| @@ -108,16 +111,29 @@ new_segment: | |||
| 108 | 111 | ||
| 109 | nsegs++; | 112 | nsegs++; |
| 110 | bvprv = bv; | 113 | bvprv = bv; |
| 111 | bvprvp = &bv; | 114 | bvprvp = &bvprv; |
| 112 | seg_size = bv.bv_len; | 115 | seg_size = bv.bv_len; |
| 113 | sectors += bv.bv_len >> 9; | 116 | sectors += bv.bv_len >> 9; |
| 117 | |||
| 118 | if (nsegs == 1 && seg_size > front_seg_size) | ||
| 119 | front_seg_size = seg_size; | ||
| 114 | } | 120 | } |
| 115 | 121 | ||
| 116 | *segs = nsegs; | 122 | do_split = false; |
| 117 | return NULL; | ||
| 118 | split: | 123 | split: |
| 119 | *segs = nsegs; | 124 | *segs = nsegs; |
| 120 | return bio_split(bio, sectors, GFP_NOIO, bs); | 125 | |
| 126 | if (do_split) { | ||
| 127 | new = bio_split(bio, sectors, GFP_NOIO, bs); | ||
| 128 | if (new) | ||
| 129 | bio = new; | ||
| 130 | } | ||
| 131 | |||
| 132 | bio->bi_seg_front_size = front_seg_size; | ||
| 133 | if (seg_size > bio->bi_seg_back_size) | ||
| 134 | bio->bi_seg_back_size = seg_size; | ||
| 135 | |||
| 136 | return do_split ? new : NULL; | ||
| 121 | } | 137 | } |
| 122 | 138 | ||
| 123 | void blk_queue_split(struct request_queue *q, struct bio **bio, | 139 | void blk_queue_split(struct request_queue *q, struct bio **bio, |
| @@ -412,6 +428,12 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, | |||
| 412 | if (sg) | 428 | if (sg) |
| 413 | sg_mark_end(sg); | 429 | sg_mark_end(sg); |
| 414 | 430 | ||
| 431 | /* | ||
| 432 | * Something must be wrong if the computed number of | ||
| 433 | * segments is bigger than the request's number of physical segments | ||
| 434 | */ | ||
| 435 | WARN_ON(nsegs > rq->nr_phys_segments); | ||
| 436 | |||
| 415 | return nsegs; | 437 | return nsegs; |
| 416 | } | 438 | } |
| 417 | EXPORT_SYMBOL(blk_rq_map_sg); | 439 | EXPORT_SYMBOL(blk_rq_map_sg); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 3ae09de62f19..6d6f8feb48c0 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1291,15 +1291,16 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1291 | blk_mq_bio_to_request(rq, bio); | 1291 | blk_mq_bio_to_request(rq, bio); |
| 1292 | 1292 | ||
| 1293 | /* | 1293 | /* |
| 1294 | * we do limited plugging. If bio can be merged, do merge. | 1294 | * We do limited plugging. If the bio can be merged, do that. |
| 1295 | * Otherwise the existing request in the plug list will be | 1295 | * Otherwise the existing request in the plug list will be |
| 1296 | * issued. So the plug list will have one request at most | 1296 | * issued. So the plug list will have one request at most |
| 1297 | */ | 1297 | */ |
| 1298 | if (plug) { | 1298 | if (plug) { |
| 1299 | /* | 1299 | /* |
| 1300 | * The plug list might get flushed before this. If that | 1300 | * The plug list might get flushed before this. If that |
| 1301 | * happens, same_queue_rq is invalid and plug list is empty | 1301 | * happens, same_queue_rq is invalid and plug list is |
| 1302 | **/ | 1302 | * empty |
| 1303 | */ | ||
| 1303 | if (same_queue_rq && !list_empty(&plug->mq_list)) { | 1304 | if (same_queue_rq && !list_empty(&plug->mq_list)) { |
| 1304 | old_rq = same_queue_rq; | 1305 | old_rq = same_queue_rq; |
| 1305 | list_del_init(&old_rq->queuelist); | 1306 | list_del_init(&old_rq->queuelist); |
| @@ -1380,12 +1381,15 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1380 | blk_mq_bio_to_request(rq, bio); | 1381 | blk_mq_bio_to_request(rq, bio); |
| 1381 | if (!request_count) | 1382 | if (!request_count) |
| 1382 | trace_block_plug(q); | 1383 | trace_block_plug(q); |
| 1383 | else if (request_count >= BLK_MAX_REQUEST_COUNT) { | 1384 | |
| 1385 | blk_mq_put_ctx(data.ctx); | ||
| 1386 | |||
| 1387 | if (request_count >= BLK_MAX_REQUEST_COUNT) { | ||
| 1384 | blk_flush_plug_list(plug, false); | 1388 | blk_flush_plug_list(plug, false); |
| 1385 | trace_block_plug(q); | 1389 | trace_block_plug(q); |
| 1386 | } | 1390 | } |
| 1391 | |||
| 1387 | list_add_tail(&rq->queuelist, &plug->mq_list); | 1392 | list_add_tail(&rq->queuelist, &plug->mq_list); |
| 1388 | blk_mq_put_ctx(data.ctx); | ||
| 1389 | return cookie; | 1393 | return cookie; |
| 1390 | } | 1394 | } |
| 1391 | 1395 | ||
diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 246dfb16c3d9..aa40aa93381b 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c | |||
| @@ -158,11 +158,13 @@ void blk_abort_request(struct request *req) | |||
| 158 | { | 158 | { |
| 159 | if (blk_mark_rq_complete(req)) | 159 | if (blk_mark_rq_complete(req)) |
| 160 | return; | 160 | return; |
| 161 | blk_delete_timer(req); | 161 | |
| 162 | if (req->q->mq_ops) | 162 | if (req->q->mq_ops) { |
| 163 | blk_mq_rq_timed_out(req, false); | 163 | blk_mq_rq_timed_out(req, false); |
| 164 | else | 164 | } else { |
| 165 | blk_delete_timer(req); | ||
| 165 | blk_rq_timed_out(req); | 166 | blk_rq_timed_out(req); |
| 167 | } | ||
| 166 | } | 168 | } |
| 167 | EXPORT_SYMBOL_GPL(blk_abort_request); | 169 | EXPORT_SYMBOL_GPL(blk_abort_request); |
| 168 | 170 | ||
diff --git a/block/blk.h b/block/blk.h index da722eb786df..c43926d3d74d 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -72,8 +72,6 @@ void blk_dequeue_request(struct request *rq); | |||
| 72 | void __blk_queue_free_tags(struct request_queue *q); | 72 | void __blk_queue_free_tags(struct request_queue *q); |
| 73 | bool __blk_end_bidi_request(struct request *rq, int error, | 73 | bool __blk_end_bidi_request(struct request *rq, int error, |
| 74 | unsigned int nr_bytes, unsigned int bidi_bytes); | 74 | unsigned int nr_bytes, unsigned int bidi_bytes); |
| 75 | int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
| 76 | void blk_queue_exit(struct request_queue *q); | ||
| 77 | void blk_freeze_queue(struct request_queue *q); | 75 | void blk_freeze_queue(struct request_queue *q); |
| 78 | 76 | ||
| 79 | static inline void blk_queue_enter_live(struct request_queue *q) | 77 | static inline void blk_queue_enter_live(struct request_queue *q) |
diff --git a/block/noop-iosched.c b/block/noop-iosched.c index 3de89d4690f3..a163c487cf38 100644 --- a/block/noop-iosched.c +++ b/block/noop-iosched.c | |||
| @@ -21,10 +21,10 @@ static void noop_merged_requests(struct request_queue *q, struct request *rq, | |||
| 21 | static int noop_dispatch(struct request_queue *q, int force) | 21 | static int noop_dispatch(struct request_queue *q, int force) |
| 22 | { | 22 | { |
| 23 | struct noop_data *nd = q->elevator->elevator_data; | 23 | struct noop_data *nd = q->elevator->elevator_data; |
| 24 | struct request *rq; | ||
| 24 | 25 | ||
| 25 | if (!list_empty(&nd->queue)) { | 26 | rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); |
| 26 | struct request *rq; | 27 | if (rq) { |
| 27 | rq = list_entry(nd->queue.next, struct request, queuelist); | ||
| 28 | list_del_init(&rq->queuelist); | 28 | list_del_init(&rq->queuelist); |
| 29 | elv_dispatch_sort(q, rq); | 29 | elv_dispatch_sort(q, rq); |
| 30 | return 1; | 30 | return 1; |
| @@ -46,7 +46,7 @@ noop_former_request(struct request_queue *q, struct request *rq) | |||
| 46 | 46 | ||
| 47 | if (rq->queuelist.prev == &nd->queue) | 47 | if (rq->queuelist.prev == &nd->queue) |
| 48 | return NULL; | 48 | return NULL; |
| 49 | return list_entry(rq->queuelist.prev, struct request, queuelist); | 49 | return list_prev_entry(rq, queuelist); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static struct request * | 52 | static struct request * |
| @@ -56,7 +56,7 @@ noop_latter_request(struct request_queue *q, struct request *rq) | |||
| 56 | 56 | ||
| 57 | if (rq->queuelist.next == &nd->queue) | 57 | if (rq->queuelist.next == &nd->queue) |
| 58 | return NULL; | 58 | return NULL; |
| 59 | return list_entry(rq->queuelist.next, struct request, queuelist); | 59 | return list_next_entry(rq, queuelist); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) | 62 | static int noop_init_queue(struct request_queue *q, struct elevator_type *e) |
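All three noop-iosched conversions swap open-coded list_entry() arithmetic for the generic helpers, which also fold in the emptiness check. Roughly what they expand to in include/linux/list.h:

	#define list_first_entry_or_null(ptr, type, member) \
		(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)

	#define list_next_entry(pos, member) \
		list_entry((pos)->member.next, typeof(*(pos)), member)

	#define list_prev_entry(pos, member) \
		list_entry((pos)->member.prev, typeof(*(pos)), member)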
diff --git a/block/partitions/mac.c b/block/partitions/mac.c index c2c48ec64b27..621317ac4d59 100644 --- a/block/partitions/mac.c +++ b/block/partitions/mac.c | |||
| @@ -32,7 +32,7 @@ int mac_partition(struct parsed_partitions *state) | |||
| 32 | Sector sect; | 32 | Sector sect; |
| 33 | unsigned char *data; | 33 | unsigned char *data; |
| 34 | int slot, blocks_in_map; | 34 | int slot, blocks_in_map; |
| 35 | unsigned secsize; | 35 | unsigned secsize, datasize, partoffset; |
| 36 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
| 37 | int found_root = 0; | 37 | int found_root = 0; |
| 38 | int found_root_goodness = 0; | 38 | int found_root_goodness = 0; |
| @@ -50,10 +50,14 @@ int mac_partition(struct parsed_partitions *state) | |||
| 50 | } | 50 | } |
| 51 | secsize = be16_to_cpu(md->block_size); | 51 | secsize = be16_to_cpu(md->block_size); |
| 52 | put_dev_sector(sect); | 52 | put_dev_sector(sect); |
| 53 | data = read_part_sector(state, secsize/512, §); | 53 | datasize = round_down(secsize, 512); |
| 54 | data = read_part_sector(state, datasize / 512, §); | ||
| 54 | if (!data) | 55 | if (!data) |
| 55 | return -1; | 56 | return -1; |
| 56 | part = (struct mac_partition *) (data + secsize%512); | 57 | partoffset = secsize % 512; |
| 58 | if (partoffset + sizeof(*part) > datasize) | ||
| 59 | return -1; | ||
| 60 | part = (struct mac_partition *) (data + partoffset); | ||
| 57 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { | 61 | if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { |
| 58 | put_dev_sector(sect); | 62 | put_dev_sector(sect); |
| 59 | return 0; /* not a MacOS disk */ | 63 | return 0; /* not a MacOS disk */ |
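A quick worked example of why the new bounds check matters, using a hostile on-disk block_size (the numbers are illustrative):

	/*
	 * block_size (secsize) = 1000, i.e. not a multiple of 512:
	 *
	 *   datasize   = round_down(1000, 512) = 512  -> read sector 1
	 *   partoffset = 1000 % 512            = 488
	 *
	 * sizeof(*part) is well over 24 bytes, so 488 + sizeof(*part) > 512
	 * and the check returns -1 instead of dereferencing past the end
	 * of the 512-byte sector buffer, which is what the old code did.
	 */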
diff --git a/drivers/Makefile b/drivers/Makefile index 73d039156ea7..795d0ca714bf 100644 --- a/drivers/Makefile +++ b/drivers/Makefile | |||
| @@ -63,6 +63,7 @@ obj-$(CONFIG_FB_I810) += video/fbdev/i810/ | |||
| 63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ | 63 | obj-$(CONFIG_FB_INTEL) += video/fbdev/intelfb/ |
| 64 | 64 | ||
| 65 | obj-$(CONFIG_PARPORT) += parport/ | 65 | obj-$(CONFIG_PARPORT) += parport/ |
| 66 | obj-$(CONFIG_NVM) += lightnvm/ | ||
| 66 | obj-y += base/ block/ misc/ mfd/ nfc/ | 67 | obj-y += base/ block/ misc/ mfd/ nfc/ |
| 67 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ | 68 | obj-$(CONFIG_LIBNVDIMM) += nvdimm/ |
| 68 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ | 69 | obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ |
| @@ -70,7 +71,6 @@ obj-$(CONFIG_NUBUS) += nubus/ | |||
| 70 | obj-y += macintosh/ | 71 | obj-y += macintosh/ |
| 71 | obj-$(CONFIG_IDE) += ide/ | 72 | obj-$(CONFIG_IDE) += ide/ |
| 72 | obj-$(CONFIG_SCSI) += scsi/ | 73 | obj-$(CONFIG_SCSI) += scsi/ |
| 73 | obj-$(CONFIG_NVM) += lightnvm/ | ||
| 74 | obj-y += nvme/ | 74 | obj-y += nvme/ |
| 75 | obj-$(CONFIG_ATA) += ata/ | 75 | obj-$(CONFIG_ATA) += ata/ |
| 76 | obj-$(CONFIG_TARGET_CORE) += target/ | 76 | obj-$(CONFIG_TARGET_CORE) += target/ |
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3c083d2cc434..6730f965b379 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c | |||
| @@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); | |||
| 304 | 304 | ||
| 305 | static int register_pcc_channel(int pcc_subspace_idx) | 305 | static int register_pcc_channel(int pcc_subspace_idx) |
| 306 | { | 306 | { |
| 307 | struct acpi_pcct_subspace *cppc_ss; | 307 | struct acpi_pcct_hw_reduced *cppc_ss; |
| 308 | unsigned int len; | 308 | unsigned int len; |
| 309 | 309 | ||
| 310 | if (pcc_subspace_idx >= 0) { | 310 | if (pcc_subspace_idx >= 0) { |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index f61a7c834540..b420fb46669d 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -1103,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) | |||
| 1103 | } | 1103 | } |
| 1104 | 1104 | ||
| 1105 | err_exit: | 1105 | err_exit: |
| 1106 | if (result && q) | 1106 | if (result) |
| 1107 | acpi_ec_delete_query(q); | 1107 | acpi_ec_delete_query(q); |
| 1108 | if (data) | 1108 | if (data) |
| 1109 | *data = value; | 1109 | *data = value; |
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index bf034f8b7c1a..2fa8304171e0 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/dmi.h> | ||
| 18 | #include "sbshc.h" | 17 | #include "sbshc.h" |
| 19 | 18 | ||
| 20 | #define PREFIX "ACPI: " | 19 | #define PREFIX "ACPI: " |
| @@ -30,6 +29,7 @@ struct acpi_smb_hc { | |||
| 30 | u8 query_bit; | 29 | u8 query_bit; |
| 31 | smbus_alarm_callback callback; | 30 | smbus_alarm_callback callback; |
| 32 | void *context; | 31 | void *context; |
| 32 | bool done; | ||
| 33 | }; | 33 | }; |
| 34 | 34 | ||
| 35 | static int acpi_smbus_hc_add(struct acpi_device *device); | 35 | static int acpi_smbus_hc_add(struct acpi_device *device); |
| @@ -88,8 +88,6 @@ enum acpi_smb_offset { | |||
| 88 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ | 88 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ |
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | static bool macbook; | ||
| 92 | |||
| 93 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) | 91 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) |
| 94 | { | 92 | { |
| 95 | return ec_read(hc->offset + address, data); | 93 | return ec_read(hc->offset + address, data); |
| @@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data) | |||
| 100 | return ec_write(hc->offset + address, data); | 98 | return ec_write(hc->offset + address, data); |
| 101 | } | 99 | } |
| 102 | 100 | ||
| 103 | static inline int smb_check_done(struct acpi_smb_hc *hc) | ||
| 104 | { | ||
| 105 | union acpi_smb_status status = {.raw = 0}; | ||
| 106 | smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw); | ||
| 107 | return status.fields.done && (status.fields.status == SMBUS_OK); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) | 101 | static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout) |
| 111 | { | 102 | { |
| 112 | if (wait_event_timeout(hc->wait, smb_check_done(hc), | 103 | if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout))) |
| 113 | msecs_to_jiffies(timeout))) | ||
| 114 | return 0; | 104 | return 0; |
| 115 | /* | 105 | return -ETIME; |
| 116 | * After the timeout happens, OS will try to check the status of SMbus. | ||
| 117 | * If the status is what OS expected, it will be regarded as the bogus | ||
| 118 | * timeout. | ||
| 119 | */ | ||
| 120 | if (smb_check_done(hc)) | ||
| 121 | return 0; | ||
| 122 | else | ||
| 123 | return -ETIME; | ||
| 124 | } | 106 | } |
| 125 | 107 | ||
| 126 | static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, | 108 | static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, |
| @@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, | |||
| 135 | } | 117 | } |
| 136 | 118 | ||
| 137 | mutex_lock(&hc->lock); | 119 | mutex_lock(&hc->lock); |
| 138 | if (macbook) | 120 | hc->done = false; |
| 139 | udelay(5); | ||
| 140 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) | 121 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) |
| 141 | goto end; | 122 | goto end; |
| 142 | if (temp) { | 123 | if (temp) { |
| @@ -235,8 +216,10 @@ static int smbus_alarm(void *context) | |||
| 235 | if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) | 216 | if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw)) |
| 236 | return 0; | 217 | return 0; |
| 237 | /* Check if it is only a completion notify */ | 218 | /* Check if it is only a completion notify */ |
| 238 | if (status.fields.done) | 219 | if (status.fields.done && status.fields.status == SMBUS_OK) { |
| 220 | hc->done = true; | ||
| 239 | wake_up(&hc->wait); | 221 | wake_up(&hc->wait); |
| 222 | } | ||
| 240 | if (!status.fields.alarm) | 223 | if (!status.fields.alarm) |
| 241 | return 0; | 224 | return 0; |
| 242 | mutex_lock(&hc->lock); | 225 | mutex_lock(&hc->lock); |
| @@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | |||
| 262 | acpi_handle handle, acpi_ec_query_func func, | 245 | acpi_handle handle, acpi_ec_query_func func, |
| 263 | void *data); | 246 | void *data); |
| 264 | 247 | ||
| 265 | static int macbook_dmi_match(const struct dmi_system_id *d) | ||
| 266 | { | ||
| 267 | pr_debug("Detected MacBook, enabling workaround\n"); | ||
| 268 | macbook = true; | ||
| 269 | return 0; | ||
| 270 | } | ||
| 271 | |||
| 272 | static struct dmi_system_id acpi_smbus_dmi_table[] = { | ||
| 273 | { macbook_dmi_match, "Apple MacBook", { | ||
| 274 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | ||
| 275 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, | ||
| 276 | }, | ||
| 277 | { }, | ||
| 278 | }; | ||
| 279 | |||
| 280 | static int acpi_smbus_hc_add(struct acpi_device *device) | 248 | static int acpi_smbus_hc_add(struct acpi_device *device) |
| 281 | { | 249 | { |
| 282 | int status; | 250 | int status; |
| 283 | unsigned long long val; | 251 | unsigned long long val; |
| 284 | struct acpi_smb_hc *hc; | 252 | struct acpi_smb_hc *hc; |
| 285 | 253 | ||
| 286 | dmi_check_system(acpi_smbus_dmi_table); | ||
| 287 | |||
| 288 | if (!device) | 254 | if (!device) |
| 289 | return -EINVAL; | 255 | return -EINVAL; |
| 290 | 256 | ||
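The sbshc rework replaces re-polling the EC status register from the waiter with a done flag that the alarm handler sets only on a successful completion, which also lets the MacBook udelay() quirk and its DMI table go. The underlying pattern, as a generic sketch (struct and names are mine):

	struct hc {
		wait_queue_head_t wait;
		bool done;
	};

	static int wait_done(struct hc *hc, int timeout_ms)
	{
		/* wait_event_timeout() re-tests the condition after every
		 * wake-up, so a wake arriving before the sleep is not lost;
		 * it returns 0 only if the timeout elapsed with !done */
		if (wait_event_timeout(hc->wait, hc->done,
				       msecs_to_jiffies(timeout_ms)))
			return 0;
		return -ETIME;
	}

	static void completion_irq(struct hc *hc)
	{
		hc->done = true;	/* publish before waking the waiter */
		wake_up(&hc->wait);
	}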
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index eb6e67451dec..0d77cd6fd8d1 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c | |||
| @@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq) | |||
| 68 | struct wake_irq *wirq; | 68 | struct wake_irq *wirq; |
| 69 | int err; | 69 | int err; |
| 70 | 70 | ||
| 71 | if (irq < 0) | ||
| 72 | return -EINVAL; | ||
| 73 | |||
| 71 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); | 74 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); |
| 72 | if (!wirq) | 75 | if (!wirq) |
| 73 | return -ENOMEM; | 76 | return -ENOMEM; |
| @@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) | |||
| 167 | struct wake_irq *wirq; | 170 | struct wake_irq *wirq; |
| 168 | int err; | 171 | int err; |
| 169 | 172 | ||
| 173 | if (irq < 0) | ||
| 174 | return -EINVAL; | ||
| 175 | |||
| 170 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); | 176 | wirq = kzalloc(sizeof(*wirq), GFP_KERNEL); |
| 171 | if (!wirq) | 177 | if (!wirq) |
| 172 | return -ENOMEM; | 178 | return -ENOMEM; |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a28a562f7b7f..3457ac8c03e2 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
| @@ -3810,7 +3810,6 @@ static int mtip_block_initialize(struct driver_data *dd) | |||
| 3810 | sector_t capacity; | 3810 | sector_t capacity; |
| 3811 | unsigned int index = 0; | 3811 | unsigned int index = 0; |
| 3812 | struct kobject *kobj; | 3812 | struct kobject *kobj; |
| 3813 | unsigned char thd_name[16]; | ||
| 3814 | 3813 | ||
| 3815 | if (dd->disk) | 3814 | if (dd->disk) |
| 3816 | goto skip_create_disk; /* hw init done, before rebuild */ | 3815 | goto skip_create_disk; /* hw init done, before rebuild */ |
| @@ -3958,10 +3957,9 @@ skip_create_disk: | |||
| 3958 | } | 3957 | } |
| 3959 | 3958 | ||
| 3960 | start_service_thread: | 3959 | start_service_thread: |
| 3961 | sprintf(thd_name, "mtip_svc_thd_%02d", index); | ||
| 3962 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, | 3960 | dd->mtip_svc_handler = kthread_create_on_node(mtip_service_thread, |
| 3963 | dd, dd->numa_node, "%s", | 3961 | dd, dd->numa_node, |
| 3964 | thd_name); | 3962 | "mtip_svc_thd_%02d", index); |
| 3965 | 3963 | ||
| 3966 | if (IS_ERR(dd->mtip_svc_handler)) { | 3964 | if (IS_ERR(dd->mtip_svc_handler)) { |
| 3967 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); | 3965 | dev_err(&dd->pdev->dev, "service thread failed to start\n"); |
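The mtip32xx cleanup works because the thread-name argument is a printf-style format, so the 16-byte thd_name stack buffer (and the "%s" indirection, which also defeated format checking) was never needed. The signature, for reference:

	struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
						   void *data,
						   int node,
						   const char namefmt[], ...);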
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 6255d1c4bba4..5c8ba5484d86 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 9 | #include <linux/blk-mq.h> | 9 | #include <linux/blk-mq.h> |
| 10 | #include <linux/hrtimer.h> | 10 | #include <linux/hrtimer.h> |
| 11 | #include <linux/lightnvm.h> | ||
| 11 | 12 | ||
| 12 | struct nullb_cmd { | 13 | struct nullb_cmd { |
| 13 | struct list_head list; | 14 | struct list_head list; |
| @@ -39,12 +40,14 @@ struct nullb { | |||
| 39 | 40 | ||
| 40 | struct nullb_queue *queues; | 41 | struct nullb_queue *queues; |
| 41 | unsigned int nr_queues; | 42 | unsigned int nr_queues; |
| 43 | char disk_name[DISK_NAME_LEN]; | ||
| 42 | }; | 44 | }; |
| 43 | 45 | ||
| 44 | static LIST_HEAD(nullb_list); | 46 | static LIST_HEAD(nullb_list); |
| 45 | static struct mutex lock; | 47 | static struct mutex lock; |
| 46 | static int null_major; | 48 | static int null_major; |
| 47 | static int nullb_indexes; | 49 | static int nullb_indexes; |
| 50 | static struct kmem_cache *ppa_cache; | ||
| 48 | 51 | ||
| 49 | struct completion_queue { | 52 | struct completion_queue { |
| 50 | struct llist_head list; | 53 | struct llist_head list; |
| @@ -119,6 +122,10 @@ static int nr_devices = 2; | |||
| 119 | module_param(nr_devices, int, S_IRUGO); | 122 | module_param(nr_devices, int, S_IRUGO); |
| 120 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); | 123 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); |
| 121 | 124 | ||
| 125 | static bool use_lightnvm; | ||
| 126 | module_param(use_lightnvm, bool, S_IRUGO); | ||
| 127 | MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device"); | ||
| 128 | |||
| 122 | static int irqmode = NULL_IRQ_SOFTIRQ; | 129 | static int irqmode = NULL_IRQ_SOFTIRQ; |
| 123 | 130 | ||
| 124 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) | 131 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) |
| @@ -427,15 +434,156 @@ static void null_del_dev(struct nullb *nullb) | |||
| 427 | { | 434 | { |
| 428 | list_del_init(&nullb->list); | 435 | list_del_init(&nullb->list); |
| 429 | 436 | ||
| 430 | del_gendisk(nullb->disk); | 437 | if (use_lightnvm) |
| 438 | nvm_unregister(nullb->disk_name); | ||
| 439 | else | ||
| 440 | del_gendisk(nullb->disk); | ||
| 431 | blk_cleanup_queue(nullb->q); | 441 | blk_cleanup_queue(nullb->q); |
| 432 | if (queue_mode == NULL_Q_MQ) | 442 | if (queue_mode == NULL_Q_MQ) |
| 433 | blk_mq_free_tag_set(&nullb->tag_set); | 443 | blk_mq_free_tag_set(&nullb->tag_set); |
| 434 | put_disk(nullb->disk); | 444 | if (!use_lightnvm) |
| 445 | put_disk(nullb->disk); | ||
| 435 | cleanup_queues(nullb); | 446 | cleanup_queues(nullb); |
| 436 | kfree(nullb); | 447 | kfree(nullb); |
| 437 | } | 448 | } |
| 438 | 449 | ||
| 450 | #ifdef CONFIG_NVM | ||
| 451 | |||
| 452 | static void null_lnvm_end_io(struct request *rq, int error) | ||
| 453 | { | ||
| 454 | struct nvm_rq *rqd = rq->end_io_data; | ||
| 455 | struct nvm_dev *dev = rqd->dev; | ||
| 456 | |||
| 457 | dev->mt->end_io(rqd, error); | ||
| 458 | |||
| 459 | blk_put_request(rq); | ||
| 460 | } | ||
| 461 | |||
| 462 | static int null_lnvm_submit_io(struct request_queue *q, struct nvm_rq *rqd) | ||
| 463 | { | ||
| 464 | struct request *rq; | ||
| 465 | struct bio *bio = rqd->bio; | ||
| 466 | |||
| 467 | rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); | ||
| 468 | if (IS_ERR(rq)) | ||
| 469 | return -ENOMEM; | ||
| 470 | |||
| 471 | rq->cmd_type = REQ_TYPE_DRV_PRIV; | ||
| 472 | rq->__sector = bio->bi_iter.bi_sector; | ||
| 473 | rq->ioprio = bio_prio(bio); | ||
| 474 | |||
| 475 | if (bio_has_data(bio)) | ||
| 476 | rq->nr_phys_segments = bio_phys_segments(q, bio); | ||
| 477 | |||
| 478 | rq->__data_len = bio->bi_iter.bi_size; | ||
| 479 | rq->bio = rq->biotail = bio; | ||
| 480 | |||
| 481 | rq->end_io_data = rqd; | ||
| 482 | |||
| 483 | blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io); | ||
| 484 | |||
| 485 | return 0; | ||
| 486 | } | ||
| 487 | |||
| 488 | static int null_lnvm_id(struct request_queue *q, struct nvm_id *id) | ||
| 489 | { | ||
| 490 | sector_t size = gb * 1024 * 1024 * 1024ULL; | ||
| 491 | sector_t blksize; | ||
| 492 | struct nvm_id_group *grp; | ||
| 493 | |||
| 494 | id->ver_id = 0x1; | ||
| 495 | id->vmnt = 0; | ||
| 496 | id->cgrps = 1; | ||
| 497 | id->cap = 0x3; | ||
| 498 | id->dom = 0x1; | ||
| 499 | |||
| 500 | id->ppaf.blk_offset = 0; | ||
| 501 | id->ppaf.blk_len = 16; | ||
| 502 | id->ppaf.pg_offset = 16; | ||
| 503 | id->ppaf.pg_len = 16; | ||
| 504 | id->ppaf.sect_offset = 32; | ||
| 505 | id->ppaf.sect_len = 8; | ||
| 506 | id->ppaf.pln_offset = 40; | ||
| 507 | id->ppaf.pln_len = 8; | ||
| 508 | id->ppaf.lun_offset = 48; | ||
| 509 | id->ppaf.lun_len = 8; | ||
| 510 | id->ppaf.ch_offset = 56; | ||
| 511 | id->ppaf.ch_len = 8; | ||
| 512 | |||
| 513 | do_div(size, bs); /* convert size to pages */ | ||
| 514 | do_div(size, 256); /* convert size to pages per block */ | ||
| 515 | grp = &id->groups[0]; | ||
| 516 | grp->mtype = 0; | ||
| 517 | grp->fmtype = 0; | ||
| 518 | grp->num_ch = 1; | ||
| 519 | grp->num_pg = 256; | ||
| 520 | blksize = size; | ||
| 521 | do_div(size, (1 << 16)); | ||
| 522 | grp->num_lun = size + 1; | ||
| 523 | do_div(blksize, grp->num_lun); | ||
| 524 | grp->num_blk = blksize; | ||
| 525 | grp->num_pln = 1; | ||
| 526 | |||
| 527 | grp->fpg_sz = bs; | ||
| 528 | grp->csecs = bs; | ||
| 529 | grp->trdt = 25000; | ||
| 530 | grp->trdm = 25000; | ||
| 531 | grp->tprt = 500000; | ||
| 532 | grp->tprm = 500000; | ||
| 533 | grp->tbet = 1500000; | ||
| 534 | grp->tbem = 1500000; | ||
| 535 | grp->mpos = 0x010101; /* single plane rwe */ | ||
| 536 | grp->cpar = hw_queue_depth; | ||
| 537 | |||
| 538 | return 0; | ||
| 539 | } | ||
| 540 | |||
| 541 | static void *null_lnvm_create_dma_pool(struct request_queue *q, char *name) | ||
| 542 | { | ||
| 543 | mempool_t *virtmem_pool; | ||
| 544 | |||
| 545 | virtmem_pool = mempool_create_slab_pool(64, ppa_cache); | ||
| 546 | if (!virtmem_pool) { | ||
| 547 | pr_err("null_blk: Unable to create virtual memory pool\n"); | ||
| 548 | return NULL; | ||
| 549 | } | ||
| 550 | |||
| 551 | return virtmem_pool; | ||
| 552 | } | ||
| 553 | |||
| 554 | static void null_lnvm_destroy_dma_pool(void *pool) | ||
| 555 | { | ||
| 556 | mempool_destroy(pool); | ||
| 557 | } | ||
| 558 | |||
| 559 | static void *null_lnvm_dev_dma_alloc(struct request_queue *q, void *pool, | ||
| 560 | gfp_t mem_flags, dma_addr_t *dma_handler) | ||
| 561 | { | ||
| 562 | return mempool_alloc(pool, mem_flags); | ||
| 563 | } | ||
| 564 | |||
| 565 | static void null_lnvm_dev_dma_free(void *pool, void *entry, | ||
| 566 | dma_addr_t dma_handler) | ||
| 567 | { | ||
| 568 | mempool_free(entry, pool); | ||
| 569 | } | ||
| 570 | |||
| 571 | static struct nvm_dev_ops null_lnvm_dev_ops = { | ||
| 572 | .identity = null_lnvm_id, | ||
| 573 | .submit_io = null_lnvm_submit_io, | ||
| 574 | |||
| 575 | .create_dma_pool = null_lnvm_create_dma_pool, | ||
| 576 | .destroy_dma_pool = null_lnvm_destroy_dma_pool, | ||
| 577 | .dev_dma_alloc = null_lnvm_dev_dma_alloc, | ||
| 578 | .dev_dma_free = null_lnvm_dev_dma_free, | ||
| 579 | |||
| 580 | /* Simulate nvme protocol restriction */ | ||
| 581 | .max_phys_sect = 64, | ||
| 582 | }; | ||
| 583 | #else | ||
| 584 | static struct nvm_dev_ops null_lnvm_dev_ops; | ||
| 585 | #endif /* CONFIG_NVM */ | ||
| 586 | |||
| 439 | static int null_open(struct block_device *bdev, fmode_t mode) | 587 | static int null_open(struct block_device *bdev, fmode_t mode) |
| 440 | { | 588 | { |
| 441 | return 0; | 589 | return 0; |
| @@ -575,11 +723,6 @@ static int null_add_dev(void) | |||
| 575 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 723 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
| 576 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); | 724 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); |
| 577 | 725 | ||
| 578 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
| 579 | if (!disk) { | ||
| 580 | rv = -ENOMEM; | ||
| 581 | goto out_cleanup_blk_queue; | ||
| 582 | } | ||
| 583 | 726 | ||
| 584 | mutex_lock(&lock); | 727 | mutex_lock(&lock); |
| 585 | list_add_tail(&nullb->list, &nullb_list); | 728 | list_add_tail(&nullb->list, &nullb_list); |
| @@ -589,6 +732,21 @@ static int null_add_dev(void) | |||
| 589 | blk_queue_logical_block_size(nullb->q, bs); | 732 | blk_queue_logical_block_size(nullb->q, bs); |
| 590 | blk_queue_physical_block_size(nullb->q, bs); | 733 | blk_queue_physical_block_size(nullb->q, bs); |
| 591 | 734 | ||
| 735 | sprintf(nullb->disk_name, "nullb%d", nullb->index); | ||
| 736 | |||
| 737 | if (use_lightnvm) { | ||
| 738 | rv = nvm_register(nullb->q, nullb->disk_name, | ||
| 739 | &null_lnvm_dev_ops); | ||
| 740 | if (rv) | ||
| 741 | goto out_cleanup_blk_queue; | ||
| 742 | goto done; | ||
| 743 | } | ||
| 744 | |||
| 745 | disk = nullb->disk = alloc_disk_node(1, home_node); | ||
| 746 | if (!disk) { | ||
| 747 | rv = -ENOMEM; | ||
| 748 | goto out_cleanup_lightnvm; | ||
| 749 | } | ||
| 592 | size = gb * 1024 * 1024 * 1024ULL; | 750 | size = gb * 1024 * 1024 * 1024ULL; |
| 593 | set_capacity(disk, size >> 9); | 751 | set_capacity(disk, size >> 9); |
| 594 | 752 | ||
| @@ -598,10 +756,15 @@ static int null_add_dev(void) | |||
| 598 | disk->fops = &null_fops; | 756 | disk->fops = &null_fops; |
| 599 | disk->private_data = nullb; | 757 | disk->private_data = nullb; |
| 600 | disk->queue = nullb->q; | 758 | disk->queue = nullb->q; |
| 601 | sprintf(disk->disk_name, "nullb%d", nullb->index); | 759 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); |
| 760 | |||
| 602 | add_disk(disk); | 761 | add_disk(disk); |
| 762 | done: | ||
| 603 | return 0; | 763 | return 0; |
| 604 | 764 | ||
| 765 | out_cleanup_lightnvm: | ||
| 766 | if (use_lightnvm) | ||
| 767 | nvm_unregister(nullb->disk_name); | ||
| 605 | out_cleanup_blk_queue: | 768 | out_cleanup_blk_queue: |
| 606 | blk_cleanup_queue(nullb->q); | 769 | blk_cleanup_queue(nullb->q); |
| 607 | out_cleanup_tags: | 770 | out_cleanup_tags: |
| @@ -625,6 +788,18 @@ static int __init null_init(void) | |||
| 625 | bs = PAGE_SIZE; | 788 | bs = PAGE_SIZE; |
| 626 | } | 789 | } |
| 627 | 790 | ||
| 791 | if (use_lightnvm && bs != 4096) { | ||
| 792 | pr_warn("null_blk: LightNVM only supports 4k block size\n"); | ||
| 793 | pr_warn("null_blk: defaulting block size to 4k\n"); | ||
| 794 | bs = 4096; | ||
| 795 | } | ||
| 796 | |||
| 797 | if (use_lightnvm && queue_mode != NULL_Q_MQ) { | ||
| 798 | pr_warn("null_blk: LightNVM is only supported with blk-mq\n"); | ||
| 799 | pr_warn("null_blk: defaulting queue mode to blk-mq\n"); | ||
| 800 | queue_mode = NULL_Q_MQ; | ||
| 801 | } | ||
| 802 | |||
| 628 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { | 803 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { |
| 629 | if (submit_queues < nr_online_nodes) { | 804 | if (submit_queues < nr_online_nodes) { |
| 630 | pr_warn("null_blk: submit_queues param is set to %u.", | 805 | pr_warn("null_blk: submit_queues param is set to %u.", |
| @@ -655,15 +830,27 @@ static int __init null_init(void) | |||
| 655 | if (null_major < 0) | 830 | if (null_major < 0) |
| 656 | return null_major; | 831 | return null_major; |
| 657 | 832 | ||
| 833 | if (use_lightnvm) { | ||
| 834 | ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), | ||
| 835 | 0, 0, NULL); | ||
| 836 | if (!ppa_cache) { | ||
| 837 | pr_err("null_blk: unable to create ppa cache\n"); | ||
| 838 | return -ENOMEM; | ||
| 839 | } | ||
| 840 | } | ||
| 841 | |||
| 658 | for (i = 0; i < nr_devices; i++) { | 842 | for (i = 0; i < nr_devices; i++) { |
| 659 | if (null_add_dev()) { | 843 | if (null_add_dev()) { |
| 660 | unregister_blkdev(null_major, "nullb"); | 844 | unregister_blkdev(null_major, "nullb"); |
| 661 | return -EINVAL; | 845 | goto err_ppa; |
| 662 | } | 846 | } |
| 663 | } | 847 | } |
| 664 | 848 | ||
| 665 | pr_info("null: module loaded\n"); | 849 | pr_info("null: module loaded\n"); |
| 666 | return 0; | 850 | return 0; |
| 851 | err_ppa: | ||
| 852 | kmem_cache_destroy(ppa_cache); | ||
| 853 | return -EINVAL; | ||
| 667 | } | 854 | } |
| 668 | 855 | ||
| 669 | static void __exit null_exit(void) | 856 | static void __exit null_exit(void) |
| @@ -678,6 +865,8 @@ static void __exit null_exit(void) | |||
| 678 | null_del_dev(nullb); | 865 | null_del_dev(nullb); |
| 679 | } | 866 | } |
| 680 | mutex_unlock(&lock); | 867 | mutex_unlock(&lock); |
| 868 | |||
| 869 | kmem_cache_destroy(ppa_cache); | ||
| 681 | } | 870 | } |
| 682 | 871 | ||
| 683 | module_init(null_init); | 872 | module_init(null_init); |
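Worth noting in null_init() above: incompatible parameter combinations are coerced rather than rejected, so "modprobe null_blk use_lightnvm=1" alone yields a working 4k-block, blk-mq configuration instead of -EINVAL. A generic sketch of that validate-and-coerce idiom (all names here are hypothetical, not from null_blk):

	/* Sketch: reconcile dependent module parameters at init time. */
	static unsigned int blocksize = 512;
	static bool use_featurex;

	static int __init example_init(void)
	{
		if (use_featurex && blocksize != 4096) {
			pr_warn("example: featurex needs 4k blocks, overriding\n");
			blocksize = 4096;   /* coerce instead of failing the load */
		}
		return 0;
	}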
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 654f6f36a071..55fe9020459f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -412,18 +412,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
| 412 | return rv; | 412 | return rv; |
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | static void start_check_enables(struct smi_info *smi_info) | 415 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) |
| 416 | { | ||
| 417 | smi_info->last_timeout_jiffies = jiffies; | ||
| 418 | mod_timer(&smi_info->si_timer, new_val); | ||
| 419 | smi_info->timer_running = true; | ||
| 420 | } | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Start a new message and (re)start the timer and thread. | ||
| 424 | */ | ||
| 425 | static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, | ||
| 426 | unsigned int size) | ||
| 427 | { | ||
| 428 | smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); | ||
| 429 | |||
| 430 | if (smi_info->thread) | ||
| 431 | wake_up_process(smi_info->thread); | ||
| 432 | |||
| 433 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); | ||
| 434 | } | ||
| 435 | |||
| 436 | static void start_check_enables(struct smi_info *smi_info, bool start_timer) | ||
| 416 | { | 437 | { |
| 417 | unsigned char msg[2]; | 438 | unsigned char msg[2]; |
| 418 | 439 | ||
| 419 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 440 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| 420 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 441 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
| 421 | 442 | ||
| 422 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 443 | if (start_timer) |
| 444 | start_new_msg(smi_info, msg, 2); | ||
| 445 | else | ||
| 446 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | ||
| 423 | smi_info->si_state = SI_CHECKING_ENABLES; | 447 | smi_info->si_state = SI_CHECKING_ENABLES; |
| 424 | } | 448 | } |
| 425 | 449 | ||
| 426 | static void start_clear_flags(struct smi_info *smi_info) | 450 | static void start_clear_flags(struct smi_info *smi_info, bool start_timer) |
| 427 | { | 451 | { |
| 428 | unsigned char msg[3]; | 452 | unsigned char msg[3]; |
| 429 | 453 | ||
| @@ -432,7 +456,10 @@ static void start_clear_flags(struct smi_info *smi_info) | |||
| 432 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; | 456 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; |
| 433 | msg[2] = WDT_PRE_TIMEOUT_INT; | 457 | msg[2] = WDT_PRE_TIMEOUT_INT; |
| 434 | 458 | ||
| 435 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | 459 | if (start_timer) |
| 460 | start_new_msg(smi_info, msg, 3); | ||
| 461 | else | ||
| 462 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | ||
| 436 | smi_info->si_state = SI_CLEARING_FLAGS; | 463 | smi_info->si_state = SI_CLEARING_FLAGS; |
| 437 | } | 464 | } |
| 438 | 465 | ||
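The refactoring above centralizes an ordering rule the old call sites each had to get right: arm the timeout (and kick the kthread) before starting the transaction, so a request to the BMC is never in flight without a watchdog against a wedged controller. The essence of start_new_msg(), as a sketch rather than the driver verbatim:

	/* Sketch: always arm the timeout before kicking off the I/O. */
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
	smi_info->timer_running = true;

	if (smi_info->thread)
		wake_up_process(smi_info->thread);  /* start polling now */

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);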
| @@ -442,10 +469,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info) | |||
| 442 | smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; | 469 | smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; |
| 443 | smi_info->curr_msg->data_size = 2; | 470 | smi_info->curr_msg->data_size = 2; |
| 444 | 471 | ||
| 445 | smi_info->handlers->start_transaction( | 472 | start_new_msg(smi_info, smi_info->curr_msg->data, |
| 446 | smi_info->si_sm, | 473 | smi_info->curr_msg->data_size); |
| 447 | smi_info->curr_msg->data, | ||
| 448 | smi_info->curr_msg->data_size); | ||
| 449 | smi_info->si_state = SI_GETTING_MESSAGES; | 474 | smi_info->si_state = SI_GETTING_MESSAGES; |
| 450 | } | 475 | } |
| 451 | 476 | ||
| @@ -455,20 +480,11 @@ static void start_getting_events(struct smi_info *smi_info) | |||
| 455 | smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; | 480 | smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; |
| 456 | smi_info->curr_msg->data_size = 2; | 481 | smi_info->curr_msg->data_size = 2; |
| 457 | 482 | ||
| 458 | smi_info->handlers->start_transaction( | 483 | start_new_msg(smi_info, smi_info->curr_msg->data, |
| 459 | smi_info->si_sm, | 484 | smi_info->curr_msg->data_size); |
| 460 | smi_info->curr_msg->data, | ||
| 461 | smi_info->curr_msg->data_size); | ||
| 462 | smi_info->si_state = SI_GETTING_EVENTS; | 485 | smi_info->si_state = SI_GETTING_EVENTS; |
| 463 | } | 486 | } |
| 464 | 487 | ||
| 465 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | ||
| 466 | { | ||
| 467 | smi_info->last_timeout_jiffies = jiffies; | ||
| 468 | mod_timer(&smi_info->si_timer, new_val); | ||
| 469 | smi_info->timer_running = true; | ||
| 470 | } | ||
| 471 | |||
| 472 | /* | 488 | /* |
| 473 | * When we have a situtaion where we run out of memory and cannot | 489 | * When we have a situtaion where we run out of memory and cannot |
| 474 | * allocate messages, we just leave them in the BMC and run the system | 490 | * allocate messages, we just leave them in the BMC and run the system |
| @@ -478,11 +494,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | |||
| 478 | * Note that we cannot just use disable_irq(), since the interrupt may | 494 | * Note that we cannot just use disable_irq(), since the interrupt may |
| 479 | * be shared. | 495 | * be shared. |
| 480 | */ | 496 | */ |
| 481 | static inline bool disable_si_irq(struct smi_info *smi_info) | 497 | static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) |
| 482 | { | 498 | { |
| 483 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 499 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
| 484 | smi_info->interrupt_disabled = true; | 500 | smi_info->interrupt_disabled = true; |
| 485 | start_check_enables(smi_info); | 501 | start_check_enables(smi_info, start_timer); |
| 486 | return true; | 502 | return true; |
| 487 | } | 503 | } |
| 488 | return false; | 504 | return false; |
| @@ -492,7 +508,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) | |||
| 492 | { | 508 | { |
| 493 | if ((smi_info->irq) && (smi_info->interrupt_disabled)) { | 509 | if ((smi_info->irq) && (smi_info->interrupt_disabled)) { |
| 494 | smi_info->interrupt_disabled = false; | 510 | smi_info->interrupt_disabled = false; |
| 495 | start_check_enables(smi_info); | 511 | start_check_enables(smi_info, true); |
| 496 | return true; | 512 | return true; |
| 497 | } | 513 | } |
| 498 | return false; | 514 | return false; |
| @@ -510,7 +526,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) | |||
| 510 | 526 | ||
| 511 | msg = ipmi_alloc_smi_msg(); | 527 | msg = ipmi_alloc_smi_msg(); |
| 512 | if (!msg) { | 528 | if (!msg) { |
| 513 | if (!disable_si_irq(smi_info)) | 529 | if (!disable_si_irq(smi_info, true)) |
| 514 | smi_info->si_state = SI_NORMAL; | 530 | smi_info->si_state = SI_NORMAL; |
| 515 | } else if (enable_si_irq(smi_info)) { | 531 | } else if (enable_si_irq(smi_info)) { |
| 516 | ipmi_free_smi_msg(msg); | 532 | ipmi_free_smi_msg(msg); |
| @@ -526,7 +542,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
| 526 | /* Watchdog pre-timeout */ | 542 | /* Watchdog pre-timeout */ |
| 527 | smi_inc_stat(smi_info, watchdog_pretimeouts); | 543 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
| 528 | 544 | ||
| 529 | start_clear_flags(smi_info); | 545 | start_clear_flags(smi_info, true); |
| 530 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 546 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
| 531 | if (smi_info->intf) | 547 | if (smi_info->intf) |
| 532 | ipmi_smi_watchdog_pretimeout(smi_info->intf); | 548 | ipmi_smi_watchdog_pretimeout(smi_info->intf); |
| @@ -879,8 +895,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 879 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 895 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| 880 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; | 896 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; |
| 881 | 897 | ||
| 882 | smi_info->handlers->start_transaction( | 898 | start_new_msg(smi_info, msg, 2); |
| 883 | smi_info->si_sm, msg, 2); | ||
| 884 | smi_info->si_state = SI_GETTING_FLAGS; | 899 | smi_info->si_state = SI_GETTING_FLAGS; |
| 885 | goto restart; | 900 | goto restart; |
| 886 | } | 901 | } |
| @@ -910,7 +925,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 910 | * disable and messages disabled. | 925 | * disable and messages disabled. |
| 911 | */ | 926 | */ |
| 912 | if (smi_info->supports_event_msg_buff || smi_info->irq) { | 927 | if (smi_info->supports_event_msg_buff || smi_info->irq) { |
| 913 | start_check_enables(smi_info); | 928 | start_check_enables(smi_info, true); |
| 914 | } else { | 929 | } else { |
| 915 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); | 930 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); |
| 916 | if (!smi_info->curr_msg) | 931 | if (!smi_info->curr_msg) |
| @@ -920,6 +935,13 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
| 920 | } | 935 | } |
| 921 | goto restart; | 936 | goto restart; |
| 922 | } | 937 | } |
| 938 | |||
| 939 | if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { | ||
| 940 | /* It's ok if it fails, the timer will just go off. */ | ||
| 941 | if (del_timer(&smi_info->si_timer)) | ||
| 942 | smi_info->timer_running = false; | ||
| 943 | } | ||
| 944 | |||
| 923 | out: | 945 | out: |
| 924 | return si_sm_result; | 946 | return si_sm_result; |
| 925 | } | 947 | } |
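The new idle check leans on del_timer()'s return value: it returns true only when a still-pending timer was deactivated, so timer_running is cleared only if this path actually cancelled the timer; if the handler is already running, the flag stays set and the handler cleans up after itself. Schematically:

	/* Sketch: del_timer()'s return value says who owns the teardown. */
	if (del_timer(&timer))		/* true: pending timer removed here */
		timer_running = false;	/* safe - the handler will not run  */
	/* false: already expired or running; the handler re-arms or exits */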
| @@ -2560,6 +2582,7 @@ static const struct of_device_id of_ipmi_match[] = { | |||
| 2560 | .data = (void *)(unsigned long) SI_BT }, | 2582 | .data = (void *)(unsigned long) SI_BT }, |
| 2561 | {}, | 2583 | {}, |
| 2562 | }; | 2584 | }; |
| 2585 | MODULE_DEVICE_TABLE(of, of_ipmi_match); | ||
| 2563 | 2586 | ||
| 2564 | static int of_ipmi_probe(struct platform_device *dev) | 2587 | static int of_ipmi_probe(struct platform_device *dev) |
| 2565 | { | 2588 | { |
| @@ -2646,7 +2669,6 @@ static int of_ipmi_probe(struct platform_device *dev) | |||
| 2646 | } | 2669 | } |
| 2647 | return 0; | 2670 | return 0; |
| 2648 | } | 2671 | } |
| 2649 | MODULE_DEVICE_TABLE(of, of_ipmi_match); | ||
| 2650 | #else | 2672 | #else |
| 2651 | #define of_ipmi_match NULL | 2673 | #define of_ipmi_match NULL |
| 2652 | static int of_ipmi_probe(struct platform_device *dev) | 2674 | static int of_ipmi_probe(struct platform_device *dev) |
| @@ -3613,7 +3635,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
| 3613 | * Start clearing the flags before we enable interrupts or the | 3635 | * Start clearing the flags before we enable interrupts or the |
| 3614 | * timer to avoid racing with the timer. | 3636 | * timer to avoid racing with the timer. |
| 3615 | */ | 3637 | */ |
| 3616 | start_clear_flags(new_smi); | 3638 | start_clear_flags(new_smi, false); |
| 3617 | 3639 | ||
| 3618 | /* | 3640 | /* |
| 3619 | * IRQ is defined to be set when non-zero. req_events will | 3641 | * IRQ is defined to be set when non-zero. req_events will |
| @@ -3908,7 +3930,7 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
| 3908 | poll(to_clean); | 3930 | poll(to_clean); |
| 3909 | schedule_timeout_uninterruptible(1); | 3931 | schedule_timeout_uninterruptible(1); |
| 3910 | } | 3932 | } |
| 3911 | disable_si_irq(to_clean); | 3933 | disable_si_irq(to_clean, false); |
| 3912 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3934 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
| 3913 | poll(to_clean); | 3935 | poll(to_clean); |
| 3914 | schedule_timeout_uninterruptible(1); | 3936 | schedule_timeout_uninterruptible(1); |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 0ac3bd1a5497..096f0cef4da1 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
| @@ -153,6 +153,9 @@ static int timeout = 10; | |||
| 153 | /* The pre-timeout is disabled by default. */ | 153 | /* The pre-timeout is disabled by default. */ |
| 154 | static int pretimeout; | 154 | static int pretimeout; |
| 155 | 155 | ||
| 156 | /* Default timeout to set on panic */ | ||
| 157 | static int panic_wdt_timeout = 255; | ||
| 158 | |||
| 156 | /* Default action is to reset the board on a timeout. */ | 159 | /* Default action is to reset the board on a timeout. */ |
| 157 | static unsigned char action_val = WDOG_TIMEOUT_RESET; | 160 | static unsigned char action_val = WDOG_TIMEOUT_RESET; |
| 158 | 161 | ||
| @@ -293,6 +296,9 @@ MODULE_PARM_DESC(timeout, "Timeout value in seconds."); | |||
| 293 | module_param(pretimeout, timeout, 0644); | 296 | module_param(pretimeout, timeout, 0644); |
| 294 | MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); | 297 | MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds."); |
| 295 | 298 | ||
| 299 | module_param(panic_wdt_timeout, timeout, 0644); | ||
| 300 | MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds."); | ||
| 301 | |||
| 296 | module_param_cb(action, &param_ops_str, action_op, 0644); | 302 |
| 297 | MODULE_PARM_DESC(action, "Timeout action. One of: " | 303 | MODULE_PARM_DESC(action, "Timeout action. One of: " |
| 298 | "reset, none, power_cycle, power_off."); | 304 | "reset, none, power_cycle, power_off."); |
| @@ -1189,7 +1195,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
| 1189 | /* Make sure we do this only once. */ | 1195 | /* Make sure we do this only once. */ |
| 1190 | panic_event_handled = 1; | 1196 | panic_event_handled = 1; |
| 1191 | 1197 | ||
| 1192 | timeout = 255; | 1198 | timeout = panic_wdt_timeout; |
| 1193 | pretimeout = 0; | 1199 | pretimeout = 0; |
| 1194 | panic_halt_ipmi_set_timeout(); | 1200 | panic_halt_ipmi_set_timeout(); |
| 1195 | } | 1201 | } |
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 71cfdf7c9708..2eb5f0efae90 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | menu "Clock Source drivers" | 1 | menu "Clock Source drivers" |
| 2 | depends on !ARCH_USES_GETTIMEOFFSET | ||
| 2 | 3 | ||
| 3 | config CLKSRC_OF | 4 | config CLKSRC_OF |
| 4 | bool | 5 | bool |
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/fsl_ftm_timer.c index 10202f1fdfd7..517e1c7624d4 100644 --- a/drivers/clocksource/fsl_ftm_timer.c +++ b/drivers/clocksource/fsl_ftm_timer.c | |||
| @@ -203,7 +203,7 @@ static int __init ftm_clockevent_init(unsigned long freq, int irq) | |||
| 203 | int err; | 203 | int err; |
| 204 | 204 | ||
| 205 | ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); | 205 | ftm_writel(0x00, priv->clkevt_base + FTM_CNTIN); |
| 206 | ftm_writel(~0UL, priv->clkevt_base + FTM_MOD); | 206 | ftm_writel(~0u, priv->clkevt_base + FTM_MOD); |
| 207 | 207 | ||
| 208 | ftm_reset_counter(priv->clkevt_base); | 208 | ftm_reset_counter(priv->clkevt_base); |
| 209 | 209 | ||
| @@ -230,7 +230,7 @@ static int __init ftm_clocksource_init(unsigned long freq) | |||
| 230 | int err; | 230 | int err; |
| 231 | 231 | ||
| 232 | ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); | 232 | ftm_writel(0x00, priv->clksrc_base + FTM_CNTIN); |
| 233 | ftm_writel(~0UL, priv->clksrc_base + FTM_MOD); | 233 | ftm_writel(~0u, priv->clksrc_base + FTM_MOD); |
| 234 | 234 | ||
| 235 | ftm_reset_counter(priv->clksrc_base); | 235 | ftm_reset_counter(priv->clksrc_base); |
| 236 | 236 | ||
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 1582c1c016b0..8014c2307332 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ | |||
| 84 | config ARM_MT8173_CPUFREQ | 84 | config ARM_MT8173_CPUFREQ |
| 85 | bool "Mediatek MT8173 CPUFreq support" | 85 | bool "Mediatek MT8173 CPUFreq support" |
| 86 | depends on ARCH_MEDIATEK && REGULATOR | 86 | depends on ARCH_MEDIATEK && REGULATOR |
| 87 | depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) | ||
| 87 | depends on !CPU_THERMAL || THERMAL=y | 88 | depends on !CPU_THERMAL || THERMAL=y |
| 88 | select PM_OPP | 89 | select PM_OPP |
| 89 | help | 90 | help |
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index adbd1de1cea5..c59bdcb83217 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 | |||
| @@ -5,7 +5,6 @@ | |||
| 5 | config X86_INTEL_PSTATE | 5 | config X86_INTEL_PSTATE |
| 6 | bool "Intel P state control" | 6 | bool "Intel P state control" |
| 7 | depends on X86 | 7 | depends on X86 |
| 8 | select ACPI_PROCESSOR if ACPI | ||
| 9 | help | 8 | help |
| 10 | This driver provides a P state for Intel core processors. | 9 | This driver provides a P state for Intel core processors. |
| 11 | The driver implements an internal governor and will become | 10 | The driver implements an internal governor and will become |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2e31d097def6..001a532e342e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -34,14 +34,10 @@ | |||
| 34 | #include <asm/cpu_device_id.h> | 34 | #include <asm/cpu_device_id.h> |
| 35 | #include <asm/cpufeature.h> | 35 | #include <asm/cpufeature.h> |
| 36 | 36 | ||
| 37 | #if IS_ENABLED(CONFIG_ACPI) | 37 | #define ATOM_RATIOS 0x66a |
| 38 | #include <acpi/processor.h> | 38 | #define ATOM_VIDS 0x66b |
| 39 | #endif | 39 | #define ATOM_TURBO_RATIOS 0x66c |
| 40 | 40 | #define ATOM_TURBO_VIDS 0x66d | |
| 41 | #define BYT_RATIOS 0x66a | ||
| 42 | #define BYT_VIDS 0x66b | ||
| 43 | #define BYT_TURBO_RATIOS 0x66c | ||
| 44 | #define BYT_TURBO_VIDS 0x66d | ||
| 45 | 41 | ||
| 46 | #define FRAC_BITS 8 | 42 | #define FRAC_BITS 8 |
| 47 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | 43 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) |
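FRAC_BITS and int_tofp() establish the fixed-point arithmetic (8 fractional bits) used for the PID gains and performance ratios throughout this driver. The companion helpers are defined nearby but fall outside this hunk; a self-contained sketch of how such helpers are conventionally written (illustrative, not copied from intel_pstate.c):

	/* Fixed point with 8 fractional bits: stored value == real * 256. */
	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	static inline int32_t mul_fp(int32_t x, int32_t y)
	{
		/* widen, multiply, then drop one scale factor */
		return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
	}

	static inline int32_t div_fp(int32_t x, int32_t y)
	{
		/* pre-scale the dividend so the quotient keeps the fraction */
		return ((int64_t)x << FRAC_BITS) / y;
	}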
| @@ -117,9 +113,6 @@ struct cpudata { | |||
| 117 | u64 prev_mperf; | 113 | u64 prev_mperf; |
| 118 | u64 prev_tsc; | 114 | u64 prev_tsc; |
| 119 | struct sample sample; | 115 | struct sample sample; |
| 120 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 121 | struct acpi_processor_performance acpi_perf_data; | ||
| 122 | #endif | ||
| 123 | }; | 116 | }; |
| 124 | 117 | ||
| 125 | static struct cpudata **all_cpu_data; | 118 | static struct cpudata **all_cpu_data; |
| @@ -150,7 +143,6 @@ struct cpu_defaults { | |||
| 150 | static struct pstate_adjust_policy pid_params; | 143 | static struct pstate_adjust_policy pid_params; |
| 151 | static struct pstate_funcs pstate_funcs; | 144 | static struct pstate_funcs pstate_funcs; |
| 152 | static int hwp_active; | 145 | static int hwp_active; |
| 153 | static int no_acpi_perf; | ||
| 154 | 146 | ||
| 155 | struct perf_limits { | 147 | struct perf_limits { |
| 156 | int no_turbo; | 148 | int no_turbo; |
| @@ -163,8 +155,6 @@ struct perf_limits { | |||
| 163 | int max_sysfs_pct; | 155 | int max_sysfs_pct; |
| 164 | int min_policy_pct; | 156 | int min_policy_pct; |
| 165 | int min_sysfs_pct; | 157 | int min_sysfs_pct; |
| 166 | int max_perf_ctl; | ||
| 167 | int min_perf_ctl; | ||
| 168 | }; | 158 | }; |
| 169 | 159 | ||
| 170 | static struct perf_limits performance_limits = { | 160 | static struct perf_limits performance_limits = { |
| @@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = { | |||
| 191 | .max_sysfs_pct = 100, | 181 | .max_sysfs_pct = 100, |
| 192 | .min_policy_pct = 0, | 182 | .min_policy_pct = 0, |
| 193 | .min_sysfs_pct = 0, | 183 | .min_sysfs_pct = 0, |
| 194 | .max_perf_ctl = 0, | ||
| 195 | .min_perf_ctl = 0, | ||
| 196 | }; | 184 | }; |
| 197 | 185 | ||
| 198 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 186 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE |
| @@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits; | |||
| 201 | static struct perf_limits *limits = &powersave_limits; | 189 | static struct perf_limits *limits = &powersave_limits; |
| 202 | #endif | 190 | #endif |
| 203 | 191 | ||
| 204 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 205 | /* | ||
| 206 | * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and | ||
| 207 | * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and | ||
| 208 | * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state | ||
| 209 | * ratio, out of it only high 8 bits are used. For example 0x1700 is setting | ||
| 210 | * target ratio 0x17. The _PSS control value stores in a format which can be | ||
| 211 | * directly written to PERF_CTL MSR. But in intel_pstate driver this shift | ||
| 212 | * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). | ||
| 213 | * This function converts the _PSS control value to intel pstate driver format | ||
| 214 | * for comparison and assignment. | ||
| 215 | */ | ||
| 216 | static int convert_to_native_pstate_format(struct cpudata *cpu, int index) | ||
| 217 | { | ||
| 218 | return cpu->acpi_perf_data.states[index].control >> 8; | ||
| 219 | } | ||
| 220 | |||
| 221 | static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) | ||
| 222 | { | ||
| 223 | struct cpudata *cpu; | ||
| 224 | int ret; | ||
| 225 | bool turbo_absent = false; | ||
| 226 | int max_pstate_index; | ||
| 227 | int min_pss_ctl, max_pss_ctl, turbo_pss_ctl; | ||
| 228 | int i; | ||
| 229 | |||
| 230 | cpu = all_cpu_data[policy->cpu]; | ||
| 231 | |||
| 232 | pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n", | ||
| 233 | cpu->pstate.min_pstate, cpu->pstate.max_pstate, | ||
| 234 | cpu->pstate.turbo_pstate); | ||
| 235 | |||
| 236 | if (!cpu->acpi_perf_data.shared_cpu_map && | ||
| 237 | zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map, | ||
| 238 | GFP_KERNEL, cpu_to_node(policy->cpu))) { | ||
| 239 | return -ENOMEM; | ||
| 240 | } | ||
| 241 | |||
| 242 | ret = acpi_processor_register_performance(&cpu->acpi_perf_data, | ||
| 243 | policy->cpu); | ||
| 244 | if (ret) | ||
| 245 | return ret; | ||
| 246 | |||
| 247 | /* | ||
| 248 | * Check if the control value in _PSS is for PERF_CTL MSR, which should | ||
| 249 | * guarantee that the states returned by it map to the states in our | ||
| 250 | * list directly. | ||
| 251 | */ | ||
| 252 | if (cpu->acpi_perf_data.control_register.space_id != | ||
| 253 | ACPI_ADR_SPACE_FIXED_HARDWARE) | ||
| 254 | return -EIO; | ||
| 255 | |||
| 256 | pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu); | ||
| 257 | for (i = 0; i < cpu->acpi_perf_data.state_count; i++) | ||
| 258 | pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n", | ||
| 259 | (i == cpu->acpi_perf_data.state ? '*' : ' '), i, | ||
| 260 | (u32) cpu->acpi_perf_data.states[i].core_frequency, | ||
| 261 | (u32) cpu->acpi_perf_data.states[i].power, | ||
| 262 | (u32) cpu->acpi_perf_data.states[i].control); | ||
| 263 | |||
| 264 | /* | ||
| 265 | * If there is only one entry _PSS, simply ignore _PSS and continue as | ||
| 266 | * usual without taking _PSS into account | ||
| 267 | */ | ||
| 268 | if (cpu->acpi_perf_data.state_count < 2) | ||
| 269 | return 0; | ||
| 270 | |||
| 271 | turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); | ||
| 272 | min_pss_ctl = convert_to_native_pstate_format(cpu, | ||
| 273 | cpu->acpi_perf_data.state_count - 1); | ||
| 274 | /* Check if there is a turbo freq in _PSS */ | ||
| 275 | if (turbo_pss_ctl <= cpu->pstate.max_pstate && | ||
| 276 | turbo_pss_ctl > cpu->pstate.min_pstate) { | ||
| 277 | pr_debug("intel_pstate: no turbo range exists in _PSS\n"); | ||
| 278 | limits->no_turbo = limits->turbo_disabled = 1; | ||
| 279 | cpu->pstate.turbo_pstate = cpu->pstate.max_pstate; | ||
| 280 | turbo_absent = true; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* Check if the max non turbo p state < Intel P state max */ | ||
| 284 | max_pstate_index = turbo_absent ? 0 : 1; | ||
| 285 | max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index); | ||
| 286 | if (max_pss_ctl < cpu->pstate.max_pstate && | ||
| 287 | max_pss_ctl > cpu->pstate.min_pstate) | ||
| 288 | cpu->pstate.max_pstate = max_pss_ctl; | ||
| 289 | |||
| 290 | /* check If min perf > Intel P State min */ | ||
| 291 | if (min_pss_ctl > cpu->pstate.min_pstate && | ||
| 292 | min_pss_ctl < cpu->pstate.max_pstate) { | ||
| 293 | cpu->pstate.min_pstate = min_pss_ctl; | ||
| 294 | policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling; | ||
| 295 | } | ||
| 296 | |||
| 297 | if (turbo_absent) | ||
| 298 | policy->cpuinfo.max_freq = cpu->pstate.max_pstate * | ||
| 299 | cpu->pstate.scaling; | ||
| 300 | else { | ||
| 301 | policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * | ||
| 302 | cpu->pstate.scaling; | ||
| 303 | /* | ||
| 304 | * The _PSS table doesn't contain whole turbo frequency range. | ||
| 305 | * This just contains +1 MHZ above the max non turbo frequency, | ||
| 306 | * with control value corresponding to max turbo ratio. But | ||
| 307 | * when cpufreq set policy is called, it will call with this | ||
| 308 | * max frequency, which will cause a reduced performance as | ||
| 309 | * this driver uses real max turbo frequency as the max | ||
| 310 | * frequeny. So correct this frequency in _PSS table to | ||
| 311 | * correct max turbo frequency based on the turbo ratio. | ||
| 312 | * Also need to convert to MHz as _PSS freq is in MHz. | ||
| 313 | */ | ||
| 314 | cpu->acpi_perf_data.states[0].core_frequency = | ||
| 315 | turbo_pss_ctl * 100; | ||
| 316 | } | ||
| 317 | |||
| 318 | pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n", | ||
| 319 | cpu->pstate.min_pstate, cpu->pstate.max_pstate, | ||
| 320 | cpu->pstate.turbo_pstate); | ||
| 321 | pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n", | ||
| 322 | policy->cpuinfo.max_freq, policy->cpuinfo.min_freq); | ||
| 323 | |||
| 324 | return 0; | ||
| 325 | } | ||
| 326 | |||
| 327 | static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) | ||
| 328 | { | ||
| 329 | struct cpudata *cpu; | ||
| 330 | |||
| 331 | if (!no_acpi_perf) | ||
| 332 | return 0; | ||
| 333 | |||
| 334 | cpu = all_cpu_data[policy->cpu]; | ||
| 335 | acpi_processor_unregister_performance(policy->cpu); | ||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | |||
| 339 | #else | ||
| 340 | static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy) | ||
| 341 | { | ||
| 342 | return 0; | ||
| 343 | } | ||
| 344 | |||
| 345 | static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy) | ||
| 346 | { | ||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | #endif | ||
| 350 | |||
| 351 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | 192 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, |
| 352 | int deadband, int integral) { | 193 | int deadband, int integral) { |
| 353 | pid->setpoint = setpoint; | 194 | pid->setpoint = setpoint; |
| @@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) | |||
| 687 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); | 528 | wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1); |
| 688 | } | 529 | } |
| 689 | 530 | ||
| 690 | static int byt_get_min_pstate(void) | 531 | static int atom_get_min_pstate(void) |
| 691 | { | 532 | { |
| 692 | u64 value; | 533 | u64 value; |
| 693 | 534 | ||
| 694 | rdmsrl(BYT_RATIOS, value); | 535 | rdmsrl(ATOM_RATIOS, value); |
| 695 | return (value >> 8) & 0x7F; | 536 | return (value >> 8) & 0x7F; |
| 696 | } | 537 | } |
| 697 | 538 | ||
| 698 | static int byt_get_max_pstate(void) | 539 | static int atom_get_max_pstate(void) |
| 699 | { | 540 | { |
| 700 | u64 value; | 541 | u64 value; |
| 701 | 542 | ||
| 702 | rdmsrl(BYT_RATIOS, value); | 543 | rdmsrl(ATOM_RATIOS, value); |
| 703 | return (value >> 16) & 0x7F; | 544 | return (value >> 16) & 0x7F; |
| 704 | } | 545 | } |
| 705 | 546 | ||
| 706 | static int byt_get_turbo_pstate(void) | 547 | static int atom_get_turbo_pstate(void) |
| 707 | { | 548 | { |
| 708 | u64 value; | 549 | u64 value; |
| 709 | 550 | ||
| 710 | rdmsrl(BYT_TURBO_RATIOS, value); | 551 | rdmsrl(ATOM_TURBO_RATIOS, value); |
| 711 | return value & 0x7F; | 552 | return value & 0x7F; |
| 712 | } | 553 | } |
| 713 | 554 | ||
| 714 | static void byt_set_pstate(struct cpudata *cpudata, int pstate) | 555 | static void atom_set_pstate(struct cpudata *cpudata, int pstate) |
| 715 | { | 556 | { |
| 716 | u64 val; | 557 | u64 val; |
| 717 | int32_t vid_fp; | 558 | int32_t vid_fp; |
| @@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) | |||
| 736 | wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); | 577 | wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); |
| 737 | } | 578 | } |
| 738 | 579 | ||
| 739 | #define BYT_BCLK_FREQS 5 | 580 | static int silvermont_get_scaling(void) |
| 740 | static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800}; | ||
| 741 | |||
| 742 | static int byt_get_scaling(void) | ||
| 743 | { | 581 | { |
| 744 | u64 value; | 582 | u64 value; |
| 745 | int i; | 583 | int i; |
| 584 | /* Defined in Table 35-6 from SDM (Sept 2015) */ | ||
| 585 | static int silvermont_freq_table[] = { | ||
| 586 | 83300, 100000, 133300, 116700, 80000}; | ||
| 746 | 587 | ||
| 747 | rdmsrl(MSR_FSB_FREQ, value); | 588 | rdmsrl(MSR_FSB_FREQ, value); |
| 748 | i = value & 0x3; | 589 | i = value & 0x7; |
| 590 | WARN_ON(i > 4); | ||
| 749 | 591 | ||
| 750 | BUG_ON(i > BYT_BCLK_FREQS); | 592 | return silvermont_freq_table[i]; |
| 593 | } | ||
| 751 | 594 | ||
| 752 | return byt_freq_table[i] * 100; | 595 | static int airmont_get_scaling(void) |
| 596 | { | ||
| 597 | u64 value; | ||
| 598 | int i; | ||
| 599 | /* Defined in Table 35-10 from SDM (Sept 2015) */ | ||
| 600 | static int airmont_freq_table[] = { | ||
| 601 | 83300, 100000, 133300, 116700, 80000, | ||
| 602 | 93300, 90000, 88900, 87500}; | ||
| 603 | |||
| 604 | rdmsrl(MSR_FSB_FREQ, value); | ||
| 605 | i = value & 0xF; | ||
| 606 | WARN_ON(i > 8); | ||
| 607 | |||
| 608 | return airmont_freq_table[i]; | ||
| 753 | } | 609 | } |
| 754 | 610 | ||
| 755 | static void byt_get_vid(struct cpudata *cpudata) | 611 | static void atom_get_vid(struct cpudata *cpudata) |
| 756 | { | 612 | { |
| 757 | u64 value; | 613 | u64 value; |
| 758 | 614 | ||
| 759 | rdmsrl(BYT_VIDS, value); | 615 | rdmsrl(ATOM_VIDS, value); |
| 760 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); | 616 | cpudata->vid.min = int_tofp((value >> 8) & 0x7f); |
| 761 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); | 617 | cpudata->vid.max = int_tofp((value >> 16) & 0x7f); |
| 762 | cpudata->vid.ratio = div_fp( | 618 | cpudata->vid.ratio = div_fp( |
| @@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata) | |||
| 764 | int_tofp(cpudata->pstate.max_pstate - | 620 | int_tofp(cpudata->pstate.max_pstate - |
| 765 | cpudata->pstate.min_pstate)); | 621 | cpudata->pstate.min_pstate)); |
| 766 | 622 | ||
| 767 | rdmsrl(BYT_TURBO_VIDS, value); | 623 | rdmsrl(ATOM_TURBO_VIDS, value); |
| 768 | cpudata->vid.turbo = value & 0x7f; | 624 | cpudata->vid.turbo = value & 0x7f; |
| 769 | } | 625 | } |
| 770 | 626 | ||
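atom_get_vid() stores vid.min/vid.max in fixed point and precomputes vid.ratio, the voltage-per-P-state slope, so that setting a P-state can interpolate the voltage ID linearly. Roughly, inside atom_set_pstate(), whose body is largely elided from this hunk (ceiling_fp() is assumed from the driver):

	/* Sketch: linear VID interpolation between min and max P-state. */
	vid_fp = cpudata->vid.min +
		 mul_fp(int_tofp(pstate - cpudata->pstate.min_pstate),
			cpudata->vid.ratio);
	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);	/* round up to a safe voltage */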
| @@ -885,7 +741,7 @@ static struct cpu_defaults core_params = { | |||
| 885 | }, | 741 | }, |
| 886 | }; | 742 | }; |
| 887 | 743 | ||
| 888 | static struct cpu_defaults byt_params = { | 744 | static struct cpu_defaults silvermont_params = { |
| 889 | .pid_policy = { | 745 | .pid_policy = { |
| 890 | .sample_rate_ms = 10, | 746 | .sample_rate_ms = 10, |
| 891 | .deadband = 0, | 747 | .deadband = 0, |
| @@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = { | |||
| 895 | .i_gain_pct = 4, | 751 | .i_gain_pct = 4, |
| 896 | }, | 752 | }, |
| 897 | .funcs = { | 753 | .funcs = { |
| 898 | .get_max = byt_get_max_pstate, | 754 | .get_max = atom_get_max_pstate, |
| 899 | .get_max_physical = byt_get_max_pstate, | 755 | .get_max_physical = atom_get_max_pstate, |
| 900 | .get_min = byt_get_min_pstate, | 756 | .get_min = atom_get_min_pstate, |
| 901 | .get_turbo = byt_get_turbo_pstate, | 757 | .get_turbo = atom_get_turbo_pstate, |
| 902 | .set = byt_set_pstate, | 758 | .set = atom_set_pstate, |
| 903 | .get_scaling = byt_get_scaling, | 759 | .get_scaling = silvermont_get_scaling, |
| 904 | .get_vid = byt_get_vid, | 760 | .get_vid = atom_get_vid, |
| 761 | }, | ||
| 762 | }; | ||
| 763 | |||
| 764 | static struct cpu_defaults airmont_params = { | ||
| 765 | .pid_policy = { | ||
| 766 | .sample_rate_ms = 10, | ||
| 767 | .deadband = 0, | ||
| 768 | .setpoint = 60, | ||
| 769 | .p_gain_pct = 14, | ||
| 770 | .d_gain_pct = 0, | ||
| 771 | .i_gain_pct = 4, | ||
| 772 | }, | ||
| 773 | .funcs = { | ||
| 774 | .get_max = atom_get_max_pstate, | ||
| 775 | .get_max_physical = atom_get_max_pstate, | ||
| 776 | .get_min = atom_get_min_pstate, | ||
| 777 | .get_turbo = atom_get_turbo_pstate, | ||
| 778 | .set = atom_set_pstate, | ||
| 779 | .get_scaling = airmont_get_scaling, | ||
| 780 | .get_vid = atom_get_vid, | ||
| 905 | }, | 781 | }, |
| 906 | }; | 782 | }; |
| 907 | 783 | ||
| @@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | |||
| 938 | * policy, or by cpu specific default values determined through | 814 | * policy, or by cpu specific default values determined through |
| 939 | * experimentation. | 815 | * experimentation. |
| 940 | */ | 816 | */ |
| 941 | if (limits->max_perf_ctl && limits->max_sysfs_pct >= | 817 | max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf)); |
| 942 | limits->max_policy_pct) { | 818 | *max = clamp_t(int, max_perf_adj, |
| 943 | *max = limits->max_perf_ctl; | 819 | cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); |
| 944 | } else { | ||
| 945 | max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), | ||
| 946 | limits->max_perf)); | ||
| 947 | *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate, | ||
| 948 | cpu->pstate.turbo_pstate); | ||
| 949 | } | ||
| 950 | 820 | ||
| 951 | if (limits->min_perf_ctl) { | 821 | min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf)); |
| 952 | *min = limits->min_perf_ctl; | 822 | *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); |
| 953 | } else { | ||
| 954 | min_perf = fp_toint(mul_fp(int_tofp(max_perf), | ||
| 955 | limits->min_perf)); | ||
| 956 | *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf); | ||
| 957 | } | ||
| 958 | } | 823 | } |
| 959 | 824 | ||
| 960 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) | 825 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force) |
| @@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data) | |||
| 1153 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | 1018 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { |
| 1154 | ICPU(0x2a, core_params), | 1019 | ICPU(0x2a, core_params), |
| 1155 | ICPU(0x2d, core_params), | 1020 | ICPU(0x2d, core_params), |
| 1156 | ICPU(0x37, byt_params), | 1021 | ICPU(0x37, silvermont_params), |
| 1157 | ICPU(0x3a, core_params), | 1022 | ICPU(0x3a, core_params), |
| 1158 | ICPU(0x3c, core_params), | 1023 | ICPU(0x3c, core_params), |
| 1159 | ICPU(0x3d, core_params), | 1024 | ICPU(0x3d, core_params), |
| @@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | |||
| 1162 | ICPU(0x45, core_params), | 1027 | ICPU(0x45, core_params), |
| 1163 | ICPU(0x46, core_params), | 1028 | ICPU(0x46, core_params), |
| 1164 | ICPU(0x47, core_params), | 1029 | ICPU(0x47, core_params), |
| 1165 | ICPU(0x4c, byt_params), | 1030 | ICPU(0x4c, airmont_params), |
| 1166 | ICPU(0x4e, core_params), | 1031 | ICPU(0x4e, core_params), |
| 1167 | ICPU(0x4f, core_params), | 1032 | ICPU(0x4f, core_params), |
| 1168 | ICPU(0x5e, core_params), | 1033 | ICPU(0x5e, core_params), |
| @@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) | |||
| 1229 | 1094 | ||
| 1230 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) | 1095 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
| 1231 | { | 1096 | { |
| 1232 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 1233 | struct cpudata *cpu; | ||
| 1234 | int i; | ||
| 1235 | #endif | ||
| 1236 | pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__, | ||
| 1237 | policy->cpuinfo.max_freq, policy->max); | ||
| 1238 | if (!policy->cpuinfo.max_freq) | 1097 | if (!policy->cpuinfo.max_freq) |
| 1239 | return -ENODEV; | 1098 | return -ENODEV; |
| 1240 | 1099 | ||
| @@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1270 | limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), | 1129 | limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), |
| 1271 | int_tofp(100)); | 1130 | int_tofp(100)); |
| 1272 | 1131 | ||
| 1273 | #if IS_ENABLED(CONFIG_ACPI) | ||
| 1274 | cpu = all_cpu_data[policy->cpu]; | ||
| 1275 | for (i = 0; i < cpu->acpi_perf_data.state_count; i++) { | ||
| 1276 | int control; | ||
| 1277 | |||
| 1278 | control = convert_to_native_pstate_format(cpu, i); | ||
| 1279 | if (control * cpu->pstate.scaling == policy->max) | ||
| 1280 | limits->max_perf_ctl = control; | ||
| 1281 | if (control * cpu->pstate.scaling == policy->min) | ||
| 1282 | limits->min_perf_ctl = control; | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n", | ||
| 1286 | policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl, | ||
| 1287 | limits->max_perf_ctl); | ||
| 1288 | #endif | ||
| 1289 | |||
| 1290 | if (hwp_active) | 1132 | if (hwp_active) |
| 1291 | intel_pstate_hwp_set(); | 1133 | intel_pstate_hwp_set(); |
| 1292 | 1134 | ||
| @@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 1341 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; | 1183 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 1342 | policy->cpuinfo.max_freq = | 1184 | policy->cpuinfo.max_freq = |
| 1343 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 1185 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; |
| 1344 | if (!no_acpi_perf) | ||
| 1345 | intel_pstate_init_perf_limits(policy); | ||
| 1346 | /* | ||
| 1347 | * If there is no acpi perf data or error, we ignore and use Intel P | ||
| 1348 | * state calculated limits, So this is not fatal error. | ||
| 1349 | */ | ||
| 1350 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 1186 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
| 1351 | cpumask_set_cpu(policy->cpu, policy->cpus); | 1187 | cpumask_set_cpu(policy->cpu, policy->cpus); |
| 1352 | 1188 | ||
| 1353 | return 0; | 1189 | return 0; |
| 1354 | } | 1190 | } |
| 1355 | 1191 | ||
| 1356 | static int intel_pstate_cpu_exit(struct cpufreq_policy *policy) | ||
| 1357 | { | ||
| 1358 | return intel_pstate_exit_perf_limits(policy); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | static struct cpufreq_driver intel_pstate_driver = { | 1192 | static struct cpufreq_driver intel_pstate_driver = { |
| 1362 | .flags = CPUFREQ_CONST_LOOPS, | 1193 | .flags = CPUFREQ_CONST_LOOPS, |
| 1363 | .verify = intel_pstate_verify_policy, | 1194 | .verify = intel_pstate_verify_policy, |
| 1364 | .setpolicy = intel_pstate_set_policy, | 1195 | .setpolicy = intel_pstate_set_policy, |
| 1365 | .get = intel_pstate_get, | 1196 | .get = intel_pstate_get, |
| 1366 | .init = intel_pstate_cpu_init, | 1197 | .init = intel_pstate_cpu_init, |
| 1367 | .exit = intel_pstate_cpu_exit, | ||
| 1368 | .stop_cpu = intel_pstate_stop_cpu, | 1198 | .stop_cpu = intel_pstate_stop_cpu, |
| 1369 | .name = "intel_pstate", | 1199 | .name = "intel_pstate", |
| 1370 | }; | 1200 | }; |
| @@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs) | |||
| 1406 | } | 1236 | } |
| 1407 | 1237 | ||
| 1408 | #if IS_ENABLED(CONFIG_ACPI) | 1238 | #if IS_ENABLED(CONFIG_ACPI) |
| 1239 | #include <acpi/processor.h> | ||
| 1409 | 1240 | ||
| 1410 | static bool intel_pstate_no_acpi_pss(void) | 1241 | static bool intel_pstate_no_acpi_pss(void) |
| 1411 | { | 1242 | { |
| @@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str) | |||
| 1601 | force_load = 1; | 1432 | force_load = 1; |
| 1602 | if (!strcmp(str, "hwp_only")) | 1433 | if (!strcmp(str, "hwp_only")) |
| 1603 | hwp_only = 1; | 1434 | hwp_only = 1; |
| 1604 | if (!strcmp(str, "no_acpi")) | ||
| 1605 | no_acpi_perf = 1; | ||
| 1606 | |||
| 1607 | return 0; | 1435 | return 0; |
| 1608 | } | 1436 | } |
| 1609 | early_param("intel_pstate", intel_pstate_setup); | 1437 | early_param("intel_pstate", intel_pstate_setup); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 4e55239c7a30..53d22eb73b56 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, | |||
| 729 | return NULL; | 729 | return NULL; |
| 730 | 730 | ||
| 731 | dev_info(chan2dev(chan), | 731 | dev_info(chan2dev(chan), |
| 732 | "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", | 732 | "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
| 733 | __func__, xt->src_start, xt->dst_start, xt->numf, | 733 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
| 734 | xt->frame_size, flags); | 734 | xt->frame_size, flags); |
| 735 | 735 | ||
| 736 | /* | 736 | /* |
| @@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 824 | u32 ctrla; | 824 | u32 ctrla; |
| 825 | u32 ctrlb; | 825 | u32 ctrlb; |
| 826 | 826 | ||
| 827 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", | 827 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", |
| 828 | dest, src, len, flags); | 828 | &dest, &src, len, flags); |
| 829 | 829 | ||
| 830 | if (unlikely(!len)) { | 830 | if (unlikely(!len)) { |
| 831 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | 831 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
| @@ -938,8 +938,8 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
| 938 | void __iomem *vaddr; | 938 | void __iomem *vaddr; |
| 939 | dma_addr_t paddr; | 939 | dma_addr_t paddr; |
| 940 | 940 | ||
| 941 | dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, | 941 | dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, |
| 942 | dest, value, len, flags); | 942 | &dest, value, len, flags); |
| 943 | 943 | ||
| 944 | if (unlikely(!len)) { | 944 | if (unlikely(!len)) { |
| 945 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); | 945 | dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); |
| @@ -1022,8 +1022,8 @@ atc_prep_dma_memset_sg(struct dma_chan *chan, | |||
| 1022 | dma_addr_t dest = sg_dma_address(sg); | 1022 | dma_addr_t dest = sg_dma_address(sg); |
| 1023 | size_t len = sg_dma_len(sg); | 1023 | size_t len = sg_dma_len(sg); |
| 1024 | 1024 | ||
| 1025 | dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n", | 1025 | dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n", |
| 1026 | __func__, dest, len); | 1026 | __func__, &dest, len); |
| 1027 | 1027 | ||
| 1028 | if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { | 1028 | if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { |
| 1029 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n", | 1029 | dev_err(chan2dev(chan), "%s: buffer is not aligned\n", |
| @@ -1439,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | |||
| 1439 | unsigned int periods = buf_len / period_len; | 1439 | unsigned int periods = buf_len / period_len; |
| 1440 | unsigned int i; | 1440 | unsigned int i; |
| 1441 | 1441 | ||
| 1442 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", | 1442 | dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n", |
| 1443 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", | 1443 | direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", |
| 1444 | buf_addr, | 1444 | &buf_addr, |
| 1445 | periods, buf_len, period_len); | 1445 | periods, buf_len, period_len); |
| 1446 | 1446 | ||
| 1447 | if (unlikely(!atslave || !buf_len || !period_len)) { | 1447 | if (unlikely(!atslave || !buf_len || !period_len)) { |
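All the at_hdmac/at_xdmac format-string changes in this series fix one class of bug: dma_addr_t can be 64-bit (e.g. on ARM with LPAE) while %x expects a 32-bit int, so the old strings truncated addresses and triggered printk format warnings. The %pad specifier takes a pointer to the dma_addr_t and prints it at its native width (the value below is illustrative):

	/* Sketch: printing DMA addresses portably. */
	dma_addr_t dest = 0xc0000000;
	pr_debug("bad:  0x%08x\n", (u32)dest);	/* truncated on 64-bit dma_addr_t */
	pr_debug("good: %pad\n", &dest);	/* pass a pointer, not the value  */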
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index d1cfc8c876f9..7f58f06157f6 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
| @@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} | |||
| 385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) | 385 | static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) |
| 386 | { | 386 | { |
| 387 | dev_crit(chan2dev(&atchan->chan_common), | 387 | dev_crit(chan2dev(&atchan->chan_common), |
| 388 | " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", | 388 | " desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n", |
| 389 | lli->saddr, lli->daddr, | 389 | &lli->saddr, &lli->daddr, |
| 390 | lli->ctrla, lli->ctrlb, lli->dscr); | 390 | lli->ctrla, lli->ctrlb, &lli->dscr); |
| 391 | } | 391 | } |
| 392 | 392 | ||
| 393 | 393 | ||
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b5e132d4bae5..7f039de143f0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
| @@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, | |||
| 920 | desc->lld.mbr_cfg = chan_cc; | 920 | desc->lld.mbr_cfg = chan_cc; |
| 921 | 921 | ||
| 922 | dev_dbg(chan2dev(chan), | 922 | dev_dbg(chan2dev(chan), |
| 923 | "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", | 923 | "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
| 924 | __func__, desc->lld.mbr_sa, desc->lld.mbr_da, | 924 | __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, |
| 925 | desc->lld.mbr_ubc, desc->lld.mbr_cfg); | 925 | desc->lld.mbr_ubc, desc->lld.mbr_cfg); |
| 926 | 926 | ||
| 927 | /* Chain lld. */ | 927 | /* Chain lld. */ |
| @@ -953,8 +953,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
| 953 | if ((xt->numf > 1) && (xt->frame_size > 1)) | 953 | if ((xt->numf > 1) && (xt->frame_size > 1)) |
| 954 | return NULL; | 954 | return NULL; |
| 955 | 955 | ||
| 956 | dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", | 956 | dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
| 957 | __func__, xt->src_start, xt->dst_start, xt->numf, | 957 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
| 958 | xt->frame_size, flags); | 958 | xt->frame_size, flags); |
| 959 | 959 | ||
| 960 | src_addr = xt->src_start; | 960 | src_addr = xt->src_start; |
| @@ -1179,8 +1179,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, | |||
| 1179 | desc->lld.mbr_cfg = chan_cc; | 1179 | desc->lld.mbr_cfg = chan_cc; |
| 1180 | 1180 | ||
| 1181 | dev_dbg(chan2dev(chan), | 1181 | dev_dbg(chan2dev(chan), |
| 1182 | "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", | 1182 | "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", |
| 1183 | __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, | 1183 | __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, |
| 1184 | desc->lld.mbr_cfg); | 1184 | desc->lld.mbr_cfg); |
| 1185 | 1185 | ||
| 1186 | return desc; | 1186 | return desc; |
| @@ -1193,8 +1193,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, | |||
| 1193 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); | 1193 | struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); |
| 1194 | struct at_xdmac_desc *desc; | 1194 | struct at_xdmac_desc *desc; |
| 1195 | 1195 | ||
| 1196 | dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", | 1196 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
| 1197 | __func__, dest, len, value, flags); | 1197 | __func__, &dest, len, value, flags); |
| 1198 | 1198 | ||
| 1199 | if (unlikely(!len)) | 1199 | if (unlikely(!len)) |
| 1200 | return NULL; | 1200 | return NULL; |
| @@ -1229,8 +1229,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 1229 | 1229 | ||
| 1230 | /* Prepare descriptors. */ | 1230 | /* Prepare descriptors. */ |
| 1231 | for_each_sg(sgl, sg, sg_len, i) { | 1231 | for_each_sg(sgl, sg, sg_len, i) { |
| 1232 | dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", | 1232 | dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", |
| 1233 | __func__, sg_dma_address(sg), sg_dma_len(sg), | 1233 | __func__, &sg_dma_address(sg), sg_dma_len(sg), |
| 1234 | value, flags); | 1234 | value, flags); |
| 1235 | desc = at_xdmac_memset_create_desc(chan, atchan, | 1235 | desc = at_xdmac_memset_create_desc(chan, atchan, |
| 1236 | sg_dma_address(sg), | 1236 | sg_dma_address(sg), |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 6b03e4e84e6b..0675e268d577 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -107,7 +107,7 @@ | |||
| 107 | 107 | ||
| 108 | /* CCCFG register */ | 108 | /* CCCFG register */ |
| 109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ | 109 | #define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ |
| 110 | #define GET_NUM_QDMACH(x) (x & 0x70 >> 4) /* bits 4-6 */ | 110 | #define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ |
| 111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ | 111 | #define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ |
| 112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ | 112 | #define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ |
| 113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ | 113 | #define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ |
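The GET_NUM_QDMACH() change above fixes a classic operator-precedence bug: >> binds tighter than &, so the unparenthesized form masked with the wrong constant and the macro returned bits 0-2 (the DMA channel count) instead of bits 4-6. Concretely:

	/* Sketch: why the parentheses matter. */
	x & 0x70 >> 4	/* parses as x & (0x70 >> 4), i.e. x & 0x7: bits 0-2 */
	(x & 0x70) >> 4	/* isolates bits 4-6, then shifts them into place   */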
| @@ -1565,7 +1565,7 @@ static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) | |||
| 1565 | struct platform_device *tc_pdev; | 1565 | struct platform_device *tc_pdev; |
| 1566 | int ret; | 1566 | int ret; |
| 1567 | 1567 | ||
| 1568 | if (!tc) | 1568 | if (!IS_ENABLED(CONFIG_OF) || !tc) |
| 1569 | return; | 1569 | return; |
| 1570 | 1570 | ||
| 1571 | tc_pdev = of_find_device_by_node(tc->node); | 1571 | tc_pdev = of_find_device_by_node(tc->node); |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 7058d58ba588..0f6fd42f55ca 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -1462,7 +1462,7 @@ err_firmware: | |||
| 1462 | 1462 | ||
| 1463 | #define EVENT_REMAP_CELLS 3 | 1463 | #define EVENT_REMAP_CELLS 3 |
| 1464 | 1464 | ||
| 1465 | static int __init sdma_event_remap(struct sdma_engine *sdma) | 1465 | static int sdma_event_remap(struct sdma_engine *sdma) |
| 1466 | { | 1466 | { |
| 1467 | struct device_node *np = sdma->dev->of_node; | 1467 | struct device_node *np = sdma->dev->of_node; |
| 1468 | struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); | 1468 | struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index ebd8a5f398b0..f1bcc2a163b3 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
| @@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev) | |||
| 679 | struct usb_dmac *dmac = dev_get_drvdata(dev); | 679 | struct usb_dmac *dmac = dev_get_drvdata(dev); |
| 680 | int i; | 680 | int i; |
| 681 | 681 | ||
| 682 | for (i = 0; i < dmac->n_channels; ++i) | 682 | for (i = 0; i < dmac->n_channels; ++i) { |
| 683 | if (!dmac->channels[i].iomem) | ||
| 684 | break; | ||
| 683 | usb_dmac_chan_halt(&dmac->channels[i]); | 685 | usb_dmac_chan_halt(&dmac->channels[i]); |
| 686 | } | ||
| 684 | 687 | ||
| 685 | return 0; | 688 | return 0; |
| 686 | } | 689 | } |
| @@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev) | |||
| 799 | ret = pm_runtime_get_sync(&pdev->dev); | 802 | ret = pm_runtime_get_sync(&pdev->dev); |
| 800 | if (ret < 0) { | 803 | if (ret < 0) { |
| 801 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); | 804 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); |
| 802 | return ret; | 805 | goto error_pm; |
| 803 | } | 806 | } |
| 804 | 807 | ||
| 805 | ret = usb_dmac_init(dmac); | 808 | ret = usb_dmac_init(dmac); |
| 806 | pm_runtime_put(&pdev->dev); | ||
| 807 | 809 | ||
| 808 | if (ret) { | 810 | if (ret) { |
| 809 | dev_err(&pdev->dev, "failed to reset device\n"); | 811 | dev_err(&pdev->dev, "failed to reset device\n"); |
| @@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev) | |||
| 851 | if (ret < 0) | 853 | if (ret < 0) |
| 852 | goto error; | 854 | goto error; |
| 853 | 855 | ||
| 856 | pm_runtime_put(&pdev->dev); | ||
| 854 | return 0; | 857 | return 0; |
| 855 | 858 | ||
| 856 | error: | 859 | error: |
| 857 | of_dma_controller_free(pdev->dev.of_node); | 860 | of_dma_controller_free(pdev->dev.of_node); |
| 861 | pm_runtime_put(&pdev->dev); | ||
| 862 | error_pm: | ||
| 858 | pm_runtime_disable(&pdev->dev); | 863 | pm_runtime_disable(&pdev->dev); |
| 859 | return ret; | 864 | return ret; |
| 860 | } | 865 | } |
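
The usb-dmac probe rework above makes the runtime-PM reference symmetric: it is taken once with pm_runtime_get_sync(), held across init and registration, and dropped exactly once on both the success and the failure path, with a separate error_pm label for the case where get_sync itself failed. Reduced to a sketch of the control flow as the hunks have it:

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto error_pm;			/* no put on this path */

	/* ... usb_dmac_init(), channel setup, DMA registration ... */

	pm_runtime_put(&pdev->dev);		/* success: drop ref last */
	return 0;

error:
	of_dma_controller_free(pdev->dev.of_node);
	pm_runtime_put(&pdev->dev);		/* failure after get_sync */
error_pm:
	pm_runtime_disable(&pdev->dev);
	return ret;

Moving the put from right after usb_dmac_init() to after registration also keeps the device from runtime-suspending in the middle of probe.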
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 615ce6d464fb..306f75700bf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -389,7 +389,6 @@ struct amdgpu_clock { | |||
| 389 | * Fences. | 389 | * Fences. |
| 390 | */ | 390 | */ |
| 391 | struct amdgpu_fence_driver { | 391 | struct amdgpu_fence_driver { |
| 392 | struct amdgpu_ring *ring; | ||
| 393 | uint64_t gpu_addr; | 392 | uint64_t gpu_addr; |
| 394 | volatile uint32_t *cpu_addr; | 393 | volatile uint32_t *cpu_addr; |
| 395 | /* sync_seq is protected by ring emission lock */ | 394 | /* sync_seq is protected by ring emission lock */ |
| @@ -398,7 +397,7 @@ struct amdgpu_fence_driver { | |||
| 398 | bool initialized; | 397 | bool initialized; |
| 399 | struct amdgpu_irq_src *irq_src; | 398 | struct amdgpu_irq_src *irq_src; |
| 400 | unsigned irq_type; | 399 | unsigned irq_type; |
| 401 | struct delayed_work lockup_work; | 400 | struct timer_list fallback_timer; |
| 402 | wait_queue_head_t fence_queue; | 401 | wait_queue_head_t fence_queue; |
| 403 | }; | 402 | }; |
| 404 | 403 | ||
| @@ -917,8 +916,8 @@ struct amdgpu_ring { | |||
| 917 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 | 916 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 |
| 918 | 917 | ||
| 919 | struct amdgpu_vm_pt { | 918 | struct amdgpu_vm_pt { |
| 920 | struct amdgpu_bo *bo; | 919 | struct amdgpu_bo *bo; |
| 921 | uint64_t addr; | 920 | uint64_t addr; |
| 922 | }; | 921 | }; |
| 923 | 922 | ||
| 924 | struct amdgpu_vm_id { | 923 | struct amdgpu_vm_id { |
| @@ -926,8 +925,6 @@ struct amdgpu_vm_id { | |||
| 926 | uint64_t pd_gpu_addr; | 925 | uint64_t pd_gpu_addr; |
| 927 | /* last flushed PD/PT update */ | 926 | /* last flushed PD/PT update */ |
| 928 | struct fence *flushed_updates; | 927 | struct fence *flushed_updates; |
| 929 | /* last use of vmid */ | ||
| 930 | struct fence *last_id_use; | ||
| 931 | }; | 928 | }; |
| 932 | 929 | ||
| 933 | struct amdgpu_vm { | 930 | struct amdgpu_vm { |
| @@ -957,24 +954,70 @@ struct amdgpu_vm { | |||
| 957 | 954 | ||
| 958 | /* for id and flush management per ring */ | 955 | /* for id and flush management per ring */ |
| 959 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; | 956 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; |
| 957 | /* for interval tree */ | ||
| 958 | spinlock_t it_lock; | ||
| 960 | }; | 959 | }; |
| 961 | 960 | ||
| 962 | struct amdgpu_vm_manager { | 961 | struct amdgpu_vm_manager { |
| 963 | struct fence *active[AMDGPU_NUM_VM]; | 962 | struct { |
| 964 | uint32_t max_pfn; | 963 | struct fence *active; |
| 964 | atomic_long_t owner; | ||
| 965 | } ids[AMDGPU_NUM_VM]; | ||
| 966 | |||
| 967 | uint32_t max_pfn; | ||
| 965 | /* number of VMIDs */ | 968 | /* number of VMIDs */ |
| 966 | unsigned nvm; | 969 | unsigned nvm; |
| 967 | /* vram base address for page table entry */ | 970 | /* vram base address for page table entry */ |
| 968 | u64 vram_base_offset; | 971 | u64 vram_base_offset; |
| 969 | /* is vm enabled? */ | 972 | /* is vm enabled? */ |
| 970 | bool enabled; | 973 | bool enabled; |
| 971 | /* for hw to save the PD addr on suspend/resume */ | ||
| 972 | uint32_t saved_table_addr[AMDGPU_NUM_VM]; | ||
| 973 | /* vm pte handling */ | 974 | /* vm pte handling */ |
| 974 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; | 975 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; |
| 975 | struct amdgpu_ring *vm_pte_funcs_ring; | 976 | struct amdgpu_ring *vm_pte_funcs_ring; |
| 976 | }; | 977 | }; |
| 977 | 978 | ||
| 979 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); | ||
| 980 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 981 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 982 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | ||
| 983 | struct amdgpu_vm *vm, | ||
| 984 | struct list_head *head); | ||
| 985 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
| 986 | struct amdgpu_sync *sync); | ||
| 987 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | ||
| 988 | struct amdgpu_vm *vm, | ||
| 989 | struct fence *updates); | ||
| 990 | void amdgpu_vm_fence(struct amdgpu_device *adev, | ||
| 991 | struct amdgpu_vm *vm, | ||
| 992 | struct fence *fence); | ||
| 993 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); | ||
| 994 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
| 995 | struct amdgpu_vm *vm); | ||
| 996 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
| 997 | struct amdgpu_vm *vm); | ||
| 998 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, | ||
| 999 | struct amdgpu_sync *sync); | ||
| 1000 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
| 1001 | struct amdgpu_bo_va *bo_va, | ||
| 1002 | struct ttm_mem_reg *mem); | ||
| 1003 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
| 1004 | struct amdgpu_bo *bo); | ||
| 1005 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
| 1006 | struct amdgpu_bo *bo); | ||
| 1007 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
| 1008 | struct amdgpu_vm *vm, | ||
| 1009 | struct amdgpu_bo *bo); | ||
| 1010 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
| 1011 | struct amdgpu_bo_va *bo_va, | ||
| 1012 | uint64_t addr, uint64_t offset, | ||
| 1013 | uint64_t size, uint32_t flags); | ||
| 1014 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
| 1015 | struct amdgpu_bo_va *bo_va, | ||
| 1016 | uint64_t addr); | ||
| 1017 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
| 1018 | struct amdgpu_bo_va *bo_va); | ||
| 1019 | int amdgpu_vm_free_job(struct amdgpu_job *job); | ||
| 1020 | |||
| 978 | /* | 1021 | /* |
| 979 | * context related structures | 1022 | * context related structures |
| 980 | */ | 1023 | */ |
| @@ -1211,6 +1254,7 @@ struct amdgpu_cs_parser { | |||
| 1211 | /* relocations */ | 1254 | /* relocations */ |
| 1212 | struct amdgpu_bo_list_entry *vm_bos; | 1255 | struct amdgpu_bo_list_entry *vm_bos; |
| 1213 | struct list_head validated; | 1256 | struct list_head validated; |
| 1257 | struct fence *fence; | ||
| 1214 | 1258 | ||
| 1215 | struct amdgpu_ib *ibs; | 1259 | struct amdgpu_ib *ibs; |
| 1216 | uint32_t num_ibs; | 1260 | uint32_t num_ibs; |
| @@ -1226,7 +1270,7 @@ struct amdgpu_job { | |||
| 1226 | struct amdgpu_device *adev; | 1270 | struct amdgpu_device *adev; |
| 1227 | struct amdgpu_ib *ibs; | 1271 | struct amdgpu_ib *ibs; |
| 1228 | uint32_t num_ibs; | 1272 | uint32_t num_ibs; |
| 1229 | struct mutex job_lock; | 1273 | void *owner; |
| 1230 | struct amdgpu_user_fence uf; | 1274 | struct amdgpu_user_fence uf; |
| 1231 | int (*free_job)(struct amdgpu_job *job); | 1275 | int (*free_job)(struct amdgpu_job *job); |
| 1232 | }; | 1276 | }; |
| @@ -2257,11 +2301,6 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev); | |||
| 2257 | bool amdgpu_card_posted(struct amdgpu_device *adev); | 2301 | bool amdgpu_card_posted(struct amdgpu_device *adev); |
| 2258 | void amdgpu_update_display_priority(struct amdgpu_device *adev); | 2302 | void amdgpu_update_display_priority(struct amdgpu_device *adev); |
| 2259 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); | 2303 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); |
| 2260 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | ||
| 2261 | struct drm_file *filp, | ||
| 2262 | struct amdgpu_ctx *ctx, | ||
| 2263 | struct amdgpu_ib *ibs, | ||
| 2264 | uint32_t num_ibs); | ||
| 2265 | 2304 | ||
| 2266 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); | 2305 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); |
| 2267 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | 2306 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, |
| @@ -2319,49 +2358,6 @@ long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 2319 | unsigned long arg); | 2358 | unsigned long arg); |
| 2320 | 2359 | ||
| 2321 | /* | 2360 | /* |
| 2322 | * vm | ||
| 2323 | */ | ||
| 2324 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 2325 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); | ||
| 2326 | struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, | ||
| 2327 | struct amdgpu_vm *vm, | ||
| 2328 | struct list_head *head); | ||
| 2329 | int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | ||
| 2330 | struct amdgpu_sync *sync); | ||
| 2331 | void amdgpu_vm_flush(struct amdgpu_ring *ring, | ||
| 2332 | struct amdgpu_vm *vm, | ||
| 2333 | struct fence *updates); | ||
| 2334 | void amdgpu_vm_fence(struct amdgpu_device *adev, | ||
| 2335 | struct amdgpu_vm *vm, | ||
| 2336 | struct amdgpu_fence *fence); | ||
| 2337 | uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr); | ||
| 2338 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | ||
| 2339 | struct amdgpu_vm *vm); | ||
| 2340 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | ||
| 2341 | struct amdgpu_vm *vm); | ||
| 2342 | int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, | ||
| 2343 | struct amdgpu_vm *vm, struct amdgpu_sync *sync); | ||
| 2344 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, | ||
| 2345 | struct amdgpu_bo_va *bo_va, | ||
| 2346 | struct ttm_mem_reg *mem); | ||
| 2347 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, | ||
| 2348 | struct amdgpu_bo *bo); | ||
| 2349 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, | ||
| 2350 | struct amdgpu_bo *bo); | ||
| 2351 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | ||
| 2352 | struct amdgpu_vm *vm, | ||
| 2353 | struct amdgpu_bo *bo); | ||
| 2354 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | ||
| 2355 | struct amdgpu_bo_va *bo_va, | ||
| 2356 | uint64_t addr, uint64_t offset, | ||
| 2357 | uint64_t size, uint32_t flags); | ||
| 2358 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | ||
| 2359 | struct amdgpu_bo_va *bo_va, | ||
| 2360 | uint64_t addr); | ||
| 2361 | void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | ||
| 2362 | struct amdgpu_bo_va *bo_va); | ||
| 2363 | int amdgpu_vm_free_job(struct amdgpu_job *job); | ||
| 2364 | /* | ||
| 2365 | * functions used by amdgpu_encoder.c | 2361 | * functions used by amdgpu_encoder.c |
| 2366 | */ | 2362 | */ |
| 2367 | struct amdgpu_afmt_acr { | 2363 | struct amdgpu_afmt_acr { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dfc4d02c7a38..3afcf0237c25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -127,30 +127,6 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | |||
| 127 | return 0; | 127 | return 0; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | ||
| 131 | struct drm_file *filp, | ||
| 132 | struct amdgpu_ctx *ctx, | ||
| 133 | struct amdgpu_ib *ibs, | ||
| 134 | uint32_t num_ibs) | ||
| 135 | { | ||
| 136 | struct amdgpu_cs_parser *parser; | ||
| 137 | int i; | ||
| 138 | |||
| 139 | parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL); | ||
| 140 | if (!parser) | ||
| 141 | return NULL; | ||
| 142 | |||
| 143 | parser->adev = adev; | ||
| 144 | parser->filp = filp; | ||
| 145 | parser->ctx = ctx; | ||
| 146 | parser->ibs = ibs; | ||
| 147 | parser->num_ibs = num_ibs; | ||
| 148 | for (i = 0; i < num_ibs; i++) | ||
| 149 | ibs[i].ctx = ctx; | ||
| 150 | |||
| 151 | return parser; | ||
| 152 | } | ||
| 153 | |||
| 154 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | 130 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) |
| 155 | { | 131 | { |
| 156 | union drm_amdgpu_cs *cs = data; | 132 | union drm_amdgpu_cs *cs = data; |
| @@ -463,8 +439,18 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, | |||
| 463 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; | 439 | return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; |
| 464 | } | 440 | } |
| 465 | 441 | ||
| 466 | static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff) | 442 | /** |
| 443 | * cs_parser_fini() - clean parser states | ||
| 444 | * @parser: parser structure holding parsing context. | ||
| 445 | * @error: error number | ||
| 446 | * | ||
| 447 | * If error is set, then unvalidate buffer, otherwise just free memory | ||
| 448 | * used by parsing context. | ||
| 449 | **/ | ||
| 450 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | ||
| 467 | { | 451 | { |
| 452 | unsigned i; | ||
| 453 | |||
| 468 | if (!error) { | 454 | if (!error) { |
| 469 | /* Sort the buffer list from the smallest to largest buffer, | 455 | /* Sort the buffer list from the smallest to largest buffer, |
| 470 | * which affects the order of buffers in the LRU list. | 456 | * which affects the order of buffers in the LRU list. |
| @@ -479,17 +465,14 @@ static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int err | |||
| 479 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | 465 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); |
| 480 | 466 | ||
| 481 | ttm_eu_fence_buffer_objects(&parser->ticket, | 467 | ttm_eu_fence_buffer_objects(&parser->ticket, |
| 482 | &parser->validated, | 468 | &parser->validated, |
| 483 | &parser->ibs[parser->num_ibs-1].fence->base); | 469 | parser->fence); |
| 484 | } else if (backoff) { | 470 | } else if (backoff) { |
| 485 | ttm_eu_backoff_reservation(&parser->ticket, | 471 | ttm_eu_backoff_reservation(&parser->ticket, |
| 486 | &parser->validated); | 472 | &parser->validated); |
| 487 | } | 473 | } |
| 488 | } | 474 | fence_put(parser->fence); |
| 489 | 475 | ||
| 490 | static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | ||
| 491 | { | ||
| 492 | unsigned i; | ||
| 493 | if (parser->ctx) | 476 | if (parser->ctx) |
| 494 | amdgpu_ctx_put(parser->ctx); | 477 | amdgpu_ctx_put(parser->ctx); |
| 495 | if (parser->bo_list) | 478 | if (parser->bo_list) |
| @@ -499,31 +482,12 @@ static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser) | |||
| 499 | for (i = 0; i < parser->nchunks; i++) | 482 | for (i = 0; i < parser->nchunks; i++) |
| 500 | drm_free_large(parser->chunks[i].kdata); | 483 | drm_free_large(parser->chunks[i].kdata); |
| 501 | kfree(parser->chunks); | 484 | kfree(parser->chunks); |
| 502 | if (!amdgpu_enable_scheduler) | 485 | if (parser->ibs) |
| 503 | { | 486 | for (i = 0; i < parser->num_ibs; i++) |
| 504 | if (parser->ibs) | 487 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); |
| 505 | for (i = 0; i < parser->num_ibs; i++) | 488 | kfree(parser->ibs); |
| 506 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | 489 | if (parser->uf.bo) |
| 507 | kfree(parser->ibs); | 490 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); |
| 508 | if (parser->uf.bo) | ||
| 509 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | ||
| 510 | } | ||
| 511 | |||
| 512 | kfree(parser); | ||
| 513 | } | ||
| 514 | |||
| 515 | /** | ||
| 516 | * cs_parser_fini() - clean parser states | ||
| 517 | * @parser: parser structure holding parsing context. | ||
| 518 | * @error: error number | ||
| 519 | * | ||
| 520 | * If error is set, then unvalidate buffer, otherwise just free memory | ||
| 521 | * used by parsing context. | ||
| 522 | **/ | ||
| 523 | static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff) | ||
| 524 | { | ||
| 525 | amdgpu_cs_parser_fini_early(parser, error, backoff); | ||
| 526 | amdgpu_cs_parser_fini_late(parser); | ||
| 527 | } | 491 | } |
| 528 | 492 | ||
| 529 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | 493 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, |
| @@ -610,15 +574,9 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev, | |||
| 610 | } | 574 | } |
| 611 | 575 | ||
| 612 | r = amdgpu_bo_vm_update_pte(parser, vm); | 576 | r = amdgpu_bo_vm_update_pte(parser, vm); |
| 613 | if (r) { | 577 | if (!r) |
| 614 | goto out; | 578 | amdgpu_cs_sync_rings(parser); |
| 615 | } | ||
| 616 | amdgpu_cs_sync_rings(parser); | ||
| 617 | if (!amdgpu_enable_scheduler) | ||
| 618 | r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs, | ||
| 619 | parser->filp); | ||
| 620 | 579 | ||
| 621 | out: | ||
| 622 | return r; | 580 | return r; |
| 623 | } | 581 | } |
| 624 | 582 | ||
| @@ -828,36 +786,36 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 828 | union drm_amdgpu_cs *cs = data; | 786 | union drm_amdgpu_cs *cs = data; |
| 829 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 787 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
| 830 | struct amdgpu_vm *vm = &fpriv->vm; | 788 | struct amdgpu_vm *vm = &fpriv->vm; |
| 831 | struct amdgpu_cs_parser *parser; | 789 | struct amdgpu_cs_parser parser = {}; |
| 832 | bool reserved_buffers = false; | 790 | bool reserved_buffers = false; |
| 833 | int i, r; | 791 | int i, r; |
| 834 | 792 | ||
| 835 | if (!adev->accel_working) | 793 | if (!adev->accel_working) |
| 836 | return -EBUSY; | 794 | return -EBUSY; |
| 837 | 795 | ||
| 838 | parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0); | 796 | parser.adev = adev; |
| 839 | if (!parser) | 797 | parser.filp = filp; |
| 840 | return -ENOMEM; | 798 | |
| 841 | r = amdgpu_cs_parser_init(parser, data); | 799 | r = amdgpu_cs_parser_init(&parser, data); |
| 842 | if (r) { | 800 | if (r) { |
| 843 | DRM_ERROR("Failed to initialize parser !\n"); | 801 | DRM_ERROR("Failed to initialize parser !\n"); |
| 844 | amdgpu_cs_parser_fini(parser, r, false); | 802 | amdgpu_cs_parser_fini(&parser, r, false); |
| 845 | r = amdgpu_cs_handle_lockup(adev, r); | 803 | r = amdgpu_cs_handle_lockup(adev, r); |
| 846 | return r; | 804 | return r; |
| 847 | } | 805 | } |
| 848 | mutex_lock(&vm->mutex); | 806 | mutex_lock(&vm->mutex); |
| 849 | r = amdgpu_cs_parser_relocs(parser); | 807 | r = amdgpu_cs_parser_relocs(&parser); |
| 850 | if (r == -ENOMEM) | 808 | if (r == -ENOMEM) |
| 851 | DRM_ERROR("Not enough memory for command submission!\n"); | 809 | DRM_ERROR("Not enough memory for command submission!\n"); |
| 852 | else if (r && r != -ERESTARTSYS) | 810 | else if (r && r != -ERESTARTSYS) |
| 853 | DRM_ERROR("Failed to process the buffer list %d!\n", r); | 811 | DRM_ERROR("Failed to process the buffer list %d!\n", r); |
| 854 | else if (!r) { | 812 | else if (!r) { |
| 855 | reserved_buffers = true; | 813 | reserved_buffers = true; |
| 856 | r = amdgpu_cs_ib_fill(adev, parser); | 814 | r = amdgpu_cs_ib_fill(adev, &parser); |
| 857 | } | 815 | } |
| 858 | 816 | ||
| 859 | if (!r) { | 817 | if (!r) { |
| 860 | r = amdgpu_cs_dependencies(adev, parser); | 818 | r = amdgpu_cs_dependencies(adev, &parser); |
| 861 | if (r) | 819 | if (r) |
| 862 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); | 820 | DRM_ERROR("Failed in the dependencies handling %d!\n", r); |
| 863 | } | 821 | } |
| @@ -865,62 +823,71 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) | |||
| 865 | if (r) | 823 | if (r) |
| 866 | goto out; | 824 | goto out; |
| 867 | 825 | ||
| 868 | for (i = 0; i < parser->num_ibs; i++) | 826 | for (i = 0; i < parser.num_ibs; i++) |
| 869 | trace_amdgpu_cs(parser, i); | 827 | trace_amdgpu_cs(&parser, i); |
| 870 | 828 | ||
| 871 | r = amdgpu_cs_ib_vm_chunk(adev, parser); | 829 | r = amdgpu_cs_ib_vm_chunk(adev, &parser); |
| 872 | if (r) | 830 | if (r) |
| 873 | goto out; | 831 | goto out; |
| 874 | 832 | ||
| 875 | if (amdgpu_enable_scheduler && parser->num_ibs) { | 833 | if (amdgpu_enable_scheduler && parser.num_ibs) { |
| 834 | struct amdgpu_ring * ring = parser.ibs->ring; | ||
| 835 | struct amd_sched_fence *fence; | ||
| 876 | struct amdgpu_job *job; | 836 | struct amdgpu_job *job; |
| 877 | struct amdgpu_ring * ring = parser->ibs->ring; | 837 | |
| 878 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); | 838 | job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL); |
| 879 | if (!job) { | 839 | if (!job) { |
| 880 | r = -ENOMEM; | 840 | r = -ENOMEM; |
| 881 | goto out; | 841 | goto out; |
| 882 | } | 842 | } |
| 843 | |||
| 883 | job->base.sched = &ring->sched; | 844 | job->base.sched = &ring->sched; |
| 884 | job->base.s_entity = &parser->ctx->rings[ring->idx].entity; | 845 | job->base.s_entity = &parser.ctx->rings[ring->idx].entity; |
| 885 | job->adev = parser->adev; | 846 | job->adev = parser.adev; |
| 886 | job->ibs = parser->ibs; | 847 | job->owner = parser.filp; |
| 887 | job->num_ibs = parser->num_ibs; | 848 | job->free_job = amdgpu_cs_free_job; |
| 888 | job->base.owner = parser->filp; | 849 | |
| 889 | mutex_init(&job->job_lock); | 850 | job->ibs = parser.ibs; |
| 851 | job->num_ibs = parser.num_ibs; | ||
| 852 | parser.ibs = NULL; | ||
| 853 | parser.num_ibs = 0; | ||
| 854 | |||
| 890 | if (job->ibs[job->num_ibs - 1].user) { | 855 | if (job->ibs[job->num_ibs - 1].user) { |
| 891 | memcpy(&job->uf, &parser->uf, | 856 | job->uf = parser.uf; |
| 892 | sizeof(struct amdgpu_user_fence)); | ||
| 893 | job->ibs[job->num_ibs - 1].user = &job->uf; | 857 | job->ibs[job->num_ibs - 1].user = &job->uf; |
| 858 | parser.uf.bo = NULL; | ||
| 894 | } | 859 | } |
| 895 | 860 | ||
| 896 | job->free_job = amdgpu_cs_free_job; | 861 | fence = amd_sched_fence_create(job->base.s_entity, |
| 897 | mutex_lock(&job->job_lock); | 862 | parser.filp); |
| 898 | r = amd_sched_entity_push_job(&job->base); | 863 | if (!fence) { |
| 899 | if (r) { | 864 | r = -ENOMEM; |
| 900 | mutex_unlock(&job->job_lock); | ||
| 901 | amdgpu_cs_free_job(job); | 865 | amdgpu_cs_free_job(job); |
| 902 | kfree(job); | 866 | kfree(job); |
| 903 | goto out; | 867 | goto out; |
| 904 | } | 868 | } |
| 905 | cs->out.handle = | 869 | job->base.s_fence = fence; |
| 906 | amdgpu_ctx_add_fence(parser->ctx, ring, | 870 | parser.fence = fence_get(&fence->base); |
| 907 | &job->base.s_fence->base); | ||
| 908 | parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle; | ||
| 909 | 871 | ||
| 910 | list_sort(NULL, &parser->validated, cmp_size_smaller_first); | 872 | cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring, |
| 911 | ttm_eu_fence_buffer_objects(&parser->ticket, | 873 | &fence->base); |
| 912 | &parser->validated, | 874 | job->ibs[job->num_ibs - 1].sequence = cs->out.handle; |
| 913 | &job->base.s_fence->base); | ||
| 914 | 875 | ||
| 915 | mutex_unlock(&job->job_lock); | 876 | trace_amdgpu_cs_ioctl(job); |
| 916 | amdgpu_cs_parser_fini_late(parser); | 877 | amd_sched_entity_push_job(&job->base); |
| 917 | mutex_unlock(&vm->mutex); | 878 | |
| 918 | return 0; | 879 | } else { |
| 880 | struct amdgpu_fence *fence; | ||
| 881 | |||
| 882 | r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs, | ||
| 883 | parser.filp); | ||
| 884 | fence = parser.ibs[parser.num_ibs - 1].fence; | ||
| 885 | parser.fence = fence_get(&fence->base); | ||
| 886 | cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence; | ||
| 919 | } | 887 | } |
| 920 | 888 | ||
| 921 | cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence; | ||
| 922 | out: | 889 | out: |
| 923 | amdgpu_cs_parser_fini(parser, r, reserved_buffers); | 890 | amdgpu_cs_parser_fini(&parser, r, reserved_buffers); |
| 924 | mutex_unlock(&vm->mutex); | 891 | mutex_unlock(&vm->mutex); |
| 925 | r = amdgpu_cs_handle_lockup(adev, r); | 892 | r = amdgpu_cs_handle_lockup(adev, r); |
| 926 | return r; | 893 | return r; |
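
The amdgpu_cs_ioctl rework above carries two ownership ideas worth spelling out. First, the parser moves from a kmalloc'ed object to a zero-initialized stack variable, which is what lets the _early/_late fini split collapse back into one amdgpu_cs_parser_fini(). Second, both submission paths deposit their result in a single parser.fence reference that the common fini path always drops. A hedged sketch of the hand-off, using the names from the hunks:

	struct amdgpu_cs_parser parser = {};	/* on stack, zeroed */

	/* scheduler path: the job takes over the ibs, so clear the
	 * parser's copy to keep fini() from freeing them twice */
	job->ibs = parser.ibs;
	job->num_ibs = parser.num_ibs;
	parser.ibs = NULL;
	parser.num_ibs = 0;

	/* either path: hold one extra reference for buffer fencing;
	 * amdgpu_cs_parser_fini() ends with fence_put(parser->fence) */
	parser.fence = fence_get(&fence->base);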
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 257d72205bb5..3671f9f220bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | |||
| @@ -47,6 +47,9 @@ | |||
| 47 | * that the relevant GPU caches have been flushed. | 47 |
| 48 | */ | 48 | */ |
| 49 | 49 | ||
| 50 | static struct kmem_cache *amdgpu_fence_slab; | ||
| 51 | static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0); | ||
| 52 | |||
| 50 | /** | 53 | /** |
| 51 | * amdgpu_fence_write - write a fence value | 54 | * amdgpu_fence_write - write a fence value |
| 52 | * | 55 | * |
| @@ -85,24 +88,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) | |||
| 85 | } | 88 | } |
| 86 | 89 | ||
| 87 | /** | 90 | /** |
| 88 | * amdgpu_fence_schedule_check - schedule lockup check | ||
| 89 | * | ||
| 90 | * @ring: pointer to struct amdgpu_ring | ||
| 91 | * | ||
| 92 | * Queues a delayed work item to check for lockups. | ||
| 93 | */ | ||
| 94 | static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring) | ||
| 95 | { | ||
| 96 | /* | ||
| 97 | * Do not reset the timer here with mod_delayed_work, | ||
| 98 | * this can livelock in an interaction with TTM delayed destroy. | ||
| 99 | */ | ||
| 100 | queue_delayed_work(system_power_efficient_wq, | ||
| 101 | &ring->fence_drv.lockup_work, | ||
| 102 | AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 103 | } | ||
| 104 | |||
| 105 | /** | ||
| 106 | * amdgpu_fence_emit - emit a fence on the requested ring | 91 | * amdgpu_fence_emit - emit a fence on the requested ring |
| 107 | * | 92 | * |
| 108 | * @ring: ring the fence is associated with | 93 | * @ring: ring the fence is associated with |
| @@ -118,7 +103,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | |||
| 118 | struct amdgpu_device *adev = ring->adev; | 103 | struct amdgpu_device *adev = ring->adev; |
| 119 | 104 | ||
| 120 | /* we are protected by the ring emission mutex */ | 105 | /* we are protected by the ring emission mutex */ |
| 121 | *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); | 106 | *fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); |
| 122 | if ((*fence) == NULL) { | 107 | if ((*fence) == NULL) { |
| 123 | return -ENOMEM; | 108 | return -ENOMEM; |
| 124 | } | 109 | } |
| @@ -132,11 +117,23 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, | |||
| 132 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, | 117 | amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, |
| 133 | (*fence)->seq, | 118 | (*fence)->seq, |
| 134 | AMDGPU_FENCE_FLAG_INT); | 119 | AMDGPU_FENCE_FLAG_INT); |
| 135 | trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq); | ||
| 136 | return 0; | 120 | return 0; |
| 137 | } | 121 | } |
| 138 | 122 | ||
| 139 | /** | 123 | /** |
| 124 | * amdgpu_fence_schedule_fallback - schedule fallback check | ||
| 125 | * | ||
| 126 | * @ring: pointer to struct amdgpu_ring | ||
| 127 | * | ||
| 128 | * Start a timer as fallback to our interrupts. | ||
| 129 | */ | ||
| 130 | static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring) | ||
| 131 | { | ||
| 132 | mod_timer(&ring->fence_drv.fallback_timer, | ||
| 133 | jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT); | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 140 | * amdgpu_fence_activity - check for fence activity | 137 | * amdgpu_fence_activity - check for fence activity |
| 141 | * | 138 | * |
| 142 | * @ring: pointer to struct amdgpu_ring | 139 | * @ring: pointer to struct amdgpu_ring |
| @@ -202,45 +199,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring) | |||
| 202 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); | 199 | } while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq); |
| 203 | 200 | ||
| 204 | if (seq < last_emitted) | 201 | if (seq < last_emitted) |
| 205 | amdgpu_fence_schedule_check(ring); | 202 | amdgpu_fence_schedule_fallback(ring); |
| 206 | 203 | ||
| 207 | return wake; | 204 | return wake; |
| 208 | } | 205 | } |
| 209 | 206 | ||
| 210 | /** | 207 | /** |
| 211 | * amdgpu_fence_check_lockup - check for hardware lockup | 208 | * amdgpu_fence_process - process a fence |
| 212 | * | 209 | * |
| 213 | * @work: delayed work item | 210 | * @adev: amdgpu_device pointer |
| 211 | * @ring: ring index the fence is associated with | ||
| 214 | * | 212 | * |
| 215 | * Checks for fence activity and if there is none probe | 213 | * Checks the current fence value and wakes the fence queue |
| 216 | * the hardware if a lockup occured. | 214 | * if the sequence number has increased (all asics). |
| 217 | */ | 215 | */ |
| 218 | static void amdgpu_fence_check_lockup(struct work_struct *work) | 216 | void amdgpu_fence_process(struct amdgpu_ring *ring) |
| 219 | { | 217 | { |
| 220 | struct amdgpu_fence_driver *fence_drv; | ||
| 221 | struct amdgpu_ring *ring; | ||
| 222 | |||
| 223 | fence_drv = container_of(work, struct amdgpu_fence_driver, | ||
| 224 | lockup_work.work); | ||
| 225 | ring = fence_drv->ring; | ||
| 226 | |||
| 227 | if (amdgpu_fence_activity(ring)) | 218 | if (amdgpu_fence_activity(ring)) |
| 228 | wake_up_all(&ring->fence_drv.fence_queue); | 219 | wake_up_all(&ring->fence_drv.fence_queue); |
| 229 | } | 220 | } |
| 230 | 221 | ||
| 231 | /** | 222 | /** |
| 232 | * amdgpu_fence_process - process a fence | 223 | * amdgpu_fence_fallback - fallback for hardware interrupts |
| 233 | * | 224 | * |
| 234 | * @adev: amdgpu_device pointer | 225 | * @work: delayed work item |
| 235 | * @ring: ring index the fence is associated with | ||
| 236 | * | 226 | * |
| 237 | * Checks the current fence value and wakes the fence queue | 227 | * Checks for fence activity. |
| 238 | * if the sequence number has increased (all asics). | ||
| 239 | */ | 228 | */ |
| 240 | void amdgpu_fence_process(struct amdgpu_ring *ring) | 229 | static void amdgpu_fence_fallback(unsigned long arg) |
| 241 | { | 230 | { |
| 242 | if (amdgpu_fence_activity(ring)) | 231 | struct amdgpu_ring *ring = (void *)arg; |
| 243 | wake_up_all(&ring->fence_drv.fence_queue); | 232 | |
| 233 | amdgpu_fence_process(ring); | ||
| 244 | } | 234 | } |
| 245 | 235 | ||
| 246 | /** | 236 | /** |
| @@ -290,7 +280,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq) | |||
| 290 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) | 280 | if (atomic64_read(&ring->fence_drv.last_seq) >= seq) |
| 291 | return 0; | 281 | return 0; |
| 292 | 282 | ||
| 293 | amdgpu_fence_schedule_check(ring); | 283 | amdgpu_fence_schedule_fallback(ring); |
| 294 | wait_event(ring->fence_drv.fence_queue, ( | 284 | wait_event(ring->fence_drv.fence_queue, ( |
| 295 | (signaled = amdgpu_fence_seq_signaled(ring, seq)))); | 285 | (signaled = amdgpu_fence_seq_signaled(ring, seq)))); |
| 296 | 286 | ||
| @@ -491,9 +481,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
| 491 | atomic64_set(&ring->fence_drv.last_seq, 0); | 481 | atomic64_set(&ring->fence_drv.last_seq, 0); |
| 492 | ring->fence_drv.initialized = false; | 482 | ring->fence_drv.initialized = false; |
| 493 | 483 | ||
| 494 | INIT_DELAYED_WORK(&ring->fence_drv.lockup_work, | 484 | setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, |
| 495 | amdgpu_fence_check_lockup); | 485 | (unsigned long)ring); |
| 496 | ring->fence_drv.ring = ring; | ||
| 497 | 486 | ||
| 498 | init_waitqueue_head(&ring->fence_drv.fence_queue); | 487 | init_waitqueue_head(&ring->fence_drv.fence_queue); |
| 499 | 488 | ||
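
Across this file the delayed-work lockup check becomes a plain timer_list fallback for missed fence interrupts. Collecting the hunks above and below into one lifecycle sketch (the 4.x timer API, before the later timer_setup() conversion):

	setup_timer(&ring->fence_drv.fallback_timer,	/* init, once */
		    amdgpu_fence_fallback, (unsigned long)ring);

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		mod_timer(&ring->fence_drv.fallback_timer,  /* (re)arm */
			  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);

	del_timer_sync(&ring->fence_drv.fallback_timer);    /* teardown:
				waits for a running handler to finish */

Unlike queue_delayed_work(), mod_timer() on an already pending timer just moves its expiry, which is why the enable_signaling hunk checks timer_pending() first: an already armed fallback should not be pushed further into the future.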
| @@ -536,6 +525,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring) | |||
| 536 | */ | 525 | */ |
| 537 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) | 526 | int amdgpu_fence_driver_init(struct amdgpu_device *adev) |
| 538 | { | 527 | { |
| 528 | if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) { | ||
| 529 | amdgpu_fence_slab = kmem_cache_create( | ||
| 530 | "amdgpu_fence", sizeof(struct amdgpu_fence), 0, | ||
| 531 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 532 | if (!amdgpu_fence_slab) | ||
| 533 | return -ENOMEM; | ||
| 534 | } | ||
| 539 | if (amdgpu_debugfs_fence_init(adev)) | 535 | if (amdgpu_debugfs_fence_init(adev)) |
| 540 | dev_err(adev->dev, "fence debugfs file creation failed\n"); | 536 | dev_err(adev->dev, "fence debugfs file creation failed\n"); |
| 541 | 537 | ||
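
Fence allocations move from kmalloc() to a dedicated slab, created by the first device and destroyed with the last via a global refcount. The pattern, as a sketch with the names from the hunks:

	static struct kmem_cache *amdgpu_fence_slab;
	static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);

	/* driver init: first user creates the cache */
	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
		amdgpu_fence_slab = kmem_cache_create("amdgpu_fence",
				sizeof(struct amdgpu_fence), 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!amdgpu_fence_slab)
			return -ENOMEM;
	}

	/* driver fini: last user tears it down */
	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
		kmem_cache_destroy(amdgpu_fence_slab);

This is also why the fence_ops further down gain a .release callback: with release left NULL the fence core frees the object with plain kfree(), which would be wrong for a slab-backed fence, so amdgpu_fence_release() hands it back with kmem_cache_free().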
| @@ -554,9 +550,12 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
| 554 | { | 550 | { |
| 555 | int i, r; | 551 | int i, r; |
| 556 | 552 | ||
| 553 | if (atomic_dec_and_test(&amdgpu_fence_slab_ref)) | ||
| 554 | kmem_cache_destroy(amdgpu_fence_slab); | ||
| 557 | mutex_lock(&adev->ring_lock); | 555 | mutex_lock(&adev->ring_lock); |
| 558 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { | 556 | for (i = 0; i < AMDGPU_MAX_RINGS; i++) { |
| 559 | struct amdgpu_ring *ring = adev->rings[i]; | 557 | struct amdgpu_ring *ring = adev->rings[i]; |
| 558 | |||
| 560 | if (!ring || !ring->fence_drv.initialized) | 559 | if (!ring || !ring->fence_drv.initialized) |
| 561 | continue; | 560 | continue; |
| 562 | r = amdgpu_fence_wait_empty(ring); | 561 | r = amdgpu_fence_wait_empty(ring); |
| @@ -568,6 +567,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) | |||
| 568 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, | 567 | amdgpu_irq_put(adev, ring->fence_drv.irq_src, |
| 569 | ring->fence_drv.irq_type); | 568 | ring->fence_drv.irq_type); |
| 570 | amd_sched_fini(&ring->sched); | 569 | amd_sched_fini(&ring->sched); |
| 570 | del_timer_sync(&ring->fence_drv.fallback_timer); | ||
| 571 | ring->fence_drv.initialized = false; | 571 | ring->fence_drv.initialized = false; |
| 572 | } | 572 | } |
| 573 | mutex_unlock(&adev->ring_lock); | 573 | mutex_unlock(&adev->ring_lock); |
| @@ -751,18 +751,25 @@ static bool amdgpu_fence_enable_signaling(struct fence *f) | |||
| 751 | fence->fence_wake.func = amdgpu_fence_check_signaled; | 751 | fence->fence_wake.func = amdgpu_fence_check_signaled; |
| 752 | __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); | 752 | __add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake); |
| 753 | fence_get(f); | 753 | fence_get(f); |
| 754 | amdgpu_fence_schedule_check(ring); | 754 | if (!timer_pending(&ring->fence_drv.fallback_timer)) |
| 755 | amdgpu_fence_schedule_fallback(ring); | ||
| 755 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); | 756 | FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx); |
| 756 | return true; | 757 | return true; |
| 757 | } | 758 | } |
| 758 | 759 | ||
| 760 | static void amdgpu_fence_release(struct fence *f) | ||
| 761 | { | ||
| 762 | struct amdgpu_fence *fence = to_amdgpu_fence(f); | ||
| 763 | kmem_cache_free(amdgpu_fence_slab, fence); | ||
| 764 | } | ||
| 765 | |||
| 759 | const struct fence_ops amdgpu_fence_ops = { | 766 | const struct fence_ops amdgpu_fence_ops = { |
| 760 | .get_driver_name = amdgpu_fence_get_driver_name, | 767 | .get_driver_name = amdgpu_fence_get_driver_name, |
| 761 | .get_timeline_name = amdgpu_fence_get_timeline_name, | 768 | .get_timeline_name = amdgpu_fence_get_timeline_name, |
| 762 | .enable_signaling = amdgpu_fence_enable_signaling, | 769 | .enable_signaling = amdgpu_fence_enable_signaling, |
| 763 | .signaled = amdgpu_fence_is_signaled, | 770 | .signaled = amdgpu_fence_is_signaled, |
| 764 | .wait = fence_default_wait, | 771 | .wait = fence_default_wait, |
| 765 | .release = NULL, | 772 | .release = amdgpu_fence_release, |
| 766 | }; | 773 | }; |
| 767 | 774 | ||
| 768 | /* | 775 | /* |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 087332858853..00c5b580f56c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
| @@ -483,6 +483,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, | |||
| 483 | if (domain == AMDGPU_GEM_DOMAIN_CPU) | 483 | if (domain == AMDGPU_GEM_DOMAIN_CPU) |
| 484 | goto error_unreserve; | 484 | goto error_unreserve; |
| 485 | } | 485 | } |
| 486 | r = amdgpu_vm_update_page_directory(adev, bo_va->vm); | ||
| 487 | if (r) | ||
| 488 | goto error_unreserve; | ||
| 486 | 489 | ||
| 487 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); | 490 | r = amdgpu_vm_clear_freed(adev, bo_va->vm); |
| 488 | if (r) | 491 | if (r) |
| @@ -512,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 512 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 515 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
| 513 | struct amdgpu_bo *rbo; | 516 | struct amdgpu_bo *rbo; |
| 514 | struct amdgpu_bo_va *bo_va; | 517 | struct amdgpu_bo_va *bo_va; |
| 518 | struct ttm_validate_buffer tv, tv_pd; | ||
| 519 | struct ww_acquire_ctx ticket; | ||
| 520 | struct list_head list, duplicates; | ||
| 515 | uint32_t invalid_flags, va_flags = 0; | 521 | uint32_t invalid_flags, va_flags = 0; |
| 516 | int r = 0; | 522 | int r = 0; |
| 517 | 523 | ||
| @@ -549,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 549 | return -ENOENT; | 555 | return -ENOENT; |
| 550 | mutex_lock(&fpriv->vm.mutex); | 556 | mutex_lock(&fpriv->vm.mutex); |
| 551 | rbo = gem_to_amdgpu_bo(gobj); | 557 | rbo = gem_to_amdgpu_bo(gobj); |
| 552 | r = amdgpu_bo_reserve(rbo, false); | 558 | INIT_LIST_HEAD(&list); |
| 559 | INIT_LIST_HEAD(&duplicates); | ||
| 560 | tv.bo = &rbo->tbo; | ||
| 561 | tv.shared = true; | ||
| 562 | list_add(&tv.head, &list); | ||
| 563 | |||
| 564 | if (args->operation == AMDGPU_VA_OP_MAP) { | ||
| 565 | tv_pd.bo = &fpriv->vm.page_directory->tbo; | ||
| 566 | tv_pd.shared = true; | ||
| 567 | list_add(&tv_pd.head, &list); | ||
| 568 | } | ||
| 569 | r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); | ||
| 553 | if (r) { | 570 | if (r) { |
| 554 | mutex_unlock(&fpriv->vm.mutex); | 571 | mutex_unlock(&fpriv->vm.mutex); |
| 555 | drm_gem_object_unreference_unlocked(gobj); | 572 | drm_gem_object_unreference_unlocked(gobj); |
| @@ -558,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 558 | 575 | ||
| 559 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); | 576 | bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo); |
| 560 | if (!bo_va) { | 577 | if (!bo_va) { |
| 561 | amdgpu_bo_unreserve(rbo); | 578 | ttm_eu_backoff_reservation(&ticket, &list); |
| 579 | drm_gem_object_unreference_unlocked(gobj); | ||
| 562 | mutex_unlock(&fpriv->vm.mutex); | 580 | mutex_unlock(&fpriv->vm.mutex); |
| 563 | return -ENOENT; | 581 | return -ENOENT; |
| 564 | } | 582 | } |
| @@ -581,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 581 | default: | 599 | default: |
| 582 | break; | 600 | break; |
| 583 | } | 601 | } |
| 584 | 602 | ttm_eu_backoff_reservation(&ticket, &list); | |
| 585 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) | 603 | if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) |
| 586 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); | 604 | amdgpu_gem_va_update_vm(adev, bo_va, args->operation); |
| 587 | mutex_unlock(&fpriv->vm.mutex); | 605 | mutex_unlock(&fpriv->vm.mutex); |
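
The VA ioctl above stops reserving the BO by itself and instead reserves it together with the VM page directory through one validation list, so both reservations nest under a single ww_acquire_ctx and the new amdgpu_vm_update_page_directory() call in amdgpu_gem_va_update_vm() runs with the PD already locked. The shape of that pattern, as a sketch built from the hunk's own names:

	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &rbo->tbo;			/* the BO being (un)mapped */
	tv.shared = true;
	list_add(&tv.head, &list);

	tv_pd.bo = &fpriv->vm.page_directory->tbo;  /* plus the PD */
	tv_pd.shared = true;
	list_add(&tv_pd.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;
	/* ... map/unmap and page table updates ... */
	ttm_eu_backoff_reservation(&ticket, &list);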
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index e65987743871..9e25edafa721 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -62,7 +62,7 @@ int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, | |||
| 62 | int r; | 62 | int r; |
| 63 | 63 | ||
| 64 | if (size) { | 64 | if (size) { |
| 65 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | 65 | r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, |
| 66 | &ib->sa_bo, size, 256); | 66 | &ib->sa_bo, size, 256); |
| 67 | if (r) { | 67 | if (r) { |
| 68 | dev_err(adev->dev, "failed to get a new IB (%d)\n", r); | 68 | dev_err(adev->dev, "failed to get a new IB (%d)\n", r); |
| @@ -216,7 +216,7 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, | |||
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | if (ib->vm) | 218 | if (ib->vm) |
| 219 | amdgpu_vm_fence(adev, ib->vm, ib->fence); | 219 | amdgpu_vm_fence(adev, ib->vm, &ib->fence->base); |
| 220 | 220 | ||
| 221 | amdgpu_ring_unlock_commit(ring); | 221 | amdgpu_ring_unlock_commit(ring); |
| 222 | return 0; | 222 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 3c2ff4567798..ea756e77b023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
| @@ -189,10 +189,9 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 189 | struct amdgpu_sa_manager *sa_manager); | 189 | struct amdgpu_sa_manager *sa_manager); |
| 190 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, | 190 | int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev, |
| 191 | struct amdgpu_sa_manager *sa_manager); | 191 | struct amdgpu_sa_manager *sa_manager); |
| 192 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | 192 | int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, |
| 193 | struct amdgpu_sa_manager *sa_manager, | 193 | struct amdgpu_sa_bo **sa_bo, |
| 194 | struct amdgpu_sa_bo **sa_bo, | 194 | unsigned size, unsigned align); |
| 195 | unsigned size, unsigned align); | ||
| 196 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, | 195 | void amdgpu_sa_bo_free(struct amdgpu_device *adev, |
| 197 | struct amdgpu_sa_bo **sa_bo, | 196 | struct amdgpu_sa_bo **sa_bo, |
| 198 | struct fence *fence); | 197 | struct fence *fence); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 0212b31dc194..8b88edb0434b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -311,8 +311,7 @@ static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager, | |||
| 311 | return false; | 311 | return false; |
| 312 | } | 312 | } |
| 313 | 313 | ||
| 314 | int amdgpu_sa_bo_new(struct amdgpu_device *adev, | 314 | int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, |
| 315 | struct amdgpu_sa_manager *sa_manager, | ||
| 316 | struct amdgpu_sa_bo **sa_bo, | 315 | struct amdgpu_sa_bo **sa_bo, |
| 317 | unsigned size, unsigned align) | 316 | unsigned size, unsigned align) |
| 318 | { | 317 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c index dcf4a8aca680..438c05254695 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
| 27 | #include <drm/drmP.h> | 27 | #include <drm/drmP.h> |
| 28 | #include "amdgpu.h" | 28 | #include "amdgpu.h" |
| 29 | #include "amdgpu_trace.h" | ||
| 29 | 30 | ||
| 30 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) | 31 | static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job) |
| 31 | { | 32 | { |
| @@ -44,11 +45,8 @@ static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job) | |||
| 44 | return NULL; | 45 | return NULL; |
| 45 | } | 46 | } |
| 46 | job = to_amdgpu_job(sched_job); | 47 | job = to_amdgpu_job(sched_job); |
| 47 | mutex_lock(&job->job_lock); | 48 | trace_amdgpu_sched_run_job(job); |
| 48 | r = amdgpu_ib_schedule(job->adev, | 49 | r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner); |
| 49 | job->num_ibs, | ||
| 50 | job->ibs, | ||
| 51 | job->base.owner); | ||
| 52 | if (r) { | 50 | if (r) { |
| 53 | DRM_ERROR("Error scheduling IBs (%d)\n", r); | 51 | DRM_ERROR("Error scheduling IBs (%d)\n", r); |
| 54 | goto err; | 52 | goto err; |
| @@ -61,8 +59,6 @@ err: | |||
| 61 | if (job->free_job) | 59 | if (job->free_job) |
| 62 | job->free_job(job); | 60 | job->free_job(job); |
| 63 | 61 | ||
| 64 | mutex_unlock(&job->job_lock); | ||
| 65 | fence_put(&job->base.s_fence->base); | ||
| 66 | kfree(job); | 62 | kfree(job); |
| 67 | return fence ? &fence->base : NULL; | 63 | return fence ? &fence->base : NULL; |
| 68 | } | 64 | } |
| @@ -88,21 +84,19 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | |||
| 88 | return -ENOMEM; | 84 | return -ENOMEM; |
| 89 | job->base.sched = &ring->sched; | 85 | job->base.sched = &ring->sched; |
| 90 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; | 86 | job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity; |
| 87 | job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner); | ||
| 88 | if (!job->base.s_fence) { | ||
| 89 | kfree(job); | ||
| 90 | return -ENOMEM; | ||
| 91 | } | ||
| 92 | *f = fence_get(&job->base.s_fence->base); | ||
| 93 | |||
| 91 | job->adev = adev; | 94 | job->adev = adev; |
| 92 | job->ibs = ibs; | 95 | job->ibs = ibs; |
| 93 | job->num_ibs = num_ibs; | 96 | job->num_ibs = num_ibs; |
| 94 | job->base.owner = owner; | 97 | job->owner = owner; |
| 95 | mutex_init(&job->job_lock); | ||
| 96 | job->free_job = free_job; | 98 | job->free_job = free_job; |
| 97 | mutex_lock(&job->job_lock); | 99 | amd_sched_entity_push_job(&job->base); |
| 98 | r = amd_sched_entity_push_job(&job->base); | ||
| 99 | if (r) { | ||
| 100 | mutex_unlock(&job->job_lock); | ||
| 101 | kfree(job); | ||
| 102 | return r; | ||
| 103 | } | ||
| 104 | *f = fence_get(&job->base.s_fence->base); | ||
| 105 | mutex_unlock(&job->job_lock); | ||
| 106 | } else { | 100 | } else { |
| 107 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); | 101 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); |
| 108 | if (r) | 102 | if (r) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c index ff3ca52ec6fe..1caaf201b708 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c | |||
| @@ -40,7 +40,7 @@ int amdgpu_semaphore_create(struct amdgpu_device *adev, | |||
| 40 | if (*semaphore == NULL) { | 40 | if (*semaphore == NULL) { |
| 41 | return -ENOMEM; | 41 | return -ENOMEM; |
| 42 | } | 42 | } |
| 43 | r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo, | 43 | r = amdgpu_sa_bo_new(&adev->ring_tmp_bo, |
| 44 | &(*semaphore)->sa_bo, 8, 8); | 44 | &(*semaphore)->sa_bo, 8, 8); |
| 45 | if (r) { | 45 | if (r) { |
| 46 | kfree(*semaphore); | 46 | kfree(*semaphore); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index a6697fd05217..dd005c336c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | |||
| @@ -302,8 +302,14 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync, | |||
| 302 | return -EINVAL; | 302 | return -EINVAL; |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores || | 305 | if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) { |
| 306 | (count >= AMDGPU_NUM_SYNCS)) { | 306 | r = fence_wait(&fence->base, true); |
| 307 | if (r) | ||
| 308 | return r; | ||
| 309 | continue; | ||
| 310 | } | ||
| 311 | |||
| 312 | if (count >= AMDGPU_NUM_SYNCS) { | ||
| 307 | /* not enough room, wait manually */ | 313 | /* not enough room, wait manually */ |
| 308 | r = fence_wait(&fence->base, false); | 314 | r = fence_wait(&fence->base, false); |
| 309 | if (r) | 315 | if (r) |
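
The sync change above splits what used to be one condition: with the scheduler enabled (or semaphores disabled) every dependency is now waited on the CPU with an interruptible wait and the loop continues, while the out-of-semaphore-slots case keeps its uninterruptible wait. The second argument of fence_wait() is the intr flag:

	r = fence_wait(&fence->base, true);	/* interruptible: may
						 * return -ERESTARTSYS */
	r = fence_wait(&fence->base, false);	/* uninterruptible */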
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 76ecbaf72a2e..8f9834ab1bd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
| @@ -48,6 +48,57 @@ TRACE_EVENT(amdgpu_cs, | |||
| 48 | __entry->fences) | 48 | __entry->fences) |
| 49 | ); | 49 | ); |
| 50 | 50 | ||
| 51 | TRACE_EVENT(amdgpu_cs_ioctl, | ||
| 52 | TP_PROTO(struct amdgpu_job *job), | ||
| 53 | TP_ARGS(job), | ||
| 54 | TP_STRUCT__entry( | ||
| 55 | __field(struct amdgpu_device *, adev) | ||
| 56 | __field(struct amd_sched_job *, sched_job) | ||
| 57 | __field(struct amdgpu_ib *, ib) | ||
| 58 | __field(struct fence *, fence) | ||
| 59 | __field(char *, ring_name) | ||
| 60 | __field(u32, num_ibs) | ||
| 61 | ), | ||
| 62 | |||
| 63 | TP_fast_assign( | ||
| 64 | __entry->adev = job->adev; | ||
| 65 | __entry->sched_job = &job->base; | ||
| 66 | __entry->ib = job->ibs; | ||
| 67 | __entry->fence = &job->base.s_fence->base; | ||
| 68 | __entry->ring_name = job->ibs[0].ring->name; | ||
| 69 | __entry->num_ibs = job->num_ibs; | ||
| 70 | ), | ||
| 71 | TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", | ||
| 72 | __entry->adev, __entry->sched_job, __entry->ib, | ||
| 73 | __entry->fence, __entry->ring_name, __entry->num_ibs) | ||
| 74 | ); | ||
| 75 | |||
| 76 | TRACE_EVENT(amdgpu_sched_run_job, | ||
| 77 | TP_PROTO(struct amdgpu_job *job), | ||
| 78 | TP_ARGS(job), | ||
| 79 | TP_STRUCT__entry( | ||
| 80 | __field(struct amdgpu_device *, adev) | ||
| 81 | __field(struct amd_sched_job *, sched_job) | ||
| 82 | __field(struct amdgpu_ib *, ib) | ||
| 83 | __field(struct fence *, fence) | ||
| 84 | __field(char *, ring_name) | ||
| 85 | __field(u32, num_ibs) | ||
| 86 | ), | ||
| 87 | |||
| 88 | TP_fast_assign( | ||
| 89 | __entry->adev = job->adev; | ||
| 90 | __entry->sched_job = &job->base; | ||
| 91 | __entry->ib = job->ibs; | ||
| 92 | __entry->fence = &job->base.s_fence->base; | ||
| 93 | __entry->ring_name = job->ibs[0].ring->name; | ||
| 94 | __entry->num_ibs = job->num_ibs; | ||
| 95 | ), | ||
| 96 | TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u", | ||
| 97 | __entry->adev, __entry->sched_job, __entry->ib, | ||
| 98 | __entry->fence, __entry->ring_name, __entry->num_ibs) | ||
| 99 | ); | ||
| 100 | |||
| 101 | |||
| 51 | TRACE_EVENT(amdgpu_vm_grab_id, | 102 | TRACE_EVENT(amdgpu_vm_grab_id, |
| 52 | TP_PROTO(unsigned vmid, int ring), | 103 | TP_PROTO(unsigned vmid, int ring), |
| 53 | TP_ARGS(vmid, ring), | 104 | TP_ARGS(vmid, ring), |
| @@ -196,49 +247,6 @@ TRACE_EVENT(amdgpu_bo_list_set, | |||
| 196 | TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) | 247 | TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) |
| 197 | ); | 248 | ); |
| 198 | 249 | ||
| 199 | DECLARE_EVENT_CLASS(amdgpu_fence_request, | ||
| 200 | |||
| 201 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 202 | |||
| 203 | TP_ARGS(dev, ring, seqno), | ||
| 204 | |||
| 205 | TP_STRUCT__entry( | ||
| 206 | __field(u32, dev) | ||
| 207 | __field(int, ring) | ||
| 208 | __field(u32, seqno) | ||
| 209 | ), | ||
| 210 | |||
| 211 | TP_fast_assign( | ||
| 212 | __entry->dev = dev->primary->index; | ||
| 213 | __entry->ring = ring; | ||
| 214 | __entry->seqno = seqno; | ||
| 215 | ), | ||
| 216 | |||
| 217 | TP_printk("dev=%u, ring=%d, seqno=%u", | ||
| 218 | __entry->dev, __entry->ring, __entry->seqno) | ||
| 219 | ); | ||
| 220 | |||
| 221 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit, | ||
| 222 | |||
| 223 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 224 | |||
| 225 | TP_ARGS(dev, ring, seqno) | ||
| 226 | ); | ||
| 227 | |||
| 228 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin, | ||
| 229 | |||
| 230 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 231 | |||
| 232 | TP_ARGS(dev, ring, seqno) | ||
| 233 | ); | ||
| 234 | |||
| 235 | DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end, | ||
| 236 | |||
| 237 | TP_PROTO(struct drm_device *dev, int ring, u32 seqno), | ||
| 238 | |||
| 239 | TP_ARGS(dev, ring, seqno) | ||
| 240 | ); | ||
| 241 | |||
| 242 | DECLARE_EVENT_CLASS(amdgpu_semaphore_request, | 250 | DECLARE_EVENT_CLASS(amdgpu_semaphore_request, |
| 243 | 251 | ||
| 244 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), | 252 | TP_PROTO(int ring, struct amdgpu_semaphore *sem), |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 81bb8e9fc26d..d4bac5f49939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -1073,10 +1073,10 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data) | |||
| 1073 | ret = drm_mm_dump_table(m, mm); | 1073 | ret = drm_mm_dump_table(m, mm); |
| 1074 | spin_unlock(&glob->lru_lock); | 1074 | spin_unlock(&glob->lru_lock); |
| 1075 | if (ttm_pl == TTM_PL_VRAM) | 1075 | if (ttm_pl == TTM_PL_VRAM) |
| 1076 | seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n", | 1076 | seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n", |
| 1077 | adev->mman.bdev.man[ttm_pl].size, | 1077 | adev->mman.bdev.man[ttm_pl].size, |
| 1078 | atomic64_read(&adev->vram_usage) >> 20, | 1078 | (u64)atomic64_read(&adev->vram_usage) >> 20, |
| 1079 | atomic64_read(&adev->vram_vis_usage) >> 20); | 1079 | (u64)atomic64_read(&adev->vram_vis_usage) >> 20); |
| 1080 | return ret; | 1080 | return ret; |
| 1081 | } | 1081 | } |
| 1082 | 1082 | ||
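
The seq_printf fix above exists because atomic64_read() has no single portable return type: it yields long on 64-bit architectures but long long on 32-bit ones, where atomic64_t is emulated, so neither %lu nor %llu matches everywhere. Casting through u64 pins the width:

	/* portable across 32/64-bit: cast, then print with %llu */
	seq_printf(m, "ram usage:%lluMB\n",
		   (u64)atomic64_read(&adev->vram_usage) >> 20);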
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 633a32a48560..159ce54bbd8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -143,10 +143,15 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 143 | unsigned i; | 143 | unsigned i; |
| 144 | 144 | ||
| 145 | /* check if the id is still valid */ | 145 | /* check if the id is still valid */ |
| 146 | if (vm_id->id && vm_id->last_id_use && | 146 | if (vm_id->id) { |
| 147 | vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) { | 147 | unsigned id = vm_id->id; |
| 148 | trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); | 148 | long owner; |
| 149 | return 0; | 149 | |
| 150 | owner = atomic_long_read(&adev->vm_manager.ids[id].owner); | ||
| 151 | if (owner == (long)vm) { | ||
| 152 | trace_amdgpu_vm_grab_id(vm_id->id, ring->idx); | ||
| 153 | return 0; | ||
| 154 | } | ||
| 150 | } | 155 | } |
| 151 | 156 | ||
| 152 | /* we definitely need to flush */ | 157 |
| @@ -154,7 +159,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 154 | 159 | ||
| 155 | /* skip over VMID 0, since it is the system VM */ | 160 | /* skip over VMID 0, since it is the system VM */ |
| 156 | for (i = 1; i < adev->vm_manager.nvm; ++i) { | 161 | for (i = 1; i < adev->vm_manager.nvm; ++i) { |
| 157 | struct fence *fence = adev->vm_manager.active[i]; | 162 | struct fence *fence = adev->vm_manager.ids[i].active; |
| 158 | struct amdgpu_ring *fring; | 163 | struct amdgpu_ring *fring; |
| 159 | 164 | ||
| 160 | if (fence == NULL) { | 165 | if (fence == NULL) { |
| @@ -176,7 +181,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, | |||
| 176 | if (choices[i]) { | 181 | if (choices[i]) { |
| 177 | struct fence *fence; | 182 | struct fence *fence; |
| 178 | 183 | ||
| 179 | fence = adev->vm_manager.active[choices[i]]; | 184 | fence = adev->vm_manager.ids[choices[i]].active; |
| 180 | vm_id->id = choices[i]; | 185 | vm_id->id = choices[i]; |
| 181 | 186 | ||
| 182 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); | 187 | trace_amdgpu_vm_grab_id(choices[i], ring->idx); |
| @@ -207,24 +212,21 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 207 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); | 212 | uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); |
| 208 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; | 213 | struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx]; |
| 209 | struct fence *flushed_updates = vm_id->flushed_updates; | 214 | struct fence *flushed_updates = vm_id->flushed_updates; |
| 210 | bool is_earlier = false; | 215 | bool is_later; |
| 211 | |||
| 212 | if (flushed_updates && updates) { | ||
| 213 | BUG_ON(flushed_updates->context != updates->context); | ||
| 214 | is_earlier = (updates->seqno - flushed_updates->seqno <= | ||
| 215 | INT_MAX) ? true : false; | ||
| 216 | } | ||
| 217 | 216 | ||
| 218 | if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates || | 217 | if (!flushed_updates) |
| 219 | is_earlier) { | 218 | is_later = true; |
| 219 | else if (!updates) | ||
| 220 | is_later = false; | ||
| 221 | else | ||
| 222 | is_later = fence_is_later(updates, flushed_updates); | ||
| 220 | 223 | ||
| 224 | if (pd_addr != vm_id->pd_gpu_addr || is_later) { | ||
| 221 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); | 225 | trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id); |
| 222 | if (is_earlier) { | 226 | if (is_later) { |
| 223 | vm_id->flushed_updates = fence_get(updates); | 227 | vm_id->flushed_updates = fence_get(updates); |
| 224 | fence_put(flushed_updates); | 228 | fence_put(flushed_updates); |
| 225 | } | 229 | } |
| 226 | if (!flushed_updates) | ||
| 227 | vm_id->flushed_updates = fence_get(updates); | ||
| 228 | vm_id->pd_gpu_addr = pd_addr; | 230 | vm_id->pd_gpu_addr = pd_addr; |
| 229 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); | 231 | amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr); |
| 230 | } | 232 | } |
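
fence_is_later() replaces the open-coded seqno arithmetic deleted above; both rely on unsigned wraparound, and fence_is_later() additionally carries the same-context assumption the old code asserted with a BUG_ON. Factored out of the removed lines, the wrap-safe comparison being encapsulated is roughly:

    #include <linux/kernel.h>	/* INT_MAX */

    /* The deleted open-coded check: with unsigned sequence numbers the
     * subtraction is wrap-safe, and a difference inside the INT_MAX
     * window means "updates was issued after flushed". fence_is_later()
     * packages this comparison for two fences on one context. */
    static bool updates_newer(unsigned updates_seqno, unsigned flushed_seqno)
    {
            return updates_seqno - flushed_seqno <= INT_MAX;
    }
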
| @@ -244,16 +246,14 @@ void amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 244 | */ | 246 | */ |
| 245 | void amdgpu_vm_fence(struct amdgpu_device *adev, | 247 | void amdgpu_vm_fence(struct amdgpu_device *adev, |
| 246 | struct amdgpu_vm *vm, | 248 | struct amdgpu_vm *vm, |
| 247 | struct amdgpu_fence *fence) | 249 | struct fence *fence) |
| 248 | { | 250 | { |
| 249 | unsigned ridx = fence->ring->idx; | 251 | struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence); |
| 250 | unsigned vm_id = vm->ids[ridx].id; | 252 | unsigned vm_id = vm->ids[ring->idx].id; |
| 251 | |||
| 252 | fence_put(adev->vm_manager.active[vm_id]); | ||
| 253 | adev->vm_manager.active[vm_id] = fence_get(&fence->base); | ||
| 254 | 253 | ||
| 255 | fence_put(vm->ids[ridx].last_id_use); | 254 | fence_put(adev->vm_manager.ids[vm_id].active); |
| 256 | vm->ids[ridx].last_id_use = fence_get(&fence->base); | 255 | adev->vm_manager.ids[vm_id].active = fence_get(fence); |
| 256 | atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm); | ||
| 257 | } | 257 | } |
| 258 | 258 | ||
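
amdgpu_vm_fence() now takes a plain struct fence and recovers the ring with amdgpu_ring_from_fence(), and the fence bookkeeping follows the standard put-old/get-new idiom for replacing a refcounted pointer. As a generic helper (not part of the patch; both fence_get() and fence_put() tolerate NULL):

    #include <linux/fence.h>

    /* Replace the fence held in *slot, keeping refcounts balanced.
     * Taking the new reference before dropping the old one keeps the
     * swap safe even when new == *slot. */
    static void fence_slot_replace(struct fence **slot, struct fence *new)
    {
            struct fence *old = *slot;

            *slot = fence_get(new);
            fence_put(old);
    }
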
| 259 | /** | 259 | /** |
| @@ -332,6 +332,8 @@ int amdgpu_vm_free_job(struct amdgpu_job *job) | |||
| 332 | * | 332 | * |
| 333 | * @adev: amdgpu_device pointer | 333 | * @adev: amdgpu_device pointer |
| 334 | * @bo: bo to clear | 334 | * @bo: bo to clear |
| 335 | * | ||
| 336 | * The bo must be reserved before calling this function. | ||
| 335 | */ | 337 | */ |
| 336 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | 338 | static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, |
| 337 | struct amdgpu_bo *bo) | 339 | struct amdgpu_bo *bo) |
| @@ -343,24 +345,20 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 343 | uint64_t addr; | 345 | uint64_t addr; |
| 344 | int r; | 346 | int r; |
| 345 | 347 | ||
| 346 | r = amdgpu_bo_reserve(bo, false); | ||
| 347 | if (r) | ||
| 348 | return r; | ||
| 349 | |||
| 350 | r = reservation_object_reserve_shared(bo->tbo.resv); | 348 | r = reservation_object_reserve_shared(bo->tbo.resv); |
| 351 | if (r) | 349 | if (r) |
| 352 | return r; | 350 | return r; |
| 353 | 351 | ||
| 354 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); | 352 | r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); |
| 355 | if (r) | 353 | if (r) |
| 356 | goto error_unreserve; | 354 | goto error; |
| 357 | 355 | ||
| 358 | addr = amdgpu_bo_gpu_offset(bo); | 356 | addr = amdgpu_bo_gpu_offset(bo); |
| 359 | entries = amdgpu_bo_size(bo) / 8; | 357 | entries = amdgpu_bo_size(bo) / 8; |
| 360 | 358 | ||
| 361 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); | 359 | ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL); |
| 362 | if (!ib) | 360 | if (!ib) |
| 363 | goto error_unreserve; | 361 | goto error; |
| 364 | 362 | ||
| 365 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); | 363 | r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib); |
| 366 | if (r) | 364 | if (r) |
| @@ -378,16 +376,14 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, | |||
| 378 | if (!r) | 376 | if (!r) |
| 379 | amdgpu_bo_fence(bo, fence, true); | 377 | amdgpu_bo_fence(bo, fence, true); |
| 380 | fence_put(fence); | 378 | fence_put(fence); |
| 381 | if (amdgpu_enable_scheduler) { | 379 | if (amdgpu_enable_scheduler) |
| 382 | amdgpu_bo_unreserve(bo); | ||
| 383 | return 0; | 380 | return 0; |
| 384 | } | 381 | |
| 385 | error_free: | 382 | error_free: |
| 386 | amdgpu_ib_free(adev, ib); | 383 | amdgpu_ib_free(adev, ib); |
| 387 | kfree(ib); | 384 | kfree(ib); |
| 388 | 385 | ||
| 389 | error_unreserve: | 386 | error: |
| 390 | amdgpu_bo_unreserve(bo); | ||
| 391 | return r; | 387 | return r; |
| 392 | } | 388 | } |
| 393 | 389 | ||
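
With the reserve/unreserve pair hoisted out of amdgpu_vm_clear_bo() (and the new kernel-doc line above spelling out that contract), lock ordering now belongs to the caller, and the error labels collapse from error_unreserve to a plain error. The resulting calling convention, condensed from the amdgpu_vm_init hunk later in this file (helper name is illustrative):

    /* Caller-reserves convention after this patch: the buffer stays
     * reserved for the whole clear and is unreserved on every path. */
    static int clear_page_directory(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
    {
            int r;

            r = amdgpu_bo_reserve(vm->page_directory, false);
            if (r)
                    return r;

            r = amdgpu_vm_clear_bo(adev, vm->page_directory);
            amdgpu_bo_unreserve(vm->page_directory);
            return r;
    }
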
| @@ -989,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, | |||
| 989 | * Add a mapping of the BO at the specified addr into the VM. | 985 | * Add a mapping of the BO at the specified addr into the VM. |
| 990 | * Returns 0 for success, error for failure. | 986 | * Returns 0 for success, error for failure. |
| 991 | * | 987 | * |
| 992 | * Object has to be reserved and gets unreserved by this function! | 988 | * Object has to be reserved and unreserved outside! |
| 993 | */ | 989 | */ |
| 994 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, | 990 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, |
| 995 | struct amdgpu_bo_va *bo_va, | 991 | struct amdgpu_bo_va *bo_va, |
| @@ -1005,30 +1001,27 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1005 | 1001 | ||
| 1006 | /* validate the parameters */ | 1002 | /* validate the parameters */ |
| 1007 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || | 1003 | if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || |
| 1008 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) { | 1004 | size == 0 || size & AMDGPU_GPU_PAGE_MASK) |
| 1009 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1010 | return -EINVAL; | 1005 | return -EINVAL; |
| 1011 | } | ||
| 1012 | 1006 | ||
| 1013 | /* make sure object fits at this offset */ | 1007 | /* make sure object fits at this offset */ |
| 1014 | eaddr = saddr + size; | 1008 | eaddr = saddr + size; |
| 1015 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) { | 1009 | if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) |
| 1016 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1017 | return -EINVAL; | 1010 | return -EINVAL; |
| 1018 | } | ||
| 1019 | 1011 | ||
| 1020 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; | 1012 | last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; |
| 1021 | if (last_pfn > adev->vm_manager.max_pfn) { | 1013 | if (last_pfn > adev->vm_manager.max_pfn) { |
| 1022 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", | 1014 | dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", |
| 1023 | last_pfn, adev->vm_manager.max_pfn); | 1015 | last_pfn, adev->vm_manager.max_pfn); |
| 1024 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1025 | return -EINVAL; | 1016 | return -EINVAL; |
| 1026 | } | 1017 | } |
| 1027 | 1018 | ||
| 1028 | saddr /= AMDGPU_GPU_PAGE_SIZE; | 1019 | saddr /= AMDGPU_GPU_PAGE_SIZE; |
| 1029 | eaddr /= AMDGPU_GPU_PAGE_SIZE; | 1020 | eaddr /= AMDGPU_GPU_PAGE_SIZE; |
| 1030 | 1021 | ||
| 1022 | spin_lock(&vm->it_lock); | ||
| 1031 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); | 1023 | it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); |
| 1024 | spin_unlock(&vm->it_lock); | ||
| 1032 | if (it) { | 1025 | if (it) { |
| 1033 | struct amdgpu_bo_va_mapping *tmp; | 1026 | struct amdgpu_bo_va_mapping *tmp; |
| 1034 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); | 1027 | tmp = container_of(it, struct amdgpu_bo_va_mapping, it); |
| @@ -1036,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1036 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " | 1029 | dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " |
| 1037 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, | 1030 | "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr, |
| 1038 | tmp->it.start, tmp->it.last + 1); | 1031 | tmp->it.start, tmp->it.last + 1); |
| 1039 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1040 | r = -EINVAL; | 1032 | r = -EINVAL; |
| 1041 | goto error; | 1033 | goto error; |
| 1042 | } | 1034 | } |
| 1043 | 1035 | ||
| 1044 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); | 1036 | mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); |
| 1045 | if (!mapping) { | 1037 | if (!mapping) { |
| 1046 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1047 | r = -ENOMEM; | 1038 | r = -ENOMEM; |
| 1048 | goto error; | 1039 | goto error; |
| 1049 | } | 1040 | } |
| @@ -1055,7 +1046,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1055 | mapping->flags = flags; | 1046 | mapping->flags = flags; |
| 1056 | 1047 | ||
| 1057 | list_add(&mapping->list, &bo_va->invalids); | 1048 | list_add(&mapping->list, &bo_va->invalids); |
| 1049 | spin_lock(&vm->it_lock); | ||
| 1058 | interval_tree_insert(&mapping->it, &vm->va); | 1050 | interval_tree_insert(&mapping->it, &vm->va); |
| 1051 | spin_unlock(&vm->it_lock); | ||
| 1059 | trace_amdgpu_vm_bo_map(bo_va, mapping); | 1052 | trace_amdgpu_vm_bo_map(bo_va, mapping); |
| 1060 | 1053 | ||
| 1061 | /* Make sure the page tables are allocated */ | 1054 | /* Make sure the page tables are allocated */ |
| @@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1067 | if (eaddr > vm->max_pde_used) | 1060 | if (eaddr > vm->max_pde_used) |
| 1068 | vm->max_pde_used = eaddr; | 1061 | vm->max_pde_used = eaddr; |
| 1069 | 1062 | ||
| 1070 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1071 | |||
| 1072 | /* walk over the address space and allocate the page tables */ | 1063 | /* walk over the address space and allocate the page tables */ |
| 1073 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { | 1064 | for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { |
| 1074 | struct reservation_object *resv = vm->page_directory->tbo.resv; | 1065 | struct reservation_object *resv = vm->page_directory->tbo.resv; |
| @@ -1077,13 +1068,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1077 | if (vm->page_tables[pt_idx].bo) | 1068 | if (vm->page_tables[pt_idx].bo) |
| 1078 | continue; | 1069 | continue; |
| 1079 | 1070 | ||
| 1080 | ww_mutex_lock(&resv->lock, NULL); | ||
| 1081 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, | 1071 | r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, |
| 1082 | AMDGPU_GPU_PAGE_SIZE, true, | 1072 | AMDGPU_GPU_PAGE_SIZE, true, |
| 1083 | AMDGPU_GEM_DOMAIN_VRAM, | 1073 | AMDGPU_GEM_DOMAIN_VRAM, |
| 1084 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, | 1074 | AMDGPU_GEM_CREATE_NO_CPU_ACCESS, |
| 1085 | NULL, resv, &pt); | 1075 | NULL, resv, &pt); |
| 1086 | ww_mutex_unlock(&resv->lock); | ||
| 1087 | if (r) | 1076 | if (r) |
| 1088 | goto error_free; | 1077 | goto error_free; |
| 1089 | 1078 | ||
| @@ -1101,7 +1090,9 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
| 1101 | 1090 | ||
| 1102 | error_free: | 1091 | error_free: |
| 1103 | list_del(&mapping->list); | 1092 | list_del(&mapping->list); |
| 1093 | spin_lock(&vm->it_lock); | ||
| 1104 | interval_tree_remove(&mapping->it, &vm->va); | 1094 | interval_tree_remove(&mapping->it, &vm->va); |
| 1095 | spin_unlock(&vm->it_lock); | ||
| 1105 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1096 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1106 | kfree(mapping); | 1097 | kfree(mapping); |
| 1107 | 1098 | ||
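
Every interval-tree access in this file is now bracketed by the new vm->it_lock spinlock (initialized in the amdgpu_vm_init hunk below), covering lookup, insert, and remove alike. The repeated three-line pairs above would reduce to wrappers like these (hypothetical helpers; the patch open-codes each site):

    static void amdgpu_vm_it_insert(struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va_mapping *mapping)
    {
            spin_lock(&vm->it_lock);
            interval_tree_insert(&mapping->it, &vm->va);
            spin_unlock(&vm->it_lock);
    }

    static void amdgpu_vm_it_remove(struct amdgpu_vm *vm,
                                    struct amdgpu_bo_va_mapping *mapping)
    {
            spin_lock(&vm->it_lock);
            interval_tree_remove(&mapping->it, &vm->va);
            spin_unlock(&vm->it_lock);
    }
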
| @@ -1119,7 +1110,7 @@ error: | |||
| 1119 | * Remove a mapping of the BO at the specified addr from the VM. | 1110 | * Remove a mapping of the BO at the specified addr from the VM. |
| 1120 | * Returns 0 for success, error for failure. | 1111 | * Returns 0 for success, error for failure. |
| 1121 | * | 1112 | * |
| 1122 | * Object has to be reserved and gets unreserved by this function! | 1113 | * Object has to be reserved and unreserved outside! |
| 1123 | */ | 1114 | */ |
| 1124 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | 1115 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, |
| 1125 | struct amdgpu_bo_va *bo_va, | 1116 | struct amdgpu_bo_va *bo_va, |
| @@ -1144,21 +1135,20 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
| 1144 | break; | 1135 | break; |
| 1145 | } | 1136 | } |
| 1146 | 1137 | ||
| 1147 | if (&mapping->list == &bo_va->invalids) { | 1138 | if (&mapping->list == &bo_va->invalids) |
| 1148 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1149 | return -ENOENT; | 1139 | return -ENOENT; |
| 1150 | } | ||
| 1151 | } | 1140 | } |
| 1152 | 1141 | ||
| 1153 | list_del(&mapping->list); | 1142 | list_del(&mapping->list); |
| 1143 | spin_lock(&vm->it_lock); | ||
| 1154 | interval_tree_remove(&mapping->it, &vm->va); | 1144 | interval_tree_remove(&mapping->it, &vm->va); |
| 1145 | spin_unlock(&vm->it_lock); | ||
| 1155 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1146 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1156 | 1147 | ||
| 1157 | if (valid) | 1148 | if (valid) |
| 1158 | list_add(&mapping->list, &vm->freed); | 1149 | list_add(&mapping->list, &vm->freed); |
| 1159 | else | 1150 | else |
| 1160 | kfree(mapping); | 1151 | kfree(mapping); |
| 1161 | amdgpu_bo_unreserve(bo_va->bo); | ||
| 1162 | 1152 | ||
| 1163 | return 0; | 1153 | return 0; |
| 1164 | } | 1154 | } |
| @@ -1187,13 +1177,17 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
| 1187 | 1177 | ||
| 1188 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { | 1178 | list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { |
| 1189 | list_del(&mapping->list); | 1179 | list_del(&mapping->list); |
| 1180 | spin_lock(&vm->it_lock); | ||
| 1190 | interval_tree_remove(&mapping->it, &vm->va); | 1181 | interval_tree_remove(&mapping->it, &vm->va); |
| 1182 | spin_unlock(&vm->it_lock); | ||
| 1191 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1183 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
| 1192 | list_add(&mapping->list, &vm->freed); | 1184 | list_add(&mapping->list, &vm->freed); |
| 1193 | } | 1185 | } |
| 1194 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | 1186 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { |
| 1195 | list_del(&mapping->list); | 1187 | list_del(&mapping->list); |
| 1188 | spin_lock(&vm->it_lock); | ||
| 1196 | interval_tree_remove(&mapping->it, &vm->va); | 1189 | interval_tree_remove(&mapping->it, &vm->va); |
| 1190 | spin_unlock(&vm->it_lock); | ||
| 1197 | kfree(mapping); | 1191 | kfree(mapping); |
| 1198 | } | 1192 | } |
| 1199 | 1193 | ||
| @@ -1241,7 +1235,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1241 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 1235 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| 1242 | vm->ids[i].id = 0; | 1236 | vm->ids[i].id = 0; |
| 1243 | vm->ids[i].flushed_updates = NULL; | 1237 | vm->ids[i].flushed_updates = NULL; |
| 1244 | vm->ids[i].last_id_use = NULL; | ||
| 1245 | } | 1238 | } |
| 1246 | mutex_init(&vm->mutex); | 1239 | mutex_init(&vm->mutex); |
| 1247 | vm->va = RB_ROOT; | 1240 | vm->va = RB_ROOT; |
| @@ -1249,7 +1242,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1249 | INIT_LIST_HEAD(&vm->invalidated); | 1242 | INIT_LIST_HEAD(&vm->invalidated); |
| 1250 | INIT_LIST_HEAD(&vm->cleared); | 1243 | INIT_LIST_HEAD(&vm->cleared); |
| 1251 | INIT_LIST_HEAD(&vm->freed); | 1244 | INIT_LIST_HEAD(&vm->freed); |
| 1252 | 1245 | spin_lock_init(&vm->it_lock); | |
| 1253 | pd_size = amdgpu_vm_directory_size(adev); | 1246 | pd_size = amdgpu_vm_directory_size(adev); |
| 1254 | pd_entries = amdgpu_vm_num_pdes(adev); | 1247 | pd_entries = amdgpu_vm_num_pdes(adev); |
| 1255 | 1248 | ||
| @@ -1269,8 +1262,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1269 | NULL, NULL, &vm->page_directory); | 1262 | NULL, NULL, &vm->page_directory); |
| 1270 | if (r) | 1263 | if (r) |
| 1271 | return r; | 1264 | return r; |
| 1272 | 1265 | r = amdgpu_bo_reserve(vm->page_directory, false); | |
| 1266 | if (r) { | ||
| 1267 | amdgpu_bo_unref(&vm->page_directory); | ||
| 1268 | vm->page_directory = NULL; | ||
| 1269 | return r; | ||
| 1270 | } | ||
| 1273 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); | 1271 | r = amdgpu_vm_clear_bo(adev, vm->page_directory); |
| 1272 | amdgpu_bo_unreserve(vm->page_directory); | ||
| 1274 | if (r) { | 1273 | if (r) { |
| 1275 | amdgpu_bo_unref(&vm->page_directory); | 1274 | amdgpu_bo_unref(&vm->page_directory); |
| 1276 | vm->page_directory = NULL; | 1275 | vm->page_directory = NULL; |
| @@ -1313,11 +1312,28 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1313 | 1312 | ||
| 1314 | amdgpu_bo_unref(&vm->page_directory); | 1313 | amdgpu_bo_unref(&vm->page_directory); |
| 1315 | fence_put(vm->page_directory_fence); | 1314 | fence_put(vm->page_directory_fence); |
| 1316 | |||
| 1317 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 1315 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| 1316 | unsigned id = vm->ids[i].id; | ||
| 1317 | |||
| 1318 | atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner, | ||
| 1319 | (long)vm, 0); | ||
| 1318 | fence_put(vm->ids[i].flushed_updates); | 1320 | fence_put(vm->ids[i].flushed_updates); |
| 1319 | fence_put(vm->ids[i].last_id_use); | ||
| 1320 | } | 1321 | } |
| 1321 | 1322 | ||
| 1322 | mutex_destroy(&vm->mutex); | 1323 | mutex_destroy(&vm->mutex); |
| 1323 | } | 1324 | } |
| 1325 | |||
| 1326 | /** | ||
| 1327 | * amdgpu_vm_manager_fini - cleanup VM manager | ||
| 1328 | * | ||
| 1329 | * @adev: amdgpu_device pointer | ||
| 1330 | * | ||
| 1331 | * Cleanup the VM manager and free resources. | ||
| 1332 | */ | ||
| 1333 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev) | ||
| 1334 | { | ||
| 1335 | unsigned i; | ||
| 1336 | |||
| 1337 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
| 1338 | fence_put(adev->vm_manager.ids[i].active); | ||
| 1339 | } | ||
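
The new amdgpu_vm_manager_fini() gives the GMC teardown paths later in this patch one place to drop the per-VMID fences, replacing the open-coded loops in gmc_v7_0 and gmc_v8_0. The header side of this rework is not part of this excerpt; judging by the fields used above, it presumably reshapes the manager state roughly like this (an assumption, not quoted from the patch):

    /* Presumed shape of the reworked state in amdgpu.h (header hunk
     * not shown in this excerpt): the flat active[] fence array
     * becomes per-VMID structs carrying the fence plus an owner. */
    struct amdgpu_vm_manager_id {
            struct fence    *active;        /* last fence to use this VMID */
            atomic_long_t   owner;          /* last amdgpu_vm to own it */
    };

    void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
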
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index a1a35a5df8e7..57a2e347f04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6569,12 +6569,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, | |||
| 6569 | switch (state) { | 6569 | switch (state) { |
| 6570 | case AMDGPU_IRQ_STATE_DISABLE: | 6570 | case AMDGPU_IRQ_STATE_DISABLE: |
| 6571 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6571 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6572 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; | 6572 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
| 6573 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6573 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6574 | break; | 6574 | break; |
| 6575 | case AMDGPU_IRQ_STATE_ENABLE: | 6575 | case AMDGPU_IRQ_STATE_ENABLE: |
| 6576 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6576 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6577 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; | 6577 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; |
| 6578 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6578 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6579 | break; | 6579 | break; |
| 6580 | default: | 6580 | default: |
| @@ -6586,12 +6586,12 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev, | |||
| 6586 | switch (state) { | 6586 | switch (state) { |
| 6587 | case AMDGPU_IRQ_STATE_DISABLE: | 6587 | case AMDGPU_IRQ_STATE_DISABLE: |
| 6588 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6588 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6589 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; | 6589 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
| 6590 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6590 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6591 | break; | 6591 | break; |
| 6592 | case AMDGPU_IRQ_STATE_ENABLE: | 6592 | case AMDGPU_IRQ_STATE_ENABLE: |
| 6593 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); | 6593 | cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT); |
| 6594 | cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; | 6594 | cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; |
| 6595 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); | 6595 | WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int); |
| 6596 | break; | 6596 | break; |
| 6597 | default: | 6597 | default: |
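
The four swapped lines in ci_dpm.c are a polarity fix: THERM_INTH/INTL_MASK are mask bits, so setting them gates the thermal interrupt off and clearing them lets it through; the old code had ENABLE and DISABLE inverted. Reduced to one field, the corrected read-modify-write looks like (register and enum names as in the hunk):

    static void thermal_inth_set_state(struct amdgpu_device *adev,
                                       enum amdgpu_interrupt_state state)
    {
            u32 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);

            /* The *_MASK bits disable the interrupt: DISABLE sets
             * them, ENABLE clears them. */
            if (state == AMDGPU_IRQ_STATE_DISABLE)
                    cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
            else
                    cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;

            WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
    }
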
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 6776cf756d40..e1dcab98e249 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -268,7 +268,6 @@ static const u32 fiji_mgcg_cgcg_init[] = | |||
| 268 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, | 268 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, |
| 269 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, | 269 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, |
| 270 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, | 270 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, |
| 271 | mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, | ||
| 272 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, | 271 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, |
| 273 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, | 272 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, |
| 274 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, | 273 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, |
| @@ -296,10 +295,6 @@ static const u32 fiji_mgcg_cgcg_init[] = | |||
| 296 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, | 295 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, |
| 297 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, | 296 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, |
| 298 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, | 297 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, |
| 299 | mmPCIE_INDEX, 0xffffffff, 0x0140001c, | ||
| 300 | mmPCIE_DATA, 0x000f0000, 0x00000000, | ||
| 301 | mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, | ||
| 302 | mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, | ||
| 303 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, | 298 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, |
| 304 | }; | 299 | }; |
| 305 | 300 | ||
| @@ -1000,7 +995,7 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) | |||
| 1000 | adev->gfx.config.max_cu_per_sh = 16; | 995 | adev->gfx.config.max_cu_per_sh = 16; |
| 1001 | adev->gfx.config.max_sh_per_se = 1; | 996 | adev->gfx.config.max_sh_per_se = 1; |
| 1002 | adev->gfx.config.max_backends_per_se = 4; | 997 | adev->gfx.config.max_backends_per_se = 4; |
| 1003 | adev->gfx.config.max_texture_channel_caches = 8; | 998 | adev->gfx.config.max_texture_channel_caches = 16; |
| 1004 | adev->gfx.config.max_gprs = 256; | 999 | adev->gfx.config.max_gprs = 256; |
| 1005 | adev->gfx.config.max_gs_threads = 32; | 1000 | adev->gfx.config.max_gs_threads = 32; |
| 1006 | adev->gfx.config.max_hw_contexts = 8; | 1001 | adev->gfx.config.max_hw_contexts = 8; |
| @@ -1613,6 +1608,296 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) | |||
| 1613 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | 1608 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); |
| 1614 | } | 1609 | } |
| 1615 | case CHIP_FIJI: | 1610 | case CHIP_FIJI: |
| 1611 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | ||
| 1612 | switch (reg_offset) { | ||
| 1613 | case 0: | ||
| 1614 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1615 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1616 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
| 1617 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1618 | break; | ||
| 1619 | case 1: | ||
| 1620 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1621 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1622 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
| 1623 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1624 | break; | ||
| 1625 | case 2: | ||
| 1626 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1627 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1628 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
| 1629 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1630 | break; | ||
| 1631 | case 3: | ||
| 1632 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1633 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1634 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
| 1635 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1636 | break; | ||
| 1637 | case 4: | ||
| 1638 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1639 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1640 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1641 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1642 | break; | ||
| 1643 | case 5: | ||
| 1644 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1645 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1646 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1647 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1648 | break; | ||
| 1649 | case 6: | ||
| 1650 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1651 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1652 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1653 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1654 | break; | ||
| 1655 | case 7: | ||
| 1656 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1657 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1658 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
| 1659 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
| 1660 | break; | ||
| 1661 | case 8: | ||
| 1662 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
| 1663 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); | ||
| 1664 | break; | ||
| 1665 | case 9: | ||
| 1666 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1667 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1668 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1669 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1670 | break; | ||
| 1671 | case 10: | ||
| 1672 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1673 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1674 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1675 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1676 | break; | ||
| 1677 | case 11: | ||
| 1678 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1679 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1680 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1681 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1682 | break; | ||
| 1683 | case 12: | ||
| 1684 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1685 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1686 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
| 1687 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1688 | break; | ||
| 1689 | case 13: | ||
| 1690 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1691 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1692 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1693 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1694 | break; | ||
| 1695 | case 14: | ||
| 1696 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1697 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1698 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1699 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1700 | break; | ||
| 1701 | case 15: | ||
| 1702 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | | ||
| 1703 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1704 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1705 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1706 | break; | ||
| 1707 | case 16: | ||
| 1708 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1709 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1710 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1711 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1712 | break; | ||
| 1713 | case 17: | ||
| 1714 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1715 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1716 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1717 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1718 | break; | ||
| 1719 | case 18: | ||
| 1720 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
| 1721 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1722 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1723 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1724 | break; | ||
| 1725 | case 19: | ||
| 1726 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
| 1727 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1728 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1729 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1730 | break; | ||
| 1731 | case 20: | ||
| 1732 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
| 1733 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1734 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1735 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1736 | break; | ||
| 1737 | case 21: | ||
| 1738 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | | ||
| 1739 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1740 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1741 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1742 | break; | ||
| 1743 | case 22: | ||
| 1744 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
| 1745 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1746 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1747 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1748 | break; | ||
| 1749 | case 23: | ||
| 1750 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
| 1751 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1752 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1753 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1754 | break; | ||
| 1755 | case 24: | ||
| 1756 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
| 1757 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1758 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
| 1759 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1760 | break; | ||
| 1761 | case 25: | ||
| 1762 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | | ||
| 1763 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1764 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1765 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1766 | break; | ||
| 1767 | case 26: | ||
| 1768 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | | ||
| 1769 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1770 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
| 1771 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
| 1772 | break; | ||
| 1773 | case 27: | ||
| 1774 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
| 1775 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1776 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1777 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1778 | break; | ||
| 1779 | case 28: | ||
| 1780 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
| 1781 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1782 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1783 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
| 1784 | break; | ||
| 1785 | case 29: | ||
| 1786 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1787 | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | | ||
| 1788 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1789 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1790 | break; | ||
| 1791 | case 30: | ||
| 1792 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
| 1793 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
| 1794 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
| 1795 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
| 1796 | break; | ||
| 1797 | default: | ||
| 1798 | gb_tile_moden = 0; | ||
| 1799 | break; | ||
| 1800 | } | ||
| 1801 | adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; | ||
| 1802 | WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); | ||
| 1803 | } | ||
| 1804 | for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { | ||
| 1805 | switch (reg_offset) { | ||
| 1806 | case 0: | ||
| 1807 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1808 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1809 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1810 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1811 | break; | ||
| 1812 | case 1: | ||
| 1813 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1814 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1815 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1816 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1817 | break; | ||
| 1818 | case 2: | ||
| 1819 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1820 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1821 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1822 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1823 | break; | ||
| 1824 | case 3: | ||
| 1825 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1826 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1827 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1828 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1829 | break; | ||
| 1830 | case 4: | ||
| 1831 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1832 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1833 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1834 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1835 | break; | ||
| 1836 | case 5: | ||
| 1837 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1838 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1839 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1840 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1841 | break; | ||
| 1842 | case 6: | ||
| 1843 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1844 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1845 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1846 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1847 | break; | ||
| 1848 | case 8: | ||
| 1849 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1850 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | | ||
| 1851 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1852 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1853 | break; | ||
| 1854 | case 9: | ||
| 1855 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1856 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
| 1857 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1858 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1859 | break; | ||
| 1860 | case 10: | ||
| 1861 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1862 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1863 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1864 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1865 | break; | ||
| 1866 | case 11: | ||
| 1867 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1868 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1869 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1870 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1871 | break; | ||
| 1872 | case 12: | ||
| 1873 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1874 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
| 1875 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1876 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1877 | break; | ||
| 1878 | case 13: | ||
| 1879 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1880 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1881 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
| 1882 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
| 1883 | break; | ||
| 1884 | case 14: | ||
| 1885 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
| 1886 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
| 1887 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
| 1888 | NUM_BANKS(ADDR_SURF_4_BANK)); | ||
| 1889 | break; | ||
| 1890 | case 7: | ||
| 1891 | /* unused idx */ | ||
| 1892 | continue; | ||
| 1893 | default: | ||
| 1894 | gb_tile_moden = 0; | ||
| 1895 | break; | ||
| 1896 | } | ||
| 1897 | adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; | ||
| 1898 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | ||
| 1899 | } | ||
| 1900 | break; | ||
| 1616 | case CHIP_TONGA: | 1901 | case CHIP_TONGA: |
| 1617 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | 1902 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
| 1618 | switch (reg_offset) { | 1903 | switch (reg_offset) { |
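
The large block added above gives Fiji its own tiling tables instead of letting it fall through to the Tonga values, and it follows the same skeleton as every other ASIC in this function: one switch per register offset, each computed value cached in adev->gfx.config and mirrored to the hardware register, with unused indices (case 7 in the macrotile table) skipped rather than zeroed. Stripped of the per-index encodings, the skeleton is:

    /* Skeleton of the per-ASIC tiling-table init (values elided). */
    for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
            switch (reg_offset) {
            /* ... per-index gb_tile_moden encodings ... */
            default:
                    gb_tile_moden = 0;
                    break;
            }
            adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
            WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
    }
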
| @@ -2971,10 +3256,13 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 2971 | amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); | 3256 | amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); |
| 2972 | switch (adev->asic_type) { | 3257 | switch (adev->asic_type) { |
| 2973 | case CHIP_TONGA: | 3258 | case CHIP_TONGA: |
| 2974 | case CHIP_FIJI: | ||
| 2975 | amdgpu_ring_write(ring, 0x16000012); | 3259 | amdgpu_ring_write(ring, 0x16000012); |
| 2976 | amdgpu_ring_write(ring, 0x0000002A); | 3260 | amdgpu_ring_write(ring, 0x0000002A); |
| 2977 | break; | 3261 | break; |
| 3262 | case CHIP_FIJI: | ||
| 3263 | amdgpu_ring_write(ring, 0x3a00161a); | ||
| 3264 | amdgpu_ring_write(ring, 0x0000002e); | ||
| 3265 | break; | ||
| 2978 | case CHIP_TOPAZ: | 3266 | case CHIP_TOPAZ: |
| 2979 | case CHIP_CARRIZO: | 3267 | case CHIP_CARRIZO: |
| 2980 | amdgpu_ring_write(ring, 0x00000002); | 3268 | amdgpu_ring_write(ring, 0x00000002); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 85bbcdc73fff..7427d8cd4c43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
| @@ -40,7 +40,7 @@ | |||
| 40 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); | 40 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); |
| 41 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); | 41 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); |
| 42 | 42 | ||
| 43 | MODULE_FIRMWARE("radeon/boniare_mc.bin"); | 43 | MODULE_FIRMWARE("radeon/bonaire_mc.bin"); |
| 44 | MODULE_FIRMWARE("radeon/hawaii_mc.bin"); | 44 | MODULE_FIRMWARE("radeon/hawaii_mc.bin"); |
| 45 | 45 | ||
| 46 | /** | 46 | /** |
| @@ -501,6 +501,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | |||
| 501 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | 501 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); |
| 502 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | 502 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); |
| 503 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | 503 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); |
| 504 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); | ||
| 504 | WREG32(mmVM_L2_CNTL, tmp); | 505 | WREG32(mmVM_L2_CNTL, tmp); |
| 505 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | 506 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); |
| 506 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); | 507 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); |
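
ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY is set with the same REG_SET_FIELD() idiom as the neighboring lines: mask out the named field of the accumulated register value, then or in the new field value, with mask and shift constants generated from the register headers. Roughly (a simplified sketch of the idiom, not the macro's exact in-tree definition):

    /* Simplified shape of the REG_SET_FIELD(val, REG, FIELD, v) idiom;
     * the real macro derives mask and shift from generated constants
     * such as VM_L2_CNTL__<FIELD>_MASK and VM_L2_CNTL__<FIELD>__SHIFT. */
    #define SET_FIELD(val, mask, shift, fieldval) \
            (((val) & ~(mask)) | (((fieldval) << (shift)) & (mask)))
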
| @@ -960,12 +961,10 @@ static int gmc_v7_0_sw_init(void *handle) | |||
| 960 | 961 | ||
| 961 | static int gmc_v7_0_sw_fini(void *handle) | 962 | static int gmc_v7_0_sw_fini(void *handle) |
| 962 | { | 963 | { |
| 963 | int i; | ||
| 964 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 964 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 965 | 965 | ||
| 966 | if (adev->vm_manager.enabled) { | 966 | if (adev->vm_manager.enabled) { |
| 967 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 967 | amdgpu_vm_manager_fini(adev); |
| 968 | fence_put(adev->vm_manager.active[i]); | ||
| 969 | gmc_v7_0_vm_fini(adev); | 968 | gmc_v7_0_vm_fini(adev); |
| 970 | adev->vm_manager.enabled = false; | 969 | adev->vm_manager.enabled = false; |
| 971 | } | 970 | } |
| @@ -1010,12 +1009,10 @@ static int gmc_v7_0_hw_fini(void *handle) | |||
| 1010 | 1009 | ||
| 1011 | static int gmc_v7_0_suspend(void *handle) | 1010 | static int gmc_v7_0_suspend(void *handle) |
| 1012 | { | 1011 | { |
| 1013 | int i; | ||
| 1014 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1012 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1015 | 1013 | ||
| 1016 | if (adev->vm_manager.enabled) { | 1014 | if (adev->vm_manager.enabled) { |
| 1017 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 1015 | amdgpu_vm_manager_fini(adev); |
| 1018 | fence_put(adev->vm_manager.active[i]); | ||
| 1019 | gmc_v7_0_vm_fini(adev); | 1016 | gmc_v7_0_vm_fini(adev); |
| 1020 | adev->vm_manager.enabled = false; | 1017 | adev->vm_manager.enabled = false; |
| 1021 | } | 1018 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1bcc4e74e3b4..cb0e50ebb528 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
| @@ -629,6 +629,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | |||
| 629 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | 629 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); |
| 630 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | 630 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); |
| 631 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | 631 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); |
| 632 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); | ||
| 632 | WREG32(mmVM_L2_CNTL, tmp); | 633 | WREG32(mmVM_L2_CNTL, tmp); |
| 633 | tmp = RREG32(mmVM_L2_CNTL2); | 634 | tmp = RREG32(mmVM_L2_CNTL2); |
| 634 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | 635 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); |
| @@ -979,12 +980,10 @@ static int gmc_v8_0_sw_init(void *handle) | |||
| 979 | 980 | ||
| 980 | static int gmc_v8_0_sw_fini(void *handle) | 981 | static int gmc_v8_0_sw_fini(void *handle) |
| 981 | { | 982 | { |
| 982 | int i; | ||
| 983 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 983 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 984 | 984 | ||
| 985 | if (adev->vm_manager.enabled) { | 985 | if (adev->vm_manager.enabled) { |
| 986 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 986 | amdgpu_vm_manager_fini(adev); |
| 987 | fence_put(adev->vm_manager.active[i]); | ||
| 988 | gmc_v8_0_vm_fini(adev); | 987 | gmc_v8_0_vm_fini(adev); |
| 989 | adev->vm_manager.enabled = false; | 988 | adev->vm_manager.enabled = false; |
| 990 | } | 989 | } |
| @@ -1031,12 +1030,10 @@ static int gmc_v8_0_hw_fini(void *handle) | |||
| 1031 | 1030 | ||
| 1032 | static int gmc_v8_0_suspend(void *handle) | 1031 | static int gmc_v8_0_suspend(void *handle) |
| 1033 | { | 1032 | { |
| 1034 | int i; | ||
| 1035 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1033 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1036 | 1034 | ||
| 1037 | if (adev->vm_manager.enabled) { | 1035 | if (adev->vm_manager.enabled) { |
| 1038 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | 1036 | amdgpu_vm_manager_fini(adev); |
| 1039 | fence_put(adev->vm_manager.active[i]); | ||
| 1040 | gmc_v8_0_vm_fini(adev); | 1037 | gmc_v8_0_vm_fini(adev); |
| 1041 | adev->vm_manager.enabled = false; | 1038 | adev->vm_manager.enabled = false; |
| 1042 | } | 1039 | } |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index 144f50acc971..c89dc777768f 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h | |||
| @@ -16,6 +16,8 @@ TRACE_EVENT(amd_sched_job, | |||
| 16 | TP_ARGS(sched_job), | 16 | TP_ARGS(sched_job), |
| 17 | TP_STRUCT__entry( | 17 | TP_STRUCT__entry( |
| 18 | __field(struct amd_sched_entity *, entity) | 18 | __field(struct amd_sched_entity *, entity) |
| 19 | __field(struct amd_sched_job *, sched_job) | ||
| 20 | __field(struct fence *, fence) | ||
| 19 | __field(const char *, name) | 21 | __field(const char *, name) |
| 20 | __field(u32, job_count) | 22 | __field(u32, job_count) |
| 21 | __field(int, hw_job_count) | 23 | __field(int, hw_job_count) |
| @@ -23,16 +25,32 @@ TRACE_EVENT(amd_sched_job, | |||
| 23 | 25 | ||
| 24 | TP_fast_assign( | 26 | TP_fast_assign( |
| 25 | __entry->entity = sched_job->s_entity; | 27 | __entry->entity = sched_job->s_entity; |
| 28 | __entry->sched_job = sched_job; | ||
| 29 | __entry->fence = &sched_job->s_fence->base; | ||
| 26 | __entry->name = sched_job->sched->name; | 30 | __entry->name = sched_job->sched->name; |
| 27 | __entry->job_count = kfifo_len( | 31 | __entry->job_count = kfifo_len( |
| 28 | &sched_job->s_entity->job_queue) / sizeof(sched_job); | 32 | &sched_job->s_entity->job_queue) / sizeof(sched_job); |
| 29 | __entry->hw_job_count = atomic_read( | 33 | __entry->hw_job_count = atomic_read( |
| 30 | &sched_job->sched->hw_rq_count); | 34 | &sched_job->sched->hw_rq_count); |
| 31 | ), | 35 | ), |
| 32 | TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d", | 36 | TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d", |
| 33 | __entry->entity, __entry->name, __entry->job_count, | 37 | __entry->entity, __entry->sched_job, __entry->fence, __entry->name, |
| 34 | __entry->hw_job_count) | 38 | __entry->job_count, __entry->hw_job_count) |
| 35 | ); | 39 | ); |
| 40 | |||
| 41 | TRACE_EVENT(amd_sched_process_job, | ||
| 42 | TP_PROTO(struct amd_sched_fence *fence), | ||
| 43 | TP_ARGS(fence), | ||
| 44 | TP_STRUCT__entry( | ||
| 45 | __field(struct fence *, fence) | ||
| 46 | ), | ||
| 47 | |||
| 48 | TP_fast_assign( | ||
| 49 | __entry->fence = &fence->base; | ||
| 50 | ), | ||
| 51 | TP_printk("fence=%p signaled", __entry->fence) | ||
| 52 | ); | ||
| 53 | |||
| 36 | #endif | 54 | #endif |
| 37 | 55 | ||
| 38 | /* This part must be outside protection */ | 56 | /* This part must be outside protection */ |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 89619a5a4289..ea30d6ad4c13 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
| @@ -34,6 +34,9 @@ static struct amd_sched_job * | |||
| 34 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); | 34 | amd_sched_entity_pop_job(struct amd_sched_entity *entity); |
| 35 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); | 35 | static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); |
| 36 | 36 | ||
| 37 | struct kmem_cache *sched_fence_slab; | ||
| 38 | atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); | ||
| 39 | |||
| 37 | /* Initialize a given run queue struct */ | 40 | /* Initialize a given run queue struct */ |
| 38 | static void amd_sched_rq_init(struct amd_sched_rq *rq) | 41 | static void amd_sched_rq_init(struct amd_sched_rq *rq) |
| 39 | { | 42 | { |
| @@ -273,22 +276,13 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | |||
| 273 | * | 276 | * |
| 274 | * Blocks until the job has been pushed into the entity's queue. | 277 | * Blocks until the job has been pushed into the entity's queue. |
| 275 | */ | 278 | */ |
| 276 | int amd_sched_entity_push_job(struct amd_sched_job *sched_job) | 279 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job) |
| 277 | { | 280 | { |
| 278 | struct amd_sched_entity *entity = sched_job->s_entity; | 281 | struct amd_sched_entity *entity = sched_job->s_entity; |
| 279 | struct amd_sched_fence *fence = amd_sched_fence_create( | ||
| 280 | entity, sched_job->owner); | ||
| 281 | |||
| 282 | if (!fence) | ||
| 283 | return -ENOMEM; | ||
| 284 | |||
| 285 | fence_get(&fence->base); | ||
| 286 | sched_job->s_fence = fence; | ||
| 287 | 282 | ||
| 288 | wait_event(entity->sched->job_scheduled, | 283 | wait_event(entity->sched->job_scheduled, |
| 289 | amd_sched_entity_in(sched_job)); | 284 | amd_sched_entity_in(sched_job)); |
| 290 | trace_amd_sched_job(sched_job); | 285 | trace_amd_sched_job(sched_job); |
| 291 | return 0; | ||
| 292 | } | 286 | } |
| 293 | 287 | ||
| 294 | /** | 288 | /** |
| @@ -343,6 +337,7 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) | |||
| 343 | list_del_init(&s_fence->list); | 337 | list_del_init(&s_fence->list); |
| 344 | spin_unlock_irqrestore(&sched->fence_list_lock, flags); | 338 | spin_unlock_irqrestore(&sched->fence_list_lock, flags); |
| 345 | } | 339 | } |
| 340 | trace_amd_sched_process_job(s_fence); | ||
| 346 | fence_put(&s_fence->base); | 341 | fence_put(&s_fence->base); |
| 347 | wake_up_interruptible(&sched->wake_up_worker); | 342 | wake_up_interruptible(&sched->wake_up_worker); |
| 348 | } | 343 | } |
| @@ -450,6 +445,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, | |||
| 450 | init_waitqueue_head(&sched->wake_up_worker); | 445 | init_waitqueue_head(&sched->wake_up_worker); |
| 451 | init_waitqueue_head(&sched->job_scheduled); | 446 | init_waitqueue_head(&sched->job_scheduled); |
| 452 | atomic_set(&sched->hw_rq_count, 0); | 447 | atomic_set(&sched->hw_rq_count, 0); |
| 448 | if (atomic_inc_return(&sched_fence_slab_ref) == 1) { | ||
| 449 | sched_fence_slab = kmem_cache_create( | ||
| 450 | "amd_sched_fence", sizeof(struct amd_sched_fence), 0, | ||
| 451 | SLAB_HWCACHE_ALIGN, NULL); | ||
| 452 | if (!sched_fence_slab) | ||
| 453 | return -ENOMEM; | ||
| 454 | } | ||
| 453 | 455 | ||
| 454 | /* Each scheduler will run on a separate kernel thread */ | 456 | /* Each scheduler will run on a separate kernel thread */ |
| 455 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); | 457 | sched->thread = kthread_run(amd_sched_main, sched, sched->name); |
| @@ -470,4 +472,6 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) | |||
| 470 | { | 472 | { |
| 471 | if (sched->thread) | 473 | if (sched->thread) |
| 472 | kthread_stop(sched->thread); | 474 | kthread_stop(sched->thread); |
| 475 | if (atomic_dec_and_test(&sched_fence_slab_ref)) | ||
| 476 | kmem_cache_destroy(sched_fence_slab); | ||
| 473 | } | 477 | } |
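
Because the fence slab is shared by every scheduler instance, its lifetime is tied to a global refcount: the first amd_sched_init() creates the cache and the last amd_sched_fini() destroys it. The bare pattern, separated out (kernel-style sketch; names are illustrative):

    #include <linux/atomic.h>
    #include <linux/slab.h>

    static struct kmem_cache *shared_slab;
    static atomic_t shared_slab_ref = ATOMIC_INIT(0);

    /* First user creates the shared cache... */
    static int shared_slab_get(size_t objsize)
    {
            if (atomic_inc_return(&shared_slab_ref) == 1) {
                    shared_slab = kmem_cache_create("shared_slab", objsize,
                                                    0, SLAB_HWCACHE_ALIGN,
                                                    NULL);
                    if (!shared_slab)
                            return -ENOMEM;
            }
            return 0;
    }

    /* ...and the last user destroys it. */
    static void shared_slab_put(void)
    {
            if (atomic_dec_and_test(&shared_slab_ref))
                    kmem_cache_destroy(shared_slab);
    }

The matching free side is the new amd_sched_fence_release() in sched_fence.c below, which returns objects to the same cache instead of relying on the fence core's default kfree-based fallback: whoever overrides the allocator must override the release path to match.
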
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 929e9aced041..939692b14f4b 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h | |||
| @@ -30,6 +30,9 @@ | |||
| 30 | struct amd_gpu_scheduler; | 30 | struct amd_gpu_scheduler; |
| 31 | struct amd_sched_rq; | 31 | struct amd_sched_rq; |
| 32 | 32 | ||
| 33 | extern struct kmem_cache *sched_fence_slab; | ||
| 34 | extern atomic_t sched_fence_slab_ref; | ||
| 35 | |||
| 33 | /** | 36 | /** |
| 34 | * A scheduler entity is a wrapper around a job queue or a group | 37 | * A scheduler entity is a wrapper around a job queue or a group |
| 35 | * of other entities. Entities take turns emitting jobs from their | 38 | * of other entities. Entities take turns emitting jobs from their |
| @@ -76,7 +79,6 @@ struct amd_sched_job { | |||
| 76 | struct amd_gpu_scheduler *sched; | 79 | struct amd_gpu_scheduler *sched; |
| 77 | struct amd_sched_entity *s_entity; | 80 | struct amd_sched_entity *s_entity; |
| 78 | struct amd_sched_fence *s_fence; | 81 | struct amd_sched_fence *s_fence; |
| 79 | void *owner; | ||
| 80 | }; | 82 | }; |
| 81 | 83 | ||
| 82 | extern const struct fence_ops amd_sched_fence_ops; | 84 | extern const struct fence_ops amd_sched_fence_ops; |
| @@ -128,7 +130,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, | |||
| 128 | uint32_t jobs); | 130 | uint32_t jobs); |
| 129 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | 131 | void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, |
| 130 | struct amd_sched_entity *entity); | 132 | struct amd_sched_entity *entity); |
| 131 | int amd_sched_entity_push_job(struct amd_sched_job *sched_job); | 133 | void amd_sched_entity_push_job(struct amd_sched_job *sched_job); |
| 132 | 134 | ||
| 133 | struct amd_sched_fence *amd_sched_fence_create( | 135 | struct amd_sched_fence *amd_sched_fence_create( |
| 134 | struct amd_sched_entity *s_entity, void *owner); | 136 | struct amd_sched_entity *s_entity, void *owner); |
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index d802638094f4..8d2130b9ff05 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c | |||
| @@ -32,7 +32,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity | |||
| 32 | struct amd_sched_fence *fence = NULL; | 32 | struct amd_sched_fence *fence = NULL; |
| 33 | unsigned seq; | 33 | unsigned seq; |
| 34 | 34 | ||
| 35 | fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL); | 35 | fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL); |
| 36 | if (fence == NULL) | 36 | if (fence == NULL) |
| 37 | return NULL; | 37 | return NULL; |
| 38 | fence->owner = owner; | 38 | fence->owner = owner; |
| @@ -71,11 +71,17 @@ static bool amd_sched_fence_enable_signaling(struct fence *f) | |||
| 71 | return true; | 71 | return true; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static void amd_sched_fence_release(struct fence *f) | ||
| 75 | { | ||
| 76 | struct amd_sched_fence *fence = to_amd_sched_fence(f); | ||
| 77 | kmem_cache_free(sched_fence_slab, fence); | ||
| 78 | } | ||
| 79 | |||
| 74 | const struct fence_ops amd_sched_fence_ops = { | 80 | const struct fence_ops amd_sched_fence_ops = { |
| 75 | .get_driver_name = amd_sched_fence_get_driver_name, | 81 | .get_driver_name = amd_sched_fence_get_driver_name, |
| 76 | .get_timeline_name = amd_sched_fence_get_timeline_name, | 82 | .get_timeline_name = amd_sched_fence_get_timeline_name, |
| 77 | .enable_signaling = amd_sched_fence_enable_signaling, | 83 | .enable_signaling = amd_sched_fence_enable_signaling, |
| 78 | .signaled = NULL, | 84 | .signaled = NULL, |
| 79 | .wait = fence_default_wait, | 85 | .wait = fence_default_wait, |
| 80 | .release = NULL, | 86 | .release = amd_sched_fence_release, |
| 81 | }; | 87 | }; |
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 7bb3845d9974..aeee083c7f95 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -1432,6 +1432,45 @@ static int atomic_set_prop(struct drm_atomic_state *state, | |||
| 1432 | return ret; | 1432 | return ret; |
| 1433 | } | 1433 | } |
| 1434 | 1434 | ||
| 1435 | /** | ||
| 1436 | * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers. | ||
| 1437 | * | ||
| 1438 | * @dev: drm device to check. | ||
| 1439 | * @plane_mask: plane mask for planes that were updated. | ||
| 1440 | * @ret: return value, can be -EDEADLK for a retry. | ||
| 1441 | * | ||
| 1442 | * Before doing an update, plane->old_fb is set to plane->fb, | ||
| 1443 | * but before dropping the locks, old_fb needs to be set to NULL | ||
| 1444 | * and plane->fb updated. This is a common operation for each | ||
| 1445 | * atomic update, so this call is split off as a helper. | ||
| 1446 | */ | ||
| 1447 | void drm_atomic_clean_old_fb(struct drm_device *dev, | ||
| 1448 | unsigned plane_mask, | ||
| 1449 | int ret) | ||
| 1450 | { | ||
| 1451 | struct drm_plane *plane; | ||
| 1452 | |||
| 1453 | /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping | ||
| 1454 | * locks (ie. while it is still safe to deref plane->state). We | ||
| 1455 | * need to do this here because the driver entry points cannot | ||
| 1456 | * distinguish between legacy and atomic ioctls. | ||
| 1457 | */ | ||
| 1458 | drm_for_each_plane_mask(plane, dev, plane_mask) { | ||
| 1459 | if (ret == 0) { | ||
| 1460 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1461 | if (new_fb) | ||
| 1462 | drm_framebuffer_reference(new_fb); | ||
| 1463 | plane->fb = new_fb; | ||
| 1464 | plane->crtc = plane->state->crtc; | ||
| 1465 | |||
| 1466 | if (plane->old_fb) | ||
| 1467 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1468 | } | ||
| 1469 | plane->old_fb = NULL; | ||
| 1470 | } | ||
| 1471 | } | ||
| 1472 | EXPORT_SYMBOL(drm_atomic_clean_old_fb); | ||
| 1473 | |||
| 1435 | int drm_mode_atomic_ioctl(struct drm_device *dev, | 1474 | int drm_mode_atomic_ioctl(struct drm_device *dev, |
| 1436 | void *data, struct drm_file *file_priv) | 1475 | void *data, struct drm_file *file_priv) |
| 1437 | { | 1476 | { |
| @@ -1446,7 +1485,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 1446 | struct drm_plane *plane; | 1485 | struct drm_plane *plane; |
| 1447 | struct drm_crtc *crtc; | 1486 | struct drm_crtc *crtc; |
| 1448 | struct drm_crtc_state *crtc_state; | 1487 | struct drm_crtc_state *crtc_state; |
| 1449 | unsigned plane_mask = 0; | 1488 | unsigned plane_mask; |
| 1450 | int ret = 0; | 1489 | int ret = 0; |
| 1451 | unsigned int i, j; | 1490 | unsigned int i, j; |
| 1452 | 1491 | ||
| @@ -1486,6 +1525,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, | |||
| 1486 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); | 1525 | state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); |
| 1487 | 1526 | ||
| 1488 | retry: | 1527 | retry: |
| 1528 | plane_mask = 0; | ||
| 1489 | copied_objs = 0; | 1529 | copied_objs = 0; |
| 1490 | copied_props = 0; | 1530 | copied_props = 0; |
| 1491 | 1531 | ||
| @@ -1576,24 +1616,7 @@ retry: | |||
| 1576 | } | 1616 | } |
| 1577 | 1617 | ||
| 1578 | out: | 1618 | out: |
| 1579 | /* if succeeded, fixup legacy plane crtc/fb ptrs before dropping | 1619 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 1580 | * locks (ie. while it is still safe to deref plane->state). We | ||
| 1581 | * need to do this here because the driver entry points cannot | ||
| 1582 | * distinguish between legacy and atomic ioctls. | ||
| 1583 | */ | ||
| 1584 | drm_for_each_plane_mask(plane, dev, plane_mask) { | ||
| 1585 | if (ret == 0) { | ||
| 1586 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1587 | if (new_fb) | ||
| 1588 | drm_framebuffer_reference(new_fb); | ||
| 1589 | plane->fb = new_fb; | ||
| 1590 | plane->crtc = plane->state->crtc; | ||
| 1591 | |||
| 1592 | if (plane->old_fb) | ||
| 1593 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1594 | } | ||
| 1595 | plane->old_fb = NULL; | ||
| 1596 | } | ||
| 1597 | 1620 | ||
| 1598 | if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { | 1621 | if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { |
| 1599 | /* | 1622 | /* |
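
Every call site converted in this series follows the same shape: accumulate a mask of touched planes, commit, then hand the mask to the new helper while the modeset locks are still held. A condensed sketch of that caller pattern (error handling trimmed, lock acquisition assumed):

    unsigned plane_mask;
    int ret;

    retry:
            plane_mask = 0;         /* must be re-zeroed on every retry pass */
            drm_for_each_plane(plane, dev) {
                    /* ... fill in plane state ... */
                    plane->old_fb = plane->fb;
                    plane_mask |= 1 << drm_plane_index(plane);
            }

            ret = drm_atomic_commit(state);

            /* swap fb references and clear old_fb for every masked plane,
             * while it is still safe to deref plane->state */
            drm_atomic_clean_old_fb(dev, plane_mask, ret);

            if (ret == -EDEADLK) {
                    drm_atomic_state_clear(state);
                    drm_modeset_backoff(state->acquire_ctx);
                    goto retry;
            }
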
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 0c6f62168776..e5aec45bf985 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -210,6 +210,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) | |||
| 210 | return -EINVAL; | 210 | return -EINVAL; |
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) { | ||
| 214 | DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n", | ||
| 215 | new_encoder->base.id, | ||
| 216 | new_encoder->name, | ||
| 217 | connector_state->crtc->base.id); | ||
| 218 | return -EINVAL; | ||
| 219 | } | ||
| 220 | |||
| 213 | if (new_encoder == connector_state->best_encoder) { | 221 | if (new_encoder == connector_state->best_encoder) { |
| 214 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", | 222 | DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", |
| 215 | connector->base.id, | 223 | connector->base.id, |
| @@ -1553,6 +1561,9 @@ retry: | |||
| 1553 | goto fail; | 1561 | goto fail; |
| 1554 | } | 1562 | } |
| 1555 | 1563 | ||
| 1564 | if (plane_state->crtc && (plane == plane->crtc->cursor)) | ||
| 1565 | plane_state->state->legacy_cursor_update = true; | ||
| 1566 | |||
| 1556 | ret = __drm_atomic_helper_disable_plane(plane, plane_state); | 1567 | ret = __drm_atomic_helper_disable_plane(plane, plane_state); |
| 1557 | if (ret != 0) | 1568 | if (ret != 0) |
| 1558 | goto fail; | 1569 | goto fail; |
| @@ -1605,9 +1616,6 @@ int __drm_atomic_helper_disable_plane(struct drm_plane *plane, | |||
| 1605 | plane_state->src_h = 0; | 1616 | plane_state->src_h = 0; |
| 1606 | plane_state->src_w = 0; | 1617 | plane_state->src_w = 0; |
| 1607 | 1618 | ||
| 1608 | if (plane->crtc && (plane == plane->crtc->cursor)) | ||
| 1609 | plane_state->state->legacy_cursor_update = true; | ||
| 1610 | |||
| 1611 | return 0; | 1619 | return 0; |
| 1612 | } | 1620 | } |
| 1613 | 1621 | ||
| @@ -1741,6 +1749,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, | |||
| 1741 | struct drm_crtc_state *crtc_state; | 1749 | struct drm_crtc_state *crtc_state; |
| 1742 | struct drm_plane_state *primary_state; | 1750 | struct drm_plane_state *primary_state; |
| 1743 | struct drm_crtc *crtc = set->crtc; | 1751 | struct drm_crtc *crtc = set->crtc; |
| 1752 | int hdisplay, vdisplay; | ||
| 1744 | int ret; | 1753 | int ret; |
| 1745 | 1754 | ||
| 1746 | crtc_state = drm_atomic_get_crtc_state(state, crtc); | 1755 | crtc_state = drm_atomic_get_crtc_state(state, crtc); |
| @@ -1783,19 +1792,21 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set, | |||
| 1783 | if (ret != 0) | 1792 | if (ret != 0) |
| 1784 | return ret; | 1793 | return ret; |
| 1785 | 1794 | ||
| 1795 | drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); | ||
| 1796 | |||
| 1786 | drm_atomic_set_fb_for_plane(primary_state, set->fb); | 1797 | drm_atomic_set_fb_for_plane(primary_state, set->fb); |
| 1787 | primary_state->crtc_x = 0; | 1798 | primary_state->crtc_x = 0; |
| 1788 | primary_state->crtc_y = 0; | 1799 | primary_state->crtc_y = 0; |
| 1789 | primary_state->crtc_h = set->mode->vdisplay; | 1800 | primary_state->crtc_h = vdisplay; |
| 1790 | primary_state->crtc_w = set->mode->hdisplay; | 1801 | primary_state->crtc_w = hdisplay; |
| 1791 | primary_state->src_x = set->x << 16; | 1802 | primary_state->src_x = set->x << 16; |
| 1792 | primary_state->src_y = set->y << 16; | 1803 | primary_state->src_y = set->y << 16; |
| 1793 | if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { | 1804 | if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) { |
| 1794 | primary_state->src_h = set->mode->hdisplay << 16; | 1805 | primary_state->src_h = hdisplay << 16; |
| 1795 | primary_state->src_w = set->mode->vdisplay << 16; | 1806 | primary_state->src_w = vdisplay << 16; |
| 1796 | } else { | 1807 | } else { |
| 1797 | primary_state->src_h = set->mode->vdisplay << 16; | 1808 | primary_state->src_h = vdisplay << 16; |
| 1798 | primary_state->src_w = set->mode->hdisplay << 16; | 1809 | primary_state->src_w = hdisplay << 16; |
| 1799 | } | 1810 | } |
| 1800 | 1811 | ||
| 1801 | commit: | 1812 | commit: |
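
Fetching the display size via drm_crtc_get_hv_timing() means stereo and double-scan adjustments are already applied before the values feed the source rectangle, which is 16.16 fixed point and swapped for 90/270 degree rotation. A standalone illustration of that swap (plain userspace C, numbers made up):

    #include <stdio.h>

    int main(void)
    {
            int hdisplay = 1920, vdisplay = 1080;   /* from the mode */
            int rotated = 1;                        /* 90 or 270 degrees */
            unsigned src_w, src_h;                  /* 16.16 fixed point */

            if (rotated) {
                    /* scanout samples the fb rotated, so the source
                     * width tracks the mode's vertical size */
                    src_h = hdisplay << 16;
                    src_w = vdisplay << 16;
            } else {
                    src_h = vdisplay << 16;
                    src_w = hdisplay << 16;
            }
            printf("src = %ux%u\n", src_w >> 16, src_h >> 16);
            return 0;
    }
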
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e673c13c7391..69cbab5e5c81 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -342,6 +342,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) | |||
| 342 | struct drm_plane *plane; | 342 | struct drm_plane *plane; |
| 343 | struct drm_atomic_state *state; | 343 | struct drm_atomic_state *state; |
| 344 | int i, ret; | 344 | int i, ret; |
| 345 | unsigned plane_mask; | ||
| 345 | 346 | ||
| 346 | state = drm_atomic_state_alloc(dev); | 347 | state = drm_atomic_state_alloc(dev); |
| 347 | if (!state) | 348 | if (!state) |
| @@ -349,11 +350,10 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper) | |||
| 349 | 350 | ||
| 350 | state->acquire_ctx = dev->mode_config.acquire_ctx; | 351 | state->acquire_ctx = dev->mode_config.acquire_ctx; |
| 351 | retry: | 352 | retry: |
| 353 | plane_mask = 0; | ||
| 352 | drm_for_each_plane(plane, dev) { | 354 | drm_for_each_plane(plane, dev) { |
| 353 | struct drm_plane_state *plane_state; | 355 | struct drm_plane_state *plane_state; |
| 354 | 356 | ||
| 355 | plane->old_fb = plane->fb; | ||
| 356 | |||
| 357 | plane_state = drm_atomic_get_plane_state(state, plane); | 357 | plane_state = drm_atomic_get_plane_state(state, plane); |
| 358 | if (IS_ERR(plane_state)) { | 358 | if (IS_ERR(plane_state)) { |
| 359 | ret = PTR_ERR(plane_state); | 359 | ret = PTR_ERR(plane_state); |
| @@ -362,6 +362,9 @@ retry: | |||
| 362 | 362 | ||
| 363 | plane_state->rotation = BIT(DRM_ROTATE_0); | 363 | plane_state->rotation = BIT(DRM_ROTATE_0); |
| 364 | 364 | ||
| 365 | plane->old_fb = plane->fb; | ||
| 366 | plane_mask |= 1 << drm_plane_index(plane); | ||
| 367 | |||
| 365 | /* disable non-primary: */ | 368 | /* disable non-primary: */ |
| 366 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) | 369 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) |
| 367 | continue; | 370 | continue; |
| @@ -382,19 +385,7 @@ retry: | |||
| 382 | ret = drm_atomic_commit(state); | 385 | ret = drm_atomic_commit(state); |
| 383 | 386 | ||
| 384 | fail: | 387 | fail: |
| 385 | drm_for_each_plane(plane, dev) { | 388 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 386 | if (ret == 0) { | ||
| 387 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 388 | if (new_fb) | ||
| 389 | drm_framebuffer_reference(new_fb); | ||
| 390 | plane->fb = new_fb; | ||
| 391 | plane->crtc = plane->state->crtc; | ||
| 392 | |||
| 393 | if (plane->old_fb) | ||
| 394 | drm_framebuffer_unreference(plane->old_fb); | ||
| 395 | } | ||
| 396 | plane->old_fb = NULL; | ||
| 397 | } | ||
| 398 | 389 | ||
| 399 | if (ret == -EDEADLK) | 390 | if (ret == -EDEADLK) |
| 400 | goto backoff; | 391 | goto backoff; |
| @@ -1236,7 +1227,9 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, | |||
| 1236 | struct drm_fb_helper *fb_helper = info->par; | 1227 | struct drm_fb_helper *fb_helper = info->par; |
| 1237 | struct drm_device *dev = fb_helper->dev; | 1228 | struct drm_device *dev = fb_helper->dev; |
| 1238 | struct drm_atomic_state *state; | 1229 | struct drm_atomic_state *state; |
| 1230 | struct drm_plane *plane; | ||
| 1239 | int i, ret; | 1231 | int i, ret; |
| 1232 | unsigned plane_mask; | ||
| 1240 | 1233 | ||
| 1241 | state = drm_atomic_state_alloc(dev); | 1234 | state = drm_atomic_state_alloc(dev); |
| 1242 | if (!state) | 1235 | if (!state) |
| @@ -1244,19 +1237,22 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, | |||
| 1244 | 1237 | ||
| 1245 | state->acquire_ctx = dev->mode_config.acquire_ctx; | 1238 | state->acquire_ctx = dev->mode_config.acquire_ctx; |
| 1246 | retry: | 1239 | retry: |
| 1240 | plane_mask = 0; | ||
| 1247 | for(i = 0; i < fb_helper->crtc_count; i++) { | 1241 | for(i = 0; i < fb_helper->crtc_count; i++) { |
| 1248 | struct drm_mode_set *mode_set; | 1242 | struct drm_mode_set *mode_set; |
| 1249 | 1243 | ||
| 1250 | mode_set = &fb_helper->crtc_info[i].mode_set; | 1244 | mode_set = &fb_helper->crtc_info[i].mode_set; |
| 1251 | 1245 | ||
| 1252 | mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb; | ||
| 1253 | |||
| 1254 | mode_set->x = var->xoffset; | 1246 | mode_set->x = var->xoffset; |
| 1255 | mode_set->y = var->yoffset; | 1247 | mode_set->y = var->yoffset; |
| 1256 | 1248 | ||
| 1257 | ret = __drm_atomic_helper_set_config(mode_set, state); | 1249 | ret = __drm_atomic_helper_set_config(mode_set, state); |
| 1258 | if (ret != 0) | 1250 | if (ret != 0) |
| 1259 | goto fail; | 1251 | goto fail; |
| 1252 | |||
| 1253 | plane = mode_set->crtc->primary; | ||
| 1254 | plane_mask |= 1 << drm_plane_index(plane); | ||
| 1255 | plane->old_fb = plane->fb; | ||
| 1260 | } | 1256 | } |
| 1261 | 1257 | ||
| 1262 | ret = drm_atomic_commit(state); | 1258 | ret = drm_atomic_commit(state); |
| @@ -1268,26 +1264,7 @@ retry: | |||
| 1268 | 1264 | ||
| 1269 | 1265 | ||
| 1270 | fail: | 1266 | fail: |
| 1271 | for(i = 0; i < fb_helper->crtc_count; i++) { | 1267 | drm_atomic_clean_old_fb(dev, plane_mask, ret); |
| 1272 | struct drm_mode_set *mode_set; | ||
| 1273 | struct drm_plane *plane; | ||
| 1274 | |||
| 1275 | mode_set = &fb_helper->crtc_info[i].mode_set; | ||
| 1276 | plane = mode_set->crtc->primary; | ||
| 1277 | |||
| 1278 | if (ret == 0) { | ||
| 1279 | struct drm_framebuffer *new_fb = plane->state->fb; | ||
| 1280 | |||
| 1281 | if (new_fb) | ||
| 1282 | drm_framebuffer_reference(new_fb); | ||
| 1283 | plane->fb = new_fb; | ||
| 1284 | plane->crtc = plane->state->crtc; | ||
| 1285 | |||
| 1286 | if (plane->old_fb) | ||
| 1287 | drm_framebuffer_unreference(plane->old_fb); | ||
| 1288 | } | ||
| 1289 | plane->old_fb = NULL; | ||
| 1290 | } | ||
| 1291 | 1268 | ||
| 1292 | if (ret == -EDEADLK) | 1269 | if (ret == -EDEADLK) |
| 1293 | goto backoff; | 1270 | goto backoff; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8afda459a26e..95bb27de774f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -351,6 +351,8 @@ enum intel_dpll_id { | |||
| 351 | /* hsw/bdw */ | 351 | /* hsw/bdw */ |
| 352 | DPLL_ID_WRPLL1 = 0, | 352 | DPLL_ID_WRPLL1 = 0, |
| 353 | DPLL_ID_WRPLL2 = 1, | 353 | DPLL_ID_WRPLL2 = 1, |
| 354 | DPLL_ID_SPLL = 2, | ||
| 355 | |||
| 354 | /* skl */ | 356 | /* skl */ |
| 355 | DPLL_ID_SKL_DPLL1 = 0, | 357 | DPLL_ID_SKL_DPLL1 = 0, |
| 356 | DPLL_ID_SKL_DPLL2 = 1, | 358 | DPLL_ID_SKL_DPLL2 = 1, |
| @@ -367,6 +369,7 @@ struct intel_dpll_hw_state { | |||
| 367 | 369 | ||
| 368 | /* hsw, bdw */ | 370 | /* hsw, bdw */ |
| 369 | uint32_t wrpll; | 371 | uint32_t wrpll; |
| 372 | uint32_t spll; | ||
| 370 | 373 | ||
| 371 | /* skl */ | 374 | /* skl */ |
| 372 | /* | 375 | /* |
| @@ -2648,6 +2651,7 @@ struct i915_params { | |||
| 2648 | int enable_cmd_parser; | 2651 | int enable_cmd_parser; |
| 2649 | /* leave bools at the end to not create holes */ | 2652 | /* leave bools at the end to not create holes */ |
| 2650 | bool enable_hangcheck; | 2653 | bool enable_hangcheck; |
| 2654 | bool fastboot; | ||
| 2651 | bool prefault_disable; | 2655 | bool prefault_disable; |
| 2652 | bool load_detect_test; | 2656 | bool load_detect_test; |
| 2653 | bool reset; | 2657 | bool reset; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5cf4a1998273..91bb1fc27420 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3809,6 +3809,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3809 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | 3809 | int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
| 3810 | struct drm_file *file) | 3810 | struct drm_file *file) |
| 3811 | { | 3811 | { |
| 3812 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 3812 | struct drm_i915_gem_caching *args = data; | 3813 | struct drm_i915_gem_caching *args = data; |
| 3813 | struct drm_i915_gem_object *obj; | 3814 | struct drm_i915_gem_object *obj; |
| 3814 | enum i915_cache_level level; | 3815 | enum i915_cache_level level; |
| @@ -3837,9 +3838,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3837 | return -EINVAL; | 3838 | return -EINVAL; |
| 3838 | } | 3839 | } |
| 3839 | 3840 | ||
| 3841 | intel_runtime_pm_get(dev_priv); | ||
| 3842 | |||
| 3840 | ret = i915_mutex_lock_interruptible(dev); | 3843 | ret = i915_mutex_lock_interruptible(dev); |
| 3841 | if (ret) | 3844 | if (ret) |
| 3842 | return ret; | 3845 | goto rpm_put; |
| 3843 | 3846 | ||
| 3844 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); | 3847 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
| 3845 | if (&obj->base == NULL) { | 3848 | if (&obj->base == NULL) { |
| @@ -3852,6 +3855,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, | |||
| 3852 | drm_gem_object_unreference(&obj->base); | 3855 | drm_gem_object_unreference(&obj->base); |
| 3853 | unlock: | 3856 | unlock: |
| 3854 | mutex_unlock(&dev->struct_mutex); | 3857 | mutex_unlock(&dev->struct_mutex); |
| 3858 | rpm_put: | ||
| 3859 | intel_runtime_pm_put(dev_priv); | ||
| 3860 | |||
| 3855 | return ret; | 3861 | return ret; |
| 3856 | } | 3862 | } |
| 3857 | 3863 | ||
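
The set_caching fix above is the standard reverse-order unwind: the runtime-PM reference is now taken before the lock, so the early lock failure can no longer simply return but must jump past the unlock to drop the reference. Generic shape of the idiom, with hypothetical helper names standing in for the i915 calls:

    int do_op(struct ctx *c)
    {
            int ret;

            take_wakeref(c);                        /* resource A */

            ret = take_lock_interruptible(c);       /* resource B */
            if (ret)
                    goto put_wakeref;               /* must still drop A */

            ret = do_hw_work(c);

            release_lock(c);                        /* drop B first ... */
    put_wakeref:
            drop_wakeref(c);                        /* ... then A: reverse
                                                     * order of acquisition */
            return ret;
    }
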
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 96bb23865eac..4be13a5eb932 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
| @@ -40,6 +40,7 @@ struct i915_params i915 __read_mostly = { | |||
| 40 | .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), | 40 | .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), |
| 41 | .disable_power_well = -1, | 41 | .disable_power_well = -1, |
| 42 | .enable_ips = 1, | 42 | .enable_ips = 1, |
| 43 | .fastboot = 0, | ||
| 43 | .prefault_disable = 0, | 44 | .prefault_disable = 0, |
| 44 | .load_detect_test = 0, | 45 | .load_detect_test = 0, |
| 45 | .reset = true, | 46 | .reset = true, |
| @@ -133,6 +134,10 @@ MODULE_PARM_DESC(disable_power_well, | |||
| 133 | module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); | 134 | module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); |
| 134 | MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); | 135 | MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); |
| 135 | 136 | ||
| 137 | module_param_named(fastboot, i915.fastboot, bool, 0600); | ||
| 138 | MODULE_PARM_DESC(fastboot, | ||
| 139 | "Try to skip unnecessary mode sets at boot time (default: false)"); | ||
| 140 | |||
| 136 | module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); | 141 | module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); |
| 137 | MODULE_PARM_DESC(prefault_disable, | 142 | MODULE_PARM_DESC(prefault_disable, |
| 138 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " | 143 | "Disable page prefaulting for pread/pwrite/reloc (default:false). " |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index b84aaa0bb48a..6a2c76e367a5 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -138,18 +138,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, | |||
| 138 | pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); | 138 | pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static void hsw_crt_pre_enable(struct intel_encoder *encoder) | ||
| 142 | { | ||
| 143 | struct drm_device *dev = encoder->base.dev; | ||
| 144 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 145 | |||
| 146 | WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n"); | ||
| 147 | I915_WRITE(SPLL_CTL, | ||
| 148 | SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC); | ||
| 149 | POSTING_READ(SPLL_CTL); | ||
| 150 | udelay(20); | ||
| 151 | } | ||
| 152 | |||
| 153 | /* Note: The caller is required to filter out dpms modes not supported by the | 141 | /* Note: The caller is required to filter out dpms modes not supported by the |
| 154 | * platform. */ | 142 | * platform. */ |
| 155 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | 143 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
| @@ -216,19 +204,6 @@ static void pch_post_disable_crt(struct intel_encoder *encoder) | |||
| 216 | intel_disable_crt(encoder); | 204 | intel_disable_crt(encoder); |
| 217 | } | 205 | } |
| 218 | 206 | ||
| 219 | static void hsw_crt_post_disable(struct intel_encoder *encoder) | ||
| 220 | { | ||
| 221 | struct drm_device *dev = encoder->base.dev; | ||
| 222 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
| 223 | uint32_t val; | ||
| 224 | |||
| 225 | DRM_DEBUG_KMS("Disabling SPLL\n"); | ||
| 226 | val = I915_READ(SPLL_CTL); | ||
| 227 | WARN_ON(!(val & SPLL_PLL_ENABLE)); | ||
| 228 | I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); | ||
| 229 | POSTING_READ(SPLL_CTL); | ||
| 230 | } | ||
| 231 | |||
| 232 | static void intel_enable_crt(struct intel_encoder *encoder) | 207 | static void intel_enable_crt(struct intel_encoder *encoder) |
| 233 | { | 208 | { |
| 234 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | 209 | struct intel_crt *crt = intel_encoder_to_crt(encoder); |
| @@ -280,6 +255,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, | |||
| 280 | if (HAS_DDI(dev)) { | 255 | if (HAS_DDI(dev)) { |
| 281 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; | 256 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; |
| 282 | pipe_config->port_clock = 135000 * 2; | 257 | pipe_config->port_clock = 135000 * 2; |
| 258 | |||
| 259 | pipe_config->dpll_hw_state.wrpll = 0; | ||
| 260 | pipe_config->dpll_hw_state.spll = | ||
| 261 | SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; | ||
| 283 | } | 262 | } |
| 284 | 263 | ||
| 285 | return true; | 264 | return true; |
| @@ -860,8 +839,6 @@ void intel_crt_init(struct drm_device *dev) | |||
| 860 | if (HAS_DDI(dev)) { | 839 | if (HAS_DDI(dev)) { |
| 861 | crt->base.get_config = hsw_crt_get_config; | 840 | crt->base.get_config = hsw_crt_get_config; |
| 862 | crt->base.get_hw_state = intel_ddi_get_hw_state; | 841 | crt->base.get_hw_state = intel_ddi_get_hw_state; |
| 863 | crt->base.pre_enable = hsw_crt_pre_enable; | ||
| 864 | crt->base.post_disable = hsw_crt_post_disable; | ||
| 865 | } else { | 842 | } else { |
| 866 | crt->base.get_config = intel_crt_get_config; | 843 | crt->base.get_config = intel_crt_get_config; |
| 867 | crt->base.get_hw_state = intel_crt_get_hw_state; | 844 | crt->base.get_hw_state = intel_crt_get_hw_state; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index b25e99a432fb..a6752a61d99f 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1286,6 +1286,18 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc, | |||
| 1286 | } | 1286 | } |
| 1287 | 1287 | ||
| 1288 | crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); | 1288 | crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); |
| 1289 | } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) { | ||
| 1290 | struct drm_atomic_state *state = crtc_state->base.state; | ||
| 1291 | struct intel_shared_dpll_config *spll = | ||
| 1292 | &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL]; | ||
| 1293 | |||
| 1294 | if (spll->crtc_mask && | ||
| 1295 | WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll)) | ||
| 1296 | return false; | ||
| 1297 | |||
| 1298 | crtc_state->shared_dpll = DPLL_ID_SPLL; | ||
| 1299 | spll->hw_state.spll = crtc_state->dpll_hw_state.spll; | ||
| 1300 | spll->crtc_mask |= 1 << intel_crtc->pipe; | ||
| 1289 | } | 1301 | } |
| 1290 | 1302 | ||
| 1291 | return true; | 1303 | return true; |
| @@ -2437,7 +2449,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) | |||
| 2437 | } | 2449 | } |
| 2438 | } | 2450 | } |
| 2439 | 2451 | ||
| 2440 | static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, | 2452 | static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, |
| 2441 | struct intel_shared_dpll *pll) | 2453 | struct intel_shared_dpll *pll) |
| 2442 | { | 2454 | { |
| 2443 | I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); | 2455 | I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); |
| @@ -2445,9 +2457,17 @@ static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
| 2445 | udelay(20); | 2457 | udelay(20); |
| 2446 | } | 2458 | } |
| 2447 | 2459 | ||
| 2448 | static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, | 2460 | static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, |
| 2449 | struct intel_shared_dpll *pll) | 2461 | struct intel_shared_dpll *pll) |
| 2450 | { | 2462 | { |
| 2463 | I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); | ||
| 2464 | POSTING_READ(SPLL_CTL); | ||
| 2465 | udelay(20); | ||
| 2466 | } | ||
| 2467 | |||
| 2468 | static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, | ||
| 2469 | struct intel_shared_dpll *pll) | ||
| 2470 | { | ||
| 2451 | uint32_t val; | 2471 | uint32_t val; |
| 2452 | 2472 | ||
| 2453 | val = I915_READ(WRPLL_CTL(pll->id)); | 2473 | val = I915_READ(WRPLL_CTL(pll->id)); |
| @@ -2455,9 +2475,19 @@ static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv, | |||
| 2455 | POSTING_READ(WRPLL_CTL(pll->id)); | 2475 | POSTING_READ(WRPLL_CTL(pll->id)); |
| 2456 | } | 2476 | } |
| 2457 | 2477 | ||
| 2458 | static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | 2478 | static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, |
| 2459 | struct intel_shared_dpll *pll, | 2479 | struct intel_shared_dpll *pll) |
| 2460 | struct intel_dpll_hw_state *hw_state) | 2480 | { |
| 2481 | uint32_t val; | ||
| 2482 | |||
| 2483 | val = I915_READ(SPLL_CTL); | ||
| 2484 | I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); | ||
| 2485 | POSTING_READ(SPLL_CTL); | ||
| 2486 | } | ||
| 2487 | |||
| 2488 | static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, | ||
| 2489 | struct intel_shared_dpll *pll, | ||
| 2490 | struct intel_dpll_hw_state *hw_state) | ||
| 2461 | { | 2491 | { |
| 2462 | uint32_t val; | 2492 | uint32_t val; |
| 2463 | 2493 | ||
| @@ -2470,25 +2500,50 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, | |||
| 2470 | return val & WRPLL_PLL_ENABLE; | 2500 | return val & WRPLL_PLL_ENABLE; |
| 2471 | } | 2501 | } |
| 2472 | 2502 | ||
| 2503 | static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, | ||
| 2504 | struct intel_shared_dpll *pll, | ||
| 2505 | struct intel_dpll_hw_state *hw_state) | ||
| 2506 | { | ||
| 2507 | uint32_t val; | ||
| 2508 | |||
| 2509 | if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) | ||
| 2510 | return false; | ||
| 2511 | |||
| 2512 | val = I915_READ(SPLL_CTL); | ||
| 2513 | hw_state->spll = val; | ||
| 2514 | |||
| 2515 | return val & SPLL_PLL_ENABLE; | ||
| 2516 | } | ||
| 2517 | |||
| 2518 | |||
| 2473 | static const char * const hsw_ddi_pll_names[] = { | 2519 | static const char * const hsw_ddi_pll_names[] = { |
| 2474 | "WRPLL 1", | 2520 | "WRPLL 1", |
| 2475 | "WRPLL 2", | 2521 | "WRPLL 2", |
| 2522 | "SPLL" | ||
| 2476 | }; | 2523 | }; |
| 2477 | 2524 | ||
| 2478 | static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) | 2525 | static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) |
| 2479 | { | 2526 | { |
| 2480 | int i; | 2527 | int i; |
| 2481 | 2528 | ||
| 2482 | dev_priv->num_shared_dpll = 2; | 2529 | dev_priv->num_shared_dpll = 3; |
| 2483 | 2530 | ||
| 2484 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 2531 | for (i = 0; i < 2; i++) { |
| 2485 | dev_priv->shared_dplls[i].id = i; | 2532 | dev_priv->shared_dplls[i].id = i; |
| 2486 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; | 2533 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; |
| 2487 | dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable; | 2534 | dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable; |
| 2488 | dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable; | 2535 | dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable; |
| 2489 | dev_priv->shared_dplls[i].get_hw_state = | 2536 | dev_priv->shared_dplls[i].get_hw_state = |
| 2490 | hsw_ddi_pll_get_hw_state; | 2537 | hsw_ddi_wrpll_get_hw_state; |
| 2491 | } | 2538 | } |
| 2539 | |||
| 2540 | /* SPLL is special, but needs to be initialized anyway. */ | ||
| 2541 | dev_priv->shared_dplls[i].id = i; | ||
| 2542 | dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; | ||
| 2543 | dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable; | ||
| 2544 | dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable; | ||
| 2545 | dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state; | ||
| 2546 | |||
| 2492 | } | 2547 | } |
| 2493 | 2548 | ||
| 2494 | static const char * const skl_ddi_pll_names[] = { | 2549 | static const char * const skl_ddi_pll_names[] = { |
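
With SPLL folded into the shared-DPLL table, the generic state tracker handles its refcounting and hardware readout like the WRPLLs; only allocation stays special, since the select hook claims slot DPLL_ID_SPLL explicitly and the generic search must never hand it out. A sketch condensed from the intel_display.c hunk further down, showing how the allocator is capped:

    /* generic allocator: on HSW/BDW only the two WRPLL slots are up for
     * grabs; SPLL (slot 2) is reserved for the CRT path */
    int max = dev_priv->num_shared_dpll;        /* now 3 on HSW/BDW */

    if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
            max = 2;                            /* do not consider SPLL */

    for (i = 0; i < max; i++) {
            pll = &dev_priv->shared_dplls[i];
            /* ... timing / reuse compatibility checks ... */
    }
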
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f62ffc04c21d..71860f8680f9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2646,11 +2646,13 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2646 | return; | 2646 | return; |
| 2647 | 2647 | ||
| 2648 | valid_fb: | 2648 | valid_fb: |
| 2649 | plane_state->src_x = plane_state->src_y = 0; | 2649 | plane_state->src_x = 0; |
| 2650 | plane_state->src_y = 0; | ||
| 2650 | plane_state->src_w = fb->width << 16; | 2651 | plane_state->src_w = fb->width << 16; |
| 2651 | plane_state->src_h = fb->height << 16; | 2652 | plane_state->src_h = fb->height << 16; |
| 2652 | 2653 | ||
| 2653 | plane_state->crtc_x = plane_state->src_y = 0; | 2654 | plane_state->crtc_x = 0; |
| 2655 | plane_state->crtc_y = 0; | ||
| 2654 | plane_state->crtc_w = fb->width; | 2656 | plane_state->crtc_w = fb->width; |
| 2655 | plane_state->crtc_h = fb->height; | 2657 | plane_state->crtc_h = fb->height; |
| 2656 | 2658 | ||
| @@ -4237,6 +4239,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4237 | struct intel_shared_dpll *pll; | 4239 | struct intel_shared_dpll *pll; |
| 4238 | struct intel_shared_dpll_config *shared_dpll; | 4240 | struct intel_shared_dpll_config *shared_dpll; |
| 4239 | enum intel_dpll_id i; | 4241 | enum intel_dpll_id i; |
| 4242 | int max = dev_priv->num_shared_dpll; | ||
| 4240 | 4243 | ||
| 4241 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); | 4244 | shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); |
| 4242 | 4245 | ||
| @@ -4271,9 +4274,11 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, | |||
| 4271 | WARN_ON(shared_dpll[i].crtc_mask); | 4274 | WARN_ON(shared_dpll[i].crtc_mask); |
| 4272 | 4275 | ||
| 4273 | goto found; | 4276 | goto found; |
| 4274 | } | 4277 | } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) |
| 4278 | /* Do not consider SPLL */ | ||
| 4279 | max = 2; | ||
| 4275 | 4280 | ||
| 4276 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { | 4281 | for (i = 0; i < max; i++) { |
| 4277 | pll = &dev_priv->shared_dplls[i]; | 4282 | pll = &dev_priv->shared_dplls[i]; |
| 4278 | 4283 | ||
| 4279 | /* Only want to check enabled timings first */ | 4284 | /* Only want to check enabled timings first */ |
| @@ -9723,6 +9728,8 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, | |||
| 9723 | case PORT_CLK_SEL_WRPLL2: | 9728 | case PORT_CLK_SEL_WRPLL2: |
| 9724 | pipe_config->shared_dpll = DPLL_ID_WRPLL2; | 9729 | pipe_config->shared_dpll = DPLL_ID_WRPLL2; |
| 9725 | break; | 9730 | break; |
| 9731 | case PORT_CLK_SEL_SPLL: | ||
| 9732 | pipe_config->shared_dpll = DPLL_ID_SPLL; | ||
| 9726 | } | 9733 | } |
| 9727 | } | 9734 | } |
| 9728 | 9735 | ||
| @@ -12003,9 +12010,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, | |||
| 12003 | pipe_config->dpll_hw_state.cfgcr1, | 12010 | pipe_config->dpll_hw_state.cfgcr1, |
| 12004 | pipe_config->dpll_hw_state.cfgcr2); | 12011 | pipe_config->dpll_hw_state.cfgcr2); |
| 12005 | } else if (HAS_DDI(dev)) { | 12012 | } else if (HAS_DDI(dev)) { |
| 12006 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n", | 12013 | DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", |
| 12007 | pipe_config->ddi_pll_sel, | 12014 | pipe_config->ddi_pll_sel, |
| 12008 | pipe_config->dpll_hw_state.wrpll); | 12015 | pipe_config->dpll_hw_state.wrpll, |
| 12016 | pipe_config->dpll_hw_state.spll); | ||
| 12009 | } else { | 12017 | } else { |
| 12010 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " | 12018 | DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, " |
| 12011 | "fp0: 0x%x, fp1: 0x%x\n", | 12019 | "fp0: 0x%x, fp1: 0x%x\n", |
| @@ -12528,6 +12536,7 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
| 12528 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); | 12536 | PIPE_CONF_CHECK_X(dpll_hw_state.fp0); |
| 12529 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); | 12537 | PIPE_CONF_CHECK_X(dpll_hw_state.fp1); |
| 12530 | PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); | 12538 | PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); |
| 12539 | PIPE_CONF_CHECK_X(dpll_hw_state.spll); | ||
| 12531 | PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); | 12540 | PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); |
| 12532 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); | 12541 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); |
| 12533 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); | 12542 | PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); |
| @@ -13032,6 +13041,9 @@ static int intel_atomic_check(struct drm_device *dev, | |||
| 13032 | struct intel_crtc_state *pipe_config = | 13041 | struct intel_crtc_state *pipe_config = |
| 13033 | to_intel_crtc_state(crtc_state); | 13042 | to_intel_crtc_state(crtc_state); |
| 13034 | 13043 | ||
| 13044 | memset(&to_intel_crtc(crtc)->atomic, 0, | ||
| 13045 | sizeof(struct intel_crtc_atomic_commit)); | ||
| 13046 | |||
| 13035 | /* Catch I915_MODE_FLAG_INHERITED */ | 13047 | /* Catch I915_MODE_FLAG_INHERITED */ |
| 13036 | if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) | 13048 | if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) |
| 13037 | crtc_state->mode_changed = true; | 13049 | crtc_state->mode_changed = true; |
| @@ -13056,7 +13068,8 @@ static int intel_atomic_check(struct drm_device *dev, | |||
| 13056 | if (ret) | 13068 | if (ret) |
| 13057 | return ret; | 13069 | return ret; |
| 13058 | 13070 | ||
| 13059 | if (intel_pipe_config_compare(state->dev, | 13071 | if (i915.fastboot && |
| 13072 | intel_pipe_config_compare(state->dev, | ||
| 13060 | to_intel_crtc_state(crtc->state), | 13073 | to_intel_crtc_state(crtc->state), |
| 13061 | pipe_config, true)) { | 13074 | pipe_config, true)) { |
| 13062 | crtc_state->mode_changed = false; | 13075 | crtc_state->mode_changed = false; |
| @@ -14364,16 +14377,17 @@ static int intel_framebuffer_init(struct drm_device *dev, | |||
| 14364 | static struct drm_framebuffer * | 14377 | static struct drm_framebuffer * |
| 14365 | intel_user_framebuffer_create(struct drm_device *dev, | 14378 | intel_user_framebuffer_create(struct drm_device *dev, |
| 14366 | struct drm_file *filp, | 14379 | struct drm_file *filp, |
| 14367 | struct drm_mode_fb_cmd2 *mode_cmd) | 14380 | struct drm_mode_fb_cmd2 *user_mode_cmd) |
| 14368 | { | 14381 | { |
| 14369 | struct drm_i915_gem_object *obj; | 14382 | struct drm_i915_gem_object *obj; |
| 14383 | struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; | ||
| 14370 | 14384 | ||
| 14371 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, | 14385 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, |
| 14372 | mode_cmd->handles[0])); | 14386 | mode_cmd.handles[0])); |
| 14373 | if (&obj->base == NULL) | 14387 | if (&obj->base == NULL) |
| 14374 | return ERR_PTR(-ENOENT); | 14388 | return ERR_PTR(-ENOENT); |
| 14375 | 14389 | ||
| 14376 | return intel_framebuffer_create(dev, mode_cmd, obj); | 14390 | return intel_framebuffer_create(dev, &mode_cmd, obj); |
| 14377 | } | 14391 | } |
| 14378 | 14392 | ||
| 14379 | #ifndef CONFIG_DRM_FBDEV_EMULATION | 14393 | #ifndef CONFIG_DRM_FBDEV_EMULATION |
| @@ -14705,6 +14719,9 @@ static struct intel_quirk intel_quirks[] = { | |||
| 14705 | /* Apple Macbook 2,1 (Core 2 T7400) */ | 14719 | /* Apple Macbook 2,1 (Core 2 T7400) */ |
| 14706 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, | 14720 | { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, |
| 14707 | 14721 | ||
| 14722 | /* Apple Macbook 4,1 */ | ||
| 14723 | { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, | ||
| 14724 | |||
| 14708 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ | 14725 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ |
| 14709 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, | 14726 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, |
| 14710 | 14727 | ||
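
The intel_user_framebuffer_create() change is subtle: the ioctl layer copies the fb cmd struct back to userspace after the handler returns, and the framebuffer init path may rewrite its fields, so the handler must work on a private copy. The pattern, as a hedged generic sketch (function name illustrative):

    static struct drm_framebuffer *
    user_fb_create(struct drm_device *dev, struct drm_file *filp,
                   struct drm_mode_fb_cmd2 *user_mode_cmd)
    {
            /* stack copy: helpers below may fix fields up, and the
             * caller's struct is copied back to userspace, so it must
             * stay untouched */
            struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
            struct drm_i915_gem_object *obj;

            obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
                                                    mode_cmd.handles[0]));
            if (&obj->base == NULL)
                    return ERR_PTR(-ENOENT);

            return intel_framebuffer_create(dev, &mode_cmd, obj);
    }
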
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d52a15df6917..071a76b9ac52 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4449,7 +4449,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) | |||
| 4449 | POSTING_READ(GEN6_RPNSWREQ); | 4449 | POSTING_READ(GEN6_RPNSWREQ); |
| 4450 | 4450 | ||
| 4451 | dev_priv->rps.cur_freq = val; | 4451 | dev_priv->rps.cur_freq = val; |
| 4452 | trace_intel_gpu_freq_change(val * 50); | 4452 | trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); |
| 4453 | } | 4453 | } |
| 4454 | 4454 | ||
| 4455 | static void valleyview_set_rps(struct drm_device *dev, u8 val) | 4455 | static void valleyview_set_rps(struct drm_device *dev, u8 val) |
| @@ -7255,7 +7255,8 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
| 7255 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | 7255 | int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) |
| 7256 | { | 7256 | { |
| 7257 | if (IS_GEN9(dev_priv->dev)) | 7257 | if (IS_GEN9(dev_priv->dev)) |
| 7258 | return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; | 7258 | return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, |
| 7259 | GEN9_FREQ_SCALER); | ||
| 7259 | else if (IS_CHERRYVIEW(dev_priv->dev)) | 7260 | else if (IS_CHERRYVIEW(dev_priv->dev)) |
| 7260 | return chv_gpu_freq(dev_priv, val); | 7261 | return chv_gpu_freq(dev_priv, val); |
| 7261 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 7262 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
| @@ -7267,13 +7268,14 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) | |||
| 7267 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) | 7268 | int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) |
| 7268 | { | 7269 | { |
| 7269 | if (IS_GEN9(dev_priv->dev)) | 7270 | if (IS_GEN9(dev_priv->dev)) |
| 7270 | return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; | 7271 | return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, |
| 7272 | GT_FREQUENCY_MULTIPLIER); | ||
| 7271 | else if (IS_CHERRYVIEW(dev_priv->dev)) | 7273 | else if (IS_CHERRYVIEW(dev_priv->dev)) |
| 7272 | return chv_freq_opcode(dev_priv, val); | 7274 | return chv_freq_opcode(dev_priv, val); |
| 7273 | else if (IS_VALLEYVIEW(dev_priv->dev)) | 7275 | else if (IS_VALLEYVIEW(dev_priv->dev)) |
| 7274 | return byt_freq_opcode(dev_priv, val); | 7276 | return byt_freq_opcode(dev_priv, val); |
| 7275 | else | 7277 | else |
| 7276 | return val / GT_FREQUENCY_MULTIPLIER; | 7278 | return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); |
| 7277 | } | 7279 | } |
| 7278 | 7280 | ||
| 7279 | struct request_boost { | 7281 | struct request_boost { |
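
The rounding change is easy to check by hand: GEN9 frequencies are opcode * 50 / 3 MHz, so truncating in both directions makes a freq -> opcode -> freq round trip drift downward, while round-to-closest is stable. A standalone demonstration (the kernel macro is restated here for positive operands only):

    #include <stdio.h>

    #define GT_FREQUENCY_MULTIPLIER 50
    #define GEN9_FREQ_SCALER 3
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
            int opcode = 19;        /* example RPS opcode */

            int trunc = opcode * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER;
            int round = DIV_ROUND_CLOSEST(opcode * GT_FREQUENCY_MULTIPLIER,
                                          GEN9_FREQ_SCALER);

            /* trunc: 316 MHz maps back to opcode 18 (drifted);
             * round: 317 MHz maps back to opcode 19 (stable) */
            printf("trunc %d MHz -> opcode %d\n", trunc,
                   trunc * GEN9_FREQ_SCALER / GT_FREQUENCY_MULTIPLIER);
            printf("round %d MHz -> opcode %d\n", round,
                   DIV_ROUND_CLOSEST(round * GEN9_FREQ_SCALER,
                                     GT_FREQUENCY_MULTIPLIER));
            return 0;
    }
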
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 4f2068fe5d88..a7bf6a90eae5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c | |||
| @@ -70,6 +70,11 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 70 | BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); | 70 | BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev); |
| 71 | BUG_ON(pixels_current == pixels_prev); | 71 | BUG_ON(pixels_current == pixels_prev); |
| 72 | 72 | ||
| 73 | if (!handle || !file_priv) { | ||
| 74 | mga_hide_cursor(mdev); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 73 | obj = drm_gem_object_lookup(dev, file_priv, handle); | 78 | obj = drm_gem_object_lookup(dev, file_priv, handle); |
| 74 | if (!obj) | 79 | if (!obj) |
| 75 | return -ENOENT; | 80 | return -ENOENT; |
| @@ -88,12 +93,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 88 | goto out_unreserve1; | 93 | goto out_unreserve1; |
| 89 | } | 94 | } |
| 90 | 95 | ||
| 91 | if (!handle) { | ||
| 92 | mga_hide_cursor(mdev); | ||
| 93 | ret = 0; | ||
| 94 | goto out1; | ||
| 95 | } | ||
| 96 | |||
| 97 | /* Move cursor buffers into VRAM if they aren't already */ | 96 | /* Move cursor buffers into VRAM if they aren't already */ |
| 98 | if (!pixels_1->pin_count) { | 97 | if (!pixels_1->pin_count) { |
| 99 | ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, | 98 | ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM, |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d3024883b844..84d45633d28c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
| @@ -221,11 +221,17 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 221 | if (!(rdev->flags & RADEON_IS_PCIE)) | 221 | if (!(rdev->flags & RADEON_IS_PCIE)) |
| 222 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | 222 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
| 223 | 223 | ||
| 224 | /* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx | ||
| 225 | * See https://bugs.freedesktop.org/show_bug.cgi?id=91268 | ||
| 226 | */ | ||
| 227 | if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635) | ||
| 228 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | ||
| 229 | |||
| 224 | #ifdef CONFIG_X86_32 | 230 | #ifdef CONFIG_X86_32 |
| 225 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit | 231 | /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit |
| 226 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 | 232 | * See https://bugs.freedesktop.org/show_bug.cgi?id=84627 |
| 227 | */ | 233 | */ |
| 228 | bo->flags &= ~RADEON_GEM_GTT_WC; | 234 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); |
| 229 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) | 235 | #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT) |
| 230 | /* Don't try to enable write-combining when it can't work, or things | 236 | /* Don't try to enable write-combining when it can't work, or things |
| 231 | * may be slow | 237 | * may be slow |
| @@ -235,9 +241,10 @@ int radeon_bo_create(struct radeon_device *rdev, | |||
| 235 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ | 241 | #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ |
| 236 | thanks to write-combining | 242 | thanks to write-combining |
| 237 | 243 | ||
| 238 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " | 244 | if (bo->flags & RADEON_GEM_GTT_WC) |
| 239 | "better performance thanks to write-combining\n"); | 245 | DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " |
| 240 | bo->flags &= ~RADEON_GEM_GTT_WC; | 246 | "better performance thanks to write-combining\n"); |
| 247 | bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); | ||
| 241 | #endif | 248 | #endif |
| 242 | 249 | ||
| 243 | radeon_ttm_placement_from_domain(bo, domain); | 250 | radeon_ttm_placement_from_domain(bo, domain); |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6d80dde23400..f4f03dcc1530 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
| @@ -1542,8 +1542,7 @@ int radeon_pm_late_init(struct radeon_device *rdev) | |||
| 1542 | ret = device_create_file(rdev->dev, &dev_attr_power_method); | 1542 | ret = device_create_file(rdev->dev, &dev_attr_power_method); |
| 1543 | if (ret) | 1543 | if (ret) |
| 1544 | DRM_ERROR("failed to create device file for power method\n"); | 1544 | DRM_ERROR("failed to create device file for power method\n"); |
| 1545 | if (!ret) | 1545 | rdev->pm.sysfs_initialized = true; |
| 1546 | rdev->pm.sysfs_initialized = true; | ||
| 1547 | } | 1546 | } |
| 1548 | 1547 | ||
| 1549 | mutex_lock(&rdev->pm.mutex); | 1548 | mutex_lock(&rdev->pm.mutex); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e72bf46042e0..a82b891ae1fe 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -2927,7 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { | |||
| 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | 2927 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, |
| 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, | 2928 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, |
| 2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, | 2929 | { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, |
| 2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, | 2930 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 }, |
| 2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, | 2931 | { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, |
| 2932 | { 0, 0, 0, 0 }, | 2932 | { 0, 0, 0, 0 }, |
| 2933 | }; | 2933 | }; |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 7a9f4768591e..265064c62d49 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -168,7 +168,7 @@ static int vc4_get_clock_select(struct drm_crtc *crtc) | |||
| 168 | struct drm_connector *connector; | 168 | struct drm_connector *connector; |
| 169 | 169 | ||
| 170 | drm_for_each_connector(connector, crtc->dev) { | 170 | drm_for_each_connector(connector, crtc->dev) { |
| 171 | if (connector && connector->state->crtc == crtc) { | 171 | if (connector->state->crtc == crtc) { |
| 172 | struct drm_encoder *encoder = connector->encoder; | 172 | struct drm_encoder *encoder = connector->encoder; |
| 173 | struct vc4_encoder *vc4_encoder = | 173 | struct vc4_encoder *vc4_encoder = |
| 174 | to_vc4_encoder(encoder); | 174 | to_vc4_encoder(encoder); |
| @@ -401,7 +401,8 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 401 | dlist_next++; | 401 | dlist_next++; |
| 402 | 402 | ||
| 403 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | 403 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), |
| 404 | (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist); | 404 | (u32 __iomem *)vc4_crtc->dlist - |
| 405 | (u32 __iomem *)vc4->hvs->dlist); | ||
| 405 | 406 | ||
| 406 | /* Make the next display list start after ours. */ | 407 | /* Make the next display list start after ours. */ |
| 407 | vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); | 408 | vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist); |
| @@ -591,14 +592,14 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) | |||
| 591 | * that will take too much. | 592 | * that will take too much. |
| 592 | */ | 593 | */ |
| 593 | primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); | 594 | primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY); |
| 594 | if (!primary_plane) { | 595 | if (IS_ERR(primary_plane)) { |
| 595 | dev_err(dev, "failed to construct primary plane\n"); | 596 | dev_err(dev, "failed to construct primary plane\n"); |
| 596 | ret = PTR_ERR(primary_plane); | 597 | ret = PTR_ERR(primary_plane); |
| 597 | goto err; | 598 | goto err; |
| 598 | } | 599 | } |
| 599 | 600 | ||
| 600 | cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); | 601 | cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR); |
| 601 | if (!cursor_plane) { | 602 | if (IS_ERR(cursor_plane)) { |
| 602 | dev_err(dev, "failed to construct cursor plane\n"); | 603 | dev_err(dev, "failed to construct cursor plane\n"); |
| 603 | ret = PTR_ERR(cursor_plane); | 604 | ret = PTR_ERR(cursor_plane); |
| 604 | goto err_primary; | 605 | goto err_primary; |
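
vc4_plane_init() reports failure as an ERR_PTR()-encoded pointer, never NULL, so the old !plane tests could never fire and errors sailed through to a later crash. The general rule for such APIs, as a short sketch:

    #include <linux/err.h>

    struct drm_plane *plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);

    /* ERR_PTR-returning APIs: test with IS_ERR(), decode with PTR_ERR() */
    if (IS_ERR(plane))
            return PTR_ERR(plane);

    /* a NULL-returning allocator would instead be:
     *         if (!plane)
     *                 return -ENOMEM;
     * mixing the two conventions is how bugs like this one happen */
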
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 6e730605edcc..d5db9e0f3b73 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -259,7 +259,6 @@ static struct platform_driver vc4_platform_driver = { | |||
| 259 | .remove = vc4_platform_drm_remove, | 259 | .remove = vc4_platform_drm_remove, |
| 260 | .driver = { | 260 | .driver = { |
| 261 | .name = "vc4-drm", | 261 | .name = "vc4-drm", |
| 262 | .owner = THIS_MODULE, | ||
| 263 | .of_match_table = vc4_of_match, | 262 | .of_match_table = vc4_of_match, |
| 264 | }, | 263 | }, |
| 265 | }; | 264 | }; |
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index ab1673f672a4..8098c5b21ba4 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c | |||
| @@ -75,10 +75,10 @@ void vc4_hvs_dump_state(struct drm_device *dev) | |||
| 75 | for (i = 0; i < 64; i += 4) { | 75 | for (i = 0; i < 64; i += 4) { |
| 76 | DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", | 76 | DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n", |
| 77 | i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", | 77 | i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D", |
| 78 | ((uint32_t *)vc4->hvs->dlist)[i + 0], | 78 | readl((u32 __iomem *)vc4->hvs->dlist + i + 0), |
| 79 | ((uint32_t *)vc4->hvs->dlist)[i + 1], | 79 | readl((u32 __iomem *)vc4->hvs->dlist + i + 1), |
| 80 | ((uint32_t *)vc4->hvs->dlist)[i + 2], | 80 | readl((u32 __iomem *)vc4->hvs->dlist + i + 2), |
| 81 | ((uint32_t *)vc4->hvs->dlist)[i + 3]); | 81 | readl((u32 __iomem *)vc4->hvs->dlist + i + 3)); |
| 82 | } | 82 | } |
| 83 | } | 83 | } |
| 84 | 84 | ||
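
The HVS display list lives in device memory mapped as __iomem, so it has to be read through the MMIO accessors instead of plain array indexing -- readl() gives an ordered 32-bit load and keeps sparse's address-space checking honest. A minimal sketch of the accessor idiom:

    #include <linux/io.h>
    #include <linux/printk.h>

    static void dump_words(u32 __iomem *base, unsigned int count)
    {
            unsigned int i;

            /* pointer arithmetic on a u32 __iomem * advances in 4-byte
             * steps; readl() performs the actual MMIO load */
            for (i = 0; i < count; i++)
                    pr_info("0x%03x: 0x%08x\n", i * 4, readl(base + i));
    }
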
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index cdd8b10c0147..887f3caad0be 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -70,7 +70,7 @@ static bool plane_enabled(struct drm_plane_state *state) | |||
| 70 | return state->fb && state->crtc; | 70 | return state->fb && state->crtc; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) | 73 | static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) |
| 74 | { | 74 | { |
| 75 | struct vc4_plane_state *vc4_state; | 75 | struct vc4_plane_state *vc4_state; |
| 76 | 76 | ||
| @@ -97,8 +97,8 @@ struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane) | |||
| 97 | return &vc4_state->base; | 97 | return &vc4_state->base; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | void vc4_plane_destroy_state(struct drm_plane *plane, | 100 | static void vc4_plane_destroy_state(struct drm_plane *plane, |
| 101 | struct drm_plane_state *state) | 101 | struct drm_plane_state *state) |
| 102 | { | 102 | { |
| 103 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); | 103 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); |
| 104 | 104 | ||
| @@ -108,7 +108,7 @@ void vc4_plane_destroy_state(struct drm_plane *plane, | |||
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | /* Called during init to allocate the plane's atomic state. */ | 110 | /* Called during init to allocate the plane's atomic state. */ |
| 111 | void vc4_plane_reset(struct drm_plane *plane) | 111 | static void vc4_plane_reset(struct drm_plane *plane) |
| 112 | { | 112 | { |
| 113 | struct vc4_plane_state *vc4_state; | 113 | struct vc4_plane_state *vc4_state; |
| 114 | 114 | ||
| @@ -157,6 +157,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, | |||
| 157 | int crtc_w = state->crtc_w; | 157 | int crtc_w = state->crtc_w; |
| 158 | int crtc_h = state->crtc_h; | 158 | int crtc_h = state->crtc_h; |
| 159 | 159 | ||
| 160 | if (state->crtc_w << 16 != state->src_w || | ||
| 161 | state->crtc_h << 16 != state->src_h) { | ||
| 162 | /* We don't support scaling yet, which involves | ||
| 163 | * allocating the LBM memory for scaling temporary | ||
| 164 | * storage, and putting filter kernels in the HVS | ||
| 165 | * context. | ||
| 166 | */ | ||
| 167 | return -EINVAL; | ||
| 168 | } | ||
| 169 | |||
| 160 | if (crtc_x < 0) { | 170 | if (crtc_x < 0) { |
| 161 | offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; | 171 | offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x; |
| 162 | crtc_w += crtc_x; | 172 | crtc_w += crtc_x; |
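
Plane source coordinates are 16.16 fixed point while CRTC coordinates are whole pixels, so crtc_w << 16 == src_w is exactly the no-scaling test this hunk adds. A standalone illustration (userspace C, values made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t crtc_w = 1024;         /* destination, pixels */
            uint32_t src_w = 1024u << 16;   /* source, 16.16 fixed point */

            if ((crtc_w << 16) != src_w)
                    printf("scaled: would need LBM + filter kernels\n");
            else
                    printf("1:1 copy: %u px, fraction bits 0x%04x\n",
                           src_w >> 16, src_w & 0xffff);
            return 0;
    }
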
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index e24c2b680b47..7b0aa82ea38b 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -126,6 +126,7 @@ config I2C_I801 | |||
| 126 | Sunrise Point-LP (PCH) | 126 | Sunrise Point-LP (PCH) |
| 127 | DNV (SOC) | 127 | DNV (SOC) |
| 128 | Broxton (SOC) | 128 | Broxton (SOC) |
| 129 | Lewisburg (PCH) | ||
| 129 | 130 | ||
| 130 | This driver can also be built as a module. If so, the module | 131 | This driver can also be built as a module. If so, the module |
| 131 | will be called i2c-i801. | 132 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index c306751ceadb..f62d69799a9c 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -62,6 +62,8 @@ | |||
| 62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes | 62 | * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes |
| 63 | * DNV (SOC) 0x19df 32 hard yes yes yes | 63 | * DNV (SOC) 0x19df 32 hard yes yes yes |
| 64 | * Broxton (SOC) 0x5ad4 32 hard yes yes yes | 64 | * Broxton (SOC) 0x5ad4 32 hard yes yes yes |
| 65 | * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes | ||
| 66 | * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes | ||
| 65 | * | 67 | * |
| 66 | * Features supported by this driver: | 68 | * Features supported by this driver: |
| 67 | * Software PEC no | 69 | * Software PEC no |
| @@ -206,6 +208,8 @@ | |||
| 206 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 | 208 | #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 |
| 207 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df | 209 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df |
| 208 | #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 | 210 | #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 |
| 211 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 | ||
| 212 | #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 | ||
| 209 | 213 | ||
| 210 | struct i801_mux_config { | 214 | struct i801_mux_config { |
| 211 | char *gpio_chip; | 215 | char *gpio_chip; |
| @@ -869,6 +873,8 @@ static const struct pci_device_id i801_ids[] = { | |||
| 869 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, | 873 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, |
| 870 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, | 874 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, |
| 871 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, | 875 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, |
| 876 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, | ||
| 877 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, | ||
| 872 | { 0, } | 878 | { 0, } |
| 873 | }; | 879 | }; |
| 874 | 880 | ||
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 1e4d99da4164..9bb0b056b25f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -50,6 +50,7 @@ | |||
| 50 | #include <linux/of_device.h> | 50 | #include <linux/of_device.h> |
| 51 | #include <linux/of_dma.h> | 51 | #include <linux/of_dma.h> |
| 52 | #include <linux/of_gpio.h> | 52 | #include <linux/of_gpio.h> |
| 53 | #include <linux/pinctrl/consumer.h> | ||
| 53 | #include <linux/platform_data/i2c-imx.h> | 54 | #include <linux/platform_data/i2c-imx.h> |
| 54 | #include <linux/platform_device.h> | 55 | #include <linux/platform_device.h> |
| 55 | #include <linux/sched.h> | 56 | #include <linux/sched.h> |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index e23a7b068c60..0b20449e48cf 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
| @@ -662,8 +662,10 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c) | |||
| 662 | 662 | ||
| 663 | static void xiic_start_xfer(struct xiic_i2c *i2c) | 663 | static void xiic_start_xfer(struct xiic_i2c *i2c) |
| 664 | { | 664 | { |
| 665 | 665 | spin_lock(&i2c->lock); | |
| 666 | xiic_reinit(i2c); | ||
| 666 | __xiic_start_xfer(i2c); | 667 | __xiic_start_xfer(i2c); |
| 668 | spin_unlock(&i2c->lock); | ||
| 667 | } | 669 | } |
| 668 | 670 | ||
| 669 | static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) | 671 | static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) |
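
The xiic change serializes controller reinit and transfer start against the interrupt-side processing, which contends on the same i2c->lock; without it the handler could observe the controller mid-reset. The invariant, restated as a comment-annotated sketch of the resulting function:

    static void xiic_start_xfer(struct xiic_i2c *i2c)
    {
            spin_lock(&i2c->lock);  /* excludes the IRQ-side work, which
                                     * takes the same lock */
            xiic_reinit(i2c);       /* controller reset + FIFO setup must
                                     * not be seen half-done */
            __xiic_start_xfer(i2c); /* queue the first message */
            spin_unlock(&i2c->lock);
    }
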
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 040af5cc8143..ba8eb087f224 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c | |||
| @@ -715,7 +715,7 @@ static int i2c_device_probe(struct device *dev) | |||
| 715 | if (wakeirq > 0 && wakeirq != client->irq) | 715 | if (wakeirq > 0 && wakeirq != client->irq) |
| 716 | status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); | 716 | status = dev_pm_set_dedicated_wake_irq(dev, wakeirq); |
| 717 | else if (client->irq > 0) | 717 | else if (client->irq > 0) |
| 718 | status = dev_pm_set_wake_irq(dev, wakeirq); | 718 | status = dev_pm_set_wake_irq(dev, client->irq); |
| 719 | else | 719 | else |
| 720 | status = 0; | 720 | status = 0; |
| 721 | 721 | ||
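
The i2c-core change fixes the fallback branch: with no dedicated wake IRQ declared, the code passed the wakeirq value (not a valid interrupt in that branch) to dev_pm_set_wake_irq() instead of the client's own IRQ, so the device ended up with no wake source. The selection logic in isolation, as plain C (pick_wake_irq is a hypothetical name):

    /* Returns the IRQ number to register as wake source, 0 for none. */
    static int pick_wake_irq(int wakeirq, int client_irq)
    {
            if (wakeirq > 0 && wakeirq != client_irq)
                    return wakeirq;         /* dedicated wake interrupt */
            if (client_irq > 0)
                    return client_irq;      /* reuse the device IRQ (the fix) */
            return 0;                       /* no wake capability */
    }
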
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index eea0c79111e7..4d960d3b93c0 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c | |||
| @@ -101,7 +101,7 @@ | |||
| 101 | #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ | 101 | #define AD7795_CH_AIN1M_AIN1M 8 /* AIN1(-) - AIN1(-) */ |
| 102 | 102 | ||
| 103 | /* ID Register Bit Designations (AD7793_REG_ID) */ | 103 | /* ID Register Bit Designations (AD7793_REG_ID) */ |
| 104 | #define AD7785_ID 0xB | 104 | #define AD7785_ID 0x3 |
| 105 | #define AD7792_ID 0xA | 105 | #define AD7792_ID 0xA |
| 106 | #define AD7793_ID 0xB | 106 | #define AD7793_ID 0xB |
| 107 | #define AD7794_ID 0xF | 107 | #define AD7794_ID 0xF |
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 599cde3d03a1..b10f629cc44b 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
| @@ -106,6 +106,13 @@ | |||
| 106 | 106 | ||
| 107 | #define DEFAULT_SAMPLE_TIME 1000 | 107 | #define DEFAULT_SAMPLE_TIME 1000 |
| 108 | 108 | ||
| 109 | /* V at 25°C of 696 mV */ | ||
| 110 | #define VF610_VTEMP25_3V0 950 | ||
| 111 | /* V at 25°C of 699 mV */ | ||
| 112 | #define VF610_VTEMP25_3V3 867 | ||
| 113 | /* Typical sensor slope coefficient at all temperatures */ | ||
| 114 | #define VF610_TEMP_SLOPE_COEFF 1840 | ||
| 115 | |||
| 109 | enum clk_sel { | 116 | enum clk_sel { |
| 110 | VF610_ADCIOC_BUSCLK_SET, | 117 | VF610_ADCIOC_BUSCLK_SET, |
| 111 | VF610_ADCIOC_ALTCLK_SET, | 118 | VF610_ADCIOC_ALTCLK_SET, |
| @@ -197,6 +204,8 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) | |||
| 197 | adc_feature->clk_div = 8; | 204 | adc_feature->clk_div = 8; |
| 198 | } | 205 | } |
| 199 | 206 | ||
| 207 | adck_rate = ipg_rate / adc_feature->clk_div; | ||
| 208 | |||
| 200 | /* | 209 | /* |
| 201 | * Determine the long sample time adder value to be used based | 210 | * Determine the long sample time adder value to be used based |
| 202 | * on the default minimum sample time provided. | 211 | * on the default minimum sample time provided. |
| @@ -221,7 +230,6 @@ static inline void vf610_adc_calculate_rates(struct vf610_adc *info) | |||
| 221 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode | 230 | * BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode |
| 222 | * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles | 231 | * LSTAdder(Long Sample Time): 3, 5, 7, 9, 13, 17, 21, 25 ADCK cycles |
| 223 | */ | 232 | */ |
| 224 | adck_rate = ipg_rate / info->adc_feature.clk_div; | ||
| 225 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) | 233 | for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++) |
| 226 | info->sample_freq_avail[i] = | 234 | info->sample_freq_avail[i] = |
| 227 | adck_rate / (6 + vf610_hw_avgs[i] * | 235 | adck_rate / (6 + vf610_hw_avgs[i] * |
| @@ -663,11 +671,13 @@ static int vf610_read_raw(struct iio_dev *indio_dev, | |||
| 663 | break; | 671 | break; |
| 664 | case IIO_TEMP: | 672 | case IIO_TEMP: |
| 665 | /* | 673 | /* |
| 666 | * Calculate in degrees Celsius times 1000 | 674 | * Calculate in degrees Celsius times 1000 |
| 667 | * Using sensor slope of 1.84 mV/°C and | 675 | * Using the typical sensor slope of 1.84 mV/°C |
| 668 | * V at 25°C of 696 mV | 676 | * and VREFH_ADC at 3.3V, V at 25°C of 699 mV |
| 669 | */ | 677 | */ |
| 670 | *val = 25000 - ((int)info->value - 864) * 1000000 / 1840; | 678 | *val = 25000 - ((int)info->value - VF610_VTEMP25_3V3) * |
| 679 | 1000000 / VF610_TEMP_SLOPE_COEFF; | ||
| 680 | |||
| 671 | break; | 681 | break; |
| 672 | default: | 682 | default: |
| 673 | mutex_unlock(&indio_dev->mlock); | 683 | mutex_unlock(&indio_dev->mlock); |
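
For the vf610 temperature path, the expression above reads T[m°C] = 25000 - (raw - VF610_VTEMP25_3V3) * 1000000 / VF610_TEMP_SLOPE_COEFF: every raw count above the 25 °C reading of 867 subtracts 1000000/1840 ≈ 543 m°C (the sensor voltage rises as temperature falls, hence the subtraction). A standalone numeric check of the formula, compiled as ordinary C:

    #include <stdio.h>

    #define VF610_VTEMP25_3V3      867   /* raw counts at 25 degC, 3.3 V ref */
    #define VF610_TEMP_SLOPE_COEFF 1840  /* 1.84 mV/degC scaled by 1000 */

    static int vf610_temp_mdegc(int raw)
    {
            return 25000 - (raw - VF610_VTEMP25_3V3) * 1000000
                            / VF610_TEMP_SLOPE_COEFF;
    }

    int main(void)
    {
            printf("%d\n", vf610_temp_mdegc(867)); /* 25000 = 25.000 degC */
            printf("%d\n", vf610_temp_mdegc(900)); /* 7066  = about 7.1 degC */
            return 0;
    }
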
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c index 0370624a35db..02e636a1c49a 100644 --- a/drivers/iio/adc/xilinx-xadc-core.c +++ b/drivers/iio/adc/xilinx-xadc-core.c | |||
| @@ -841,6 +841,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, | |||
| 841 | case XADC_REG_VCCINT: | 841 | case XADC_REG_VCCINT: |
| 842 | case XADC_REG_VCCAUX: | 842 | case XADC_REG_VCCAUX: |
| 843 | case XADC_REG_VREFP: | 843 | case XADC_REG_VREFP: |
| 844 | case XADC_REG_VREFN: | ||
| 844 | case XADC_REG_VCCBRAM: | 845 | case XADC_REG_VCCBRAM: |
| 845 | case XADC_REG_VCCPINT: | 846 | case XADC_REG_VCCPINT: |
| 846 | case XADC_REG_VCCPAUX: | 847 | case XADC_REG_VCCPAUX: |
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c index 9e4d2c18b554..81ca0081a019 100644 --- a/drivers/iio/dac/ad5064.c +++ b/drivers/iio/dac/ad5064.c | |||
| @@ -113,12 +113,16 @@ enum ad5064_type { | |||
| 113 | ID_AD5065, | 113 | ID_AD5065, |
| 114 | ID_AD5628_1, | 114 | ID_AD5628_1, |
| 115 | ID_AD5628_2, | 115 | ID_AD5628_2, |
| 116 | ID_AD5629_1, | ||
| 117 | ID_AD5629_2, | ||
| 116 | ID_AD5648_1, | 118 | ID_AD5648_1, |
| 117 | ID_AD5648_2, | 119 | ID_AD5648_2, |
| 118 | ID_AD5666_1, | 120 | ID_AD5666_1, |
| 119 | ID_AD5666_2, | 121 | ID_AD5666_2, |
| 120 | ID_AD5668_1, | 122 | ID_AD5668_1, |
| 121 | ID_AD5668_2, | 123 | ID_AD5668_2, |
| 124 | ID_AD5669_1, | ||
| 125 | ID_AD5669_2, | ||
| 122 | }; | 126 | }; |
| 123 | 127 | ||
| 124 | static int ad5064_write(struct ad5064_state *st, unsigned int cmd, | 128 | static int ad5064_write(struct ad5064_state *st, unsigned int cmd, |
| @@ -291,7 +295,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { | |||
| 291 | { }, | 295 | { }, |
| 292 | }; | 296 | }; |
| 293 | 297 | ||
| 294 | #define AD5064_CHANNEL(chan, addr, bits) { \ | 298 | #define AD5064_CHANNEL(chan, addr, bits, _shift) { \ |
| 295 | .type = IIO_VOLTAGE, \ | 299 | .type = IIO_VOLTAGE, \ |
| 296 | .indexed = 1, \ | 300 | .indexed = 1, \ |
| 297 | .output = 1, \ | 301 | .output = 1, \ |
| @@ -303,36 +307,39 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = { | |||
| 303 | .sign = 'u', \ | 307 | .sign = 'u', \ |
| 304 | .realbits = (bits), \ | 308 | .realbits = (bits), \ |
| 305 | .storagebits = 16, \ | 309 | .storagebits = 16, \ |
| 306 | .shift = 20 - bits, \ | 310 | .shift = (_shift), \ |
| 307 | }, \ | 311 | }, \ |
| 308 | .ext_info = ad5064_ext_info, \ | 312 | .ext_info = ad5064_ext_info, \ |
| 309 | } | 313 | } |
| 310 | 314 | ||
| 311 | #define DECLARE_AD5064_CHANNELS(name, bits) \ | 315 | #define DECLARE_AD5064_CHANNELS(name, bits, shift) \ |
| 312 | const struct iio_chan_spec name[] = { \ | 316 | const struct iio_chan_spec name[] = { \ |
| 313 | AD5064_CHANNEL(0, 0, bits), \ | 317 | AD5064_CHANNEL(0, 0, bits, shift), \ |
| 314 | AD5064_CHANNEL(1, 1, bits), \ | 318 | AD5064_CHANNEL(1, 1, bits, shift), \ |
| 315 | AD5064_CHANNEL(2, 2, bits), \ | 319 | AD5064_CHANNEL(2, 2, bits, shift), \ |
| 316 | AD5064_CHANNEL(3, 3, bits), \ | 320 | AD5064_CHANNEL(3, 3, bits, shift), \ |
| 317 | AD5064_CHANNEL(4, 4, bits), \ | 321 | AD5064_CHANNEL(4, 4, bits, shift), \ |
| 318 | AD5064_CHANNEL(5, 5, bits), \ | 322 | AD5064_CHANNEL(5, 5, bits, shift), \ |
| 319 | AD5064_CHANNEL(6, 6, bits), \ | 323 | AD5064_CHANNEL(6, 6, bits, shift), \ |
| 320 | AD5064_CHANNEL(7, 7, bits), \ | 324 | AD5064_CHANNEL(7, 7, bits, shift), \ |
| 321 | } | 325 | } |
| 322 | 326 | ||
| 323 | #define DECLARE_AD5065_CHANNELS(name, bits) \ | 327 | #define DECLARE_AD5065_CHANNELS(name, bits, shift) \ |
| 324 | const struct iio_chan_spec name[] = { \ | 328 | const struct iio_chan_spec name[] = { \ |
| 325 | AD5064_CHANNEL(0, 0, bits), \ | 329 | AD5064_CHANNEL(0, 0, bits, shift), \ |
| 326 | AD5064_CHANNEL(1, 3, bits), \ | 330 | AD5064_CHANNEL(1, 3, bits, shift), \ |
| 327 | } | 331 | } |
| 328 | 332 | ||
| 329 | static DECLARE_AD5064_CHANNELS(ad5024_channels, 12); | 333 | static DECLARE_AD5064_CHANNELS(ad5024_channels, 12, 8); |
| 330 | static DECLARE_AD5064_CHANNELS(ad5044_channels, 14); | 334 | static DECLARE_AD5064_CHANNELS(ad5044_channels, 14, 6); |
| 331 | static DECLARE_AD5064_CHANNELS(ad5064_channels, 16); | 335 | static DECLARE_AD5064_CHANNELS(ad5064_channels, 16, 4); |
| 332 | 336 | ||
| 333 | static DECLARE_AD5065_CHANNELS(ad5025_channels, 12); | 337 | static DECLARE_AD5065_CHANNELS(ad5025_channels, 12, 8); |
| 334 | static DECLARE_AD5065_CHANNELS(ad5045_channels, 14); | 338 | static DECLARE_AD5065_CHANNELS(ad5045_channels, 14, 6); |
| 335 | static DECLARE_AD5065_CHANNELS(ad5065_channels, 16); | 339 | static DECLARE_AD5065_CHANNELS(ad5065_channels, 16, 4); |
| 340 | |||
| 341 | static DECLARE_AD5064_CHANNELS(ad5629_channels, 12, 4); | ||
| 342 | static DECLARE_AD5064_CHANNELS(ad5669_channels, 16, 0); | ||
| 336 | 343 | ||
| 337 | static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | 344 | static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { |
| 338 | [ID_AD5024] = { | 345 | [ID_AD5024] = { |
| @@ -382,6 +389,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | |||
| 382 | .channels = ad5024_channels, | 389 | .channels = ad5024_channels, |
| 383 | .num_channels = 8, | 390 | .num_channels = 8, |
| 384 | }, | 391 | }, |
| 392 | [ID_AD5629_1] = { | ||
| 393 | .shared_vref = true, | ||
| 394 | .internal_vref = 2500000, | ||
| 395 | .channels = ad5629_channels, | ||
| 396 | .num_channels = 8, | ||
| 397 | }, | ||
| 398 | [ID_AD5629_2] = { | ||
| 399 | .shared_vref = true, | ||
| 400 | .internal_vref = 5000000, | ||
| 401 | .channels = ad5629_channels, | ||
| 402 | .num_channels = 8, | ||
| 403 | }, | ||
| 385 | [ID_AD5648_1] = { | 404 | [ID_AD5648_1] = { |
| 386 | .shared_vref = true, | 405 | .shared_vref = true, |
| 387 | .internal_vref = 2500000, | 406 | .internal_vref = 2500000, |
| @@ -418,6 +437,18 @@ static const struct ad5064_chip_info ad5064_chip_info_tbl[] = { | |||
| 418 | .channels = ad5064_channels, | 437 | .channels = ad5064_channels, |
| 419 | .num_channels = 8, | 438 | .num_channels = 8, |
| 420 | }, | 439 | }, |
| 440 | [ID_AD5669_1] = { | ||
| 441 | .shared_vref = true, | ||
| 442 | .internal_vref = 2500000, | ||
| 443 | .channels = ad5669_channels, | ||
| 444 | .num_channels = 8, | ||
| 445 | }, | ||
| 446 | [ID_AD5669_2] = { | ||
| 447 | .shared_vref = true, | ||
| 448 | .internal_vref = 5000000, | ||
| 449 | .channels = ad5669_channels, | ||
| 450 | .num_channels = 8, | ||
| 451 | }, | ||
| 421 | }; | 452 | }; |
| 422 | 453 | ||
| 423 | static inline unsigned int ad5064_num_vref(struct ad5064_state *st) | 454 | static inline unsigned int ad5064_num_vref(struct ad5064_state *st) |
| @@ -597,10 +628,16 @@ static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd, | |||
| 597 | unsigned int addr, unsigned int val) | 628 | unsigned int addr, unsigned int val) |
| 598 | { | 629 | { |
| 599 | struct i2c_client *i2c = to_i2c_client(st->dev); | 630 | struct i2c_client *i2c = to_i2c_client(st->dev); |
| 631 | int ret; | ||
| 600 | 632 | ||
| 601 | st->data.i2c[0] = (cmd << 4) | addr; | 633 | st->data.i2c[0] = (cmd << 4) | addr; |
| 602 | put_unaligned_be16(val, &st->data.i2c[1]); | 634 | put_unaligned_be16(val, &st->data.i2c[1]); |
| 603 | return i2c_master_send(i2c, st->data.i2c, 3); | 635 | |
| 636 | ret = i2c_master_send(i2c, st->data.i2c, 3); | ||
| 637 | if (ret < 0) | ||
| 638 | return ret; | ||
| 639 | |||
| 640 | return 0; | ||
| 604 | } | 641 | } |
| 605 | 642 | ||
| 606 | static int ad5064_i2c_probe(struct i2c_client *i2c, | 643 | static int ad5064_i2c_probe(struct i2c_client *i2c, |
| @@ -616,12 +653,12 @@ static int ad5064_i2c_remove(struct i2c_client *i2c) | |||
| 616 | } | 653 | } |
| 617 | 654 | ||
| 618 | static const struct i2c_device_id ad5064_i2c_ids[] = { | 655 | static const struct i2c_device_id ad5064_i2c_ids[] = { |
| 619 | {"ad5629-1", ID_AD5628_1}, | 656 | {"ad5629-1", ID_AD5629_1}, |
| 620 | {"ad5629-2", ID_AD5628_2}, | 657 | {"ad5629-2", ID_AD5629_2}, |
| 621 | {"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */ | 658 | {"ad5629-3", ID_AD5629_2}, /* similar enough to ad5629-2 */ |
| 622 | {"ad5669-1", ID_AD5668_1}, | 659 | {"ad5669-1", ID_AD5669_1}, |
| 623 | {"ad5669-2", ID_AD5668_2}, | 660 | {"ad5669-2", ID_AD5669_2}, |
| 624 | {"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */ | 661 | {"ad5669-3", ID_AD5669_2}, /* similar enough to ad5669-2 */ |
| 625 | {} | 662 | {} |
| 626 | }; | 663 | }; |
| 627 | MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); | 664 | MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids); |
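
Two independent ad5064 fixes meet here. First, the AD5629/AD5669 (the I2C parts) position the DAC code differently in the payload than their SPI siblings, so the fixed "20 - bits" shift becomes an explicit per-chip shift and the parts get their own chip-info entries instead of reusing the AD5628/AD5668 ones. Second, ad5064_i2c_write() now folds the positive byte count returned by i2c_master_send() to 0, so callers can treat any nonzero return as an error. A toy illustration of what the shift changes (values mirror the DECLARE_*_CHANNELS lines above):

    #include <stdio.h>

    static unsigned int place_code(unsigned int code, unsigned int shift)
    {
            return code << shift;   /* position within the data payload */
    }

    int main(void)
    {
            printf("0x%05x\n", place_code(0xabc, 8));  /* AD5024: 12 bit, shift 8 */
            printf("0x%05x\n", place_code(0xabc, 4));  /* AD5629: 12 bit, shift 4 */
            printf("0x%05x\n", place_code(0xbeef, 0)); /* AD5669: 16 bit, shift 0 */
            return 0;
    }
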
diff --git a/drivers/iio/humidity/si7020.c b/drivers/iio/humidity/si7020.c index 12128d1ca570..71991b5c0658 100644 --- a/drivers/iio/humidity/si7020.c +++ b/drivers/iio/humidity/si7020.c | |||
| @@ -50,10 +50,10 @@ static int si7020_read_raw(struct iio_dev *indio_dev, | |||
| 50 | 50 | ||
| 51 | switch (mask) { | 51 | switch (mask) { |
| 52 | case IIO_CHAN_INFO_RAW: | 52 | case IIO_CHAN_INFO_RAW: |
| 53 | ret = i2c_smbus_read_word_data(*client, | 53 | ret = i2c_smbus_read_word_swapped(*client, |
| 54 | chan->type == IIO_TEMP ? | 54 | chan->type == IIO_TEMP ? |
| 55 | SI7020CMD_TEMP_HOLD : | 55 | SI7020CMD_TEMP_HOLD : |
| 56 | SI7020CMD_RH_HOLD); | 56 | SI7020CMD_RH_HOLD); |
| 57 | if (ret < 0) | 57 | if (ret < 0) |
| 58 | return ret; | 58 | return ret; |
| 59 | *val = ret >> 2; | 59 | *val = ret >> 2; |
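
The si7020 fix is an endianness correction: SMBus word reads are little-endian by specification, but the sensor returns its 16-bit measurement MSB first, so i2c_smbus_read_word_data() handed back a byte-swapped value. i2c_smbus_read_word_swapped() undoes that; in effect it applies:

    #include <stdint.h>

    /* What the _swapped variant effectively applies to the raw word. */
    static uint16_t swab16_example(uint16_t v)
    {
            return (uint16_t)((v << 8) | (v >> 8));
    }
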
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 44a077f3a4a2..f174ce0ca361 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c | |||
| @@ -84,12 +84,15 @@ void __init gic_dist_config(void __iomem *base, int gic_irqs, | |||
| 84 | writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); | 84 | writel_relaxed(GICD_INT_DEF_PRI_X4, base + GIC_DIST_PRI + i); |
| 85 | 85 | ||
| 86 | /* | 86 | /* |
| 87 | * Disable all interrupts. Leave the PPI and SGIs alone | 87 | * Deactivate and disable all SPIs. Leave the PPI and SGIs |
| 88 | * as they are enabled by redistributor registers. | 88 | * alone as they are in the redistributor registers on GICv3. |
| 89 | */ | 89 | */ |
| 90 | for (i = 32; i < gic_irqs; i += 32) | 90 | for (i = 32; i < gic_irqs; i += 32) { |
| 91 | writel_relaxed(GICD_INT_EN_CLR_X32, | 91 | writel_relaxed(GICD_INT_EN_CLR_X32, |
| 92 | base + GIC_DIST_ENABLE_CLEAR + i / 8); | 92 | base + GIC_DIST_ACTIVE_CLEAR + i / 8); |
| 93 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 94 | base + GIC_DIST_ENABLE_CLEAR + i / 8); | ||
| 95 | } | ||
| 93 | 96 | ||
| 94 | if (sync_access) | 97 | if (sync_access) |
| 95 | sync_access(); | 98 | sync_access(); |
| @@ -102,7 +105,9 @@ void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) | |||
| 102 | /* | 105 | /* |
| 103 | * Deal with the banked PPI and SGI interrupts - disable all | 106 | * Deal with the banked PPI and SGI interrupts - disable all |
| 104 | * PPI interrupts, ensure all SGI interrupts are enabled. | 107 | * PPI interrupts, ensure all SGI interrupts are enabled. |
| 108 | * Make sure everything is deactivated. | ||
| 105 | */ | 109 | */ |
| 110 | writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); | ||
| 106 | writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); | 111 | writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); |
| 107 | writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); | 112 | writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); |
| 108 | 113 | ||
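
The GIC distributor tracks enable and active state separately, and a kernel entered via kexec can inherit interrupts still flagged active. gic_dist_config()/gic_cpu_config() therefore now write GICD_ICACTIVERn before GICD_ICENABLERn. Each of those registers covers 32 interrupt IDs in 32 bits, which is where the "i / 8" byte offset comes from; a standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* byte offset of the 32-bit register covering interrupt i */
            for (unsigned int i = 32; i < 128; i += 32)
                    printf("irq %3u -> register byte offset 0x%02x\n",
                           i, i / 8);   /* (i / 32) * 4 */
            return 0;
    }
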
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 515c823c1c95..abf2ffaed392 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
| @@ -73,9 +73,11 @@ struct gic_chip_data { | |||
| 73 | union gic_base cpu_base; | 73 | union gic_base cpu_base; |
| 74 | #ifdef CONFIG_CPU_PM | 74 | #ifdef CONFIG_CPU_PM |
| 75 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; | 75 | u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; |
| 76 | u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; | ||
| 76 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; | 77 | u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; |
| 77 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; | 78 | u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; |
| 78 | u32 __percpu *saved_ppi_enable; | 79 | u32 __percpu *saved_ppi_enable; |
| 80 | u32 __percpu *saved_ppi_active; | ||
| 79 | u32 __percpu *saved_ppi_conf; | 81 | u32 __percpu *saved_ppi_conf; |
| 80 | #endif | 82 | #endif |
| 81 | struct irq_domain *domain; | 83 | struct irq_domain *domain; |
| @@ -566,6 +568,10 @@ static void gic_dist_save(unsigned int gic_nr) | |||
| 566 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 568 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) |
| 567 | gic_data[gic_nr].saved_spi_enable[i] = | 569 | gic_data[gic_nr].saved_spi_enable[i] = |
| 568 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 570 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 571 | |||
| 572 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
| 573 | gic_data[gic_nr].saved_spi_active[i] = | ||
| 574 | readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 569 | } | 575 | } |
| 570 | 576 | ||
| 571 | /* | 577 | /* |
| @@ -604,9 +610,19 @@ static void gic_dist_restore(unsigned int gic_nr) | |||
| 604 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | 610 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], |
| 605 | dist_base + GIC_DIST_TARGET + i * 4); | 611 | dist_base + GIC_DIST_TARGET + i * 4); |
| 606 | 612 | ||
| 607 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | 613 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { |
| 614 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 615 | dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); | ||
| 608 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | 616 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], |
| 609 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | 617 | dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 618 | } | ||
| 619 | |||
| 620 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) { | ||
| 621 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 622 | dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); | ||
| 623 | writel_relaxed(gic_data[gic_nr].saved_spi_active[i], | ||
| 624 | dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 625 | } | ||
| 610 | 626 | ||
| 611 | writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); | 627 | writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); |
| 612 | } | 628 | } |
| @@ -631,6 +647,10 @@ static void gic_cpu_save(unsigned int gic_nr) | |||
| 631 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 647 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) |
| 632 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | 648 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 633 | 649 | ||
| 650 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); | ||
| 651 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
| 652 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 653 | |||
| 634 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 654 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
| 635 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 655 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
| 636 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | 656 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); |
| @@ -654,8 +674,18 @@ static void gic_cpu_restore(unsigned int gic_nr) | |||
| 654 | return; | 674 | return; |
| 655 | 675 | ||
| 656 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | 676 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); |
| 657 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | 677 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { |
| 678 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 679 | dist_base + GIC_DIST_ENABLE_CLEAR + i * 4); | ||
| 658 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | 680 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); |
| 681 | } | ||
| 682 | |||
| 683 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active); | ||
| 684 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) { | ||
| 685 | writel_relaxed(GICD_INT_EN_CLR_X32, | ||
| 686 | dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4); | ||
| 687 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4); | ||
| 688 | } | ||
| 659 | 689 | ||
| 660 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | 690 | ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); |
| 661 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | 691 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) |
| @@ -710,6 +740,10 @@ static void __init gic_pm_init(struct gic_chip_data *gic) | |||
| 710 | sizeof(u32)); | 740 | sizeof(u32)); |
| 711 | BUG_ON(!gic->saved_ppi_enable); | 741 | BUG_ON(!gic->saved_ppi_enable); |
| 712 | 742 | ||
| 743 | gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | ||
| 744 | sizeof(u32)); | ||
| 745 | BUG_ON(!gic->saved_ppi_active); | ||
| 746 | |||
| 713 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | 747 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, |
| 714 | sizeof(u32)); | 748 | sizeof(u32)); |
| 715 | BUG_ON(!gic->saved_ppi_conf); | 749 | BUG_ON(!gic->saved_ppi_conf); |
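
The suspend/resume path gains the same treatment: active state is saved next to enable state, and because the ISENABLER/ISACTIVER "set" registers are write-1-to-set (writing 0 bits changes nothing), restore must wipe via the matching "clear" register before writing the saved mask. The idiom in isolation, with plain pointers standing in for the MMIO accessors:

    #include <stdint.h>

    static void restore_w1s(volatile uint32_t *clear_reg,
                            volatile uint32_t *set_reg, uint32_t saved)
    {
            *clear_reg = 0xffffffff;   /* drop whatever is currently set */
            *set_reg   = saved;        /* re-apply only the saved bits */
    }
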
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index f659e605a406..5178645ac42b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
| @@ -160,11 +160,6 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
| 160 | } | 160 | } |
| 161 | EXPORT_SYMBOL(nvm_erase_blk); | 161 | EXPORT_SYMBOL(nvm_erase_blk); |
| 162 | 162 | ||
| 163 | static void nvm_core_free(struct nvm_dev *dev) | ||
| 164 | { | ||
| 165 | kfree(dev); | ||
| 166 | } | ||
| 167 | |||
| 168 | static int nvm_core_init(struct nvm_dev *dev) | 163 | static int nvm_core_init(struct nvm_dev *dev) |
| 169 | { | 164 | { |
| 170 | struct nvm_id *id = &dev->identity; | 165 | struct nvm_id *id = &dev->identity; |
| @@ -179,12 +174,21 @@ static int nvm_core_init(struct nvm_dev *dev) | |||
| 179 | dev->sec_size = grp->csecs; | 174 | dev->sec_size = grp->csecs; |
| 180 | dev->oob_size = grp->sos; | 175 | dev->oob_size = grp->sos; |
| 181 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; | 176 | dev->sec_per_pg = grp->fpg_sz / grp->csecs; |
| 182 | dev->addr_mode = id->ppat; | 177 | memcpy(&dev->ppaf, &id->ppaf, sizeof(struct nvm_addr_format)); |
| 183 | dev->addr_format = id->ppaf; | ||
| 184 | 178 | ||
| 185 | dev->plane_mode = NVM_PLANE_SINGLE; | 179 | dev->plane_mode = NVM_PLANE_SINGLE; |
| 186 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; | 180 | dev->max_rq_size = dev->ops->max_phys_sect * dev->sec_size; |
| 187 | 181 | ||
| 182 | if (grp->mtype != 0) { | ||
| 183 | pr_err("nvm: memory type not supported\n"); | ||
| 184 | return -EINVAL; | ||
| 185 | } | ||
| 186 | |||
| 187 | if (grp->fmtype != 0 && grp->fmtype != 1) { | ||
| 188 | pr_err("nvm: flash type not supported\n"); | ||
| 189 | return -EINVAL; | ||
| 190 | } | ||
| 191 | |||
| 188 | if (grp->mpos & 0x020202) | 192 | if (grp->mpos & 0x020202) |
| 189 | dev->plane_mode = NVM_PLANE_DOUBLE; | 193 | dev->plane_mode = NVM_PLANE_DOUBLE; |
| 190 | if (grp->mpos & 0x040404) | 194 | if (grp->mpos & 0x040404) |
| @@ -213,21 +217,18 @@ static void nvm_free(struct nvm_dev *dev) | |||
| 213 | 217 | ||
| 214 | if (dev->mt) | 218 | if (dev->mt) |
| 215 | dev->mt->unregister_mgr(dev); | 219 | dev->mt->unregister_mgr(dev); |
| 216 | |||
| 217 | nvm_core_free(dev); | ||
| 218 | } | 220 | } |
| 219 | 221 | ||
| 220 | static int nvm_init(struct nvm_dev *dev) | 222 | static int nvm_init(struct nvm_dev *dev) |
| 221 | { | 223 | { |
| 222 | struct nvmm_type *mt; | 224 | struct nvmm_type *mt; |
| 223 | int ret = 0; | 225 | int ret = -EINVAL; |
| 224 | 226 | ||
| 225 | if (!dev->q || !dev->ops) | 227 | if (!dev->q || !dev->ops) |
| 226 | return -EINVAL; | 228 | return ret; |
| 227 | 229 | ||
| 228 | if (dev->ops->identity(dev->q, &dev->identity)) { | 230 | if (dev->ops->identity(dev->q, &dev->identity)) { |
| 229 | pr_err("nvm: device could not be identified\n"); | 231 | pr_err("nvm: device could not be identified\n"); |
| 230 | ret = -EINVAL; | ||
| 231 | goto err; | 232 | goto err; |
| 232 | } | 233 | } |
| 233 | 234 | ||
| @@ -273,7 +274,6 @@ static int nvm_init(struct nvm_dev *dev) | |||
| 273 | dev->nr_chnls); | 274 | dev->nr_chnls); |
| 274 | return 0; | 275 | return 0; |
| 275 | err: | 276 | err: |
| 276 | nvm_free(dev); | ||
| 277 | pr_err("nvm: failed to initialize nvm\n"); | 277 | pr_err("nvm: failed to initialize nvm\n"); |
| 278 | return ret; | 278 | return ret; |
| 279 | } | 279 | } |
| @@ -308,22 +308,24 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
| 308 | if (ret) | 308 | if (ret) |
| 309 | goto err_init; | 309 | goto err_init; |
| 310 | 310 | ||
| 311 | down_write(&nvm_lock); | ||
| 312 | list_add(&dev->devices, &nvm_devices); | ||
| 313 | up_write(&nvm_lock); | ||
| 314 | |||
| 315 | if (dev->ops->max_phys_sect > 1) { | 311 | if (dev->ops->max_phys_sect > 1) { |
| 316 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, | 312 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, |
| 317 | "ppalist"); | 313 | "ppalist"); |
| 318 | if (!dev->ppalist_pool) { | 314 | if (!dev->ppalist_pool) { |
| 319 | pr_err("nvm: could not create ppa pool\n"); | 315 | pr_err("nvm: could not create ppa pool\n"); |
| 320 | return -ENOMEM; | 316 | ret = -ENOMEM; |
| 317 | goto err_init; | ||
| 321 | } | 318 | } |
| 322 | } else if (dev->ops->max_phys_sect > 256) { | 319 | } else if (dev->ops->max_phys_sect > 256) { |
| 323 | pr_info("nvm: max sectors supported is 256.\n"); | 320 | pr_info("nvm: max sectors supported is 256.\n"); |
| 324 | return -EINVAL; | 321 | ret = -EINVAL; |
| 322 | goto err_init; | ||
| 325 | } | 323 | } |
| 326 | 324 | ||
| 325 | down_write(&nvm_lock); | ||
| 326 | list_add(&dev->devices, &nvm_devices); | ||
| 327 | up_write(&nvm_lock); | ||
| 328 | |||
| 327 | return 0; | 329 | return 0; |
| 328 | err_init: | 330 | err_init: |
| 329 | kfree(dev); | 331 | kfree(dev); |
| @@ -341,11 +343,12 @@ void nvm_unregister(char *disk_name) | |||
| 341 | return; | 343 | return; |
| 342 | } | 344 | } |
| 343 | 345 | ||
| 344 | nvm_exit(dev); | ||
| 345 | |||
| 346 | down_write(&nvm_lock); | 346 | down_write(&nvm_lock); |
| 347 | list_del(&dev->devices); | 347 | list_del(&dev->devices); |
| 348 | up_write(&nvm_lock); | 348 | up_write(&nvm_lock); |
| 349 | |||
| 350 | nvm_exit(dev); | ||
| 351 | kfree(dev); | ||
| 349 | } | 352 | } |
| 350 | EXPORT_SYMBOL(nvm_unregister); | 353 | EXPORT_SYMBOL(nvm_unregister); |
| 351 | 354 | ||
| @@ -457,11 +460,11 @@ static void nvm_remove_target(struct nvm_target *t) | |||
| 457 | lockdep_assert_held(&nvm_lock); | 460 | lockdep_assert_held(&nvm_lock); |
| 458 | 461 | ||
| 459 | del_gendisk(tdisk); | 462 | del_gendisk(tdisk); |
| 463 | blk_cleanup_queue(q); | ||
| 464 | |||
| 460 | if (tt->exit) | 465 | if (tt->exit) |
| 461 | tt->exit(tdisk->private_data); | 466 | tt->exit(tdisk->private_data); |
| 462 | 467 | ||
| 463 | blk_cleanup_queue(q); | ||
| 464 | |||
| 465 | put_disk(tdisk); | 468 | put_disk(tdisk); |
| 466 | 469 | ||
| 467 | list_del(&t->list); | 470 | list_del(&t->list); |
| @@ -541,7 +544,7 @@ static int nvm_configure_show(const char *val) | |||
| 541 | if (!dev->mt) | 544 | if (!dev->mt) |
| 542 | return 0; | 545 | return 0; |
| 543 | 546 | ||
| 544 | dev->mt->free_blocks_print(dev); | 547 | dev->mt->lun_info_print(dev); |
| 545 | 548 | ||
| 546 | return 0; | 549 | return 0; |
| 547 | } | 550 | } |
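
The lightnvm/core.c changes are mostly ordering: nvm_init() no longer frees the device on failure (its caller owns the allocation, so nvm_core_free() disappears and nvm_unregister() does the kfree()); the device is added to nvm_devices only after the PPA list pool exists, so lookups never see a half-built device; and nvm_remove_target() drains the queue with blk_cleanup_queue() before the target's exit hook frees the state in-flight requests rely on. The registration shape the patch converges on, sketched with hypothetical helpers (init_resources, devices_lock, etc.):

    /* Fully initialize, then publish; the error path never publishes. */
    static int register_dev(struct example_dev *d)
    {
            int ret = init_resources(d);    /* pools, tables, ... */
            if (ret)
                    goto err;               /* not on the list: plain free */

            down_write(&devices_lock);
            list_add(&d->entry, &devices);  /* publish last */
            up_write(&devices_lock);
            return 0;
    err:
            free_resources(d);
            return ret;
    }
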
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index ae1fb2bdc5f4..e20e74ec6b91 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c | |||
| @@ -60,23 +60,28 @@ static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
| 60 | lun->vlun.lun_id = i % dev->luns_per_chnl; | 60 | lun->vlun.lun_id = i % dev->luns_per_chnl; |
| 61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; | 61 | lun->vlun.chnl_id = i / dev->luns_per_chnl; |
| 62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; | 62 | lun->vlun.nr_free_blocks = dev->blks_per_lun; |
| 63 | lun->vlun.nr_inuse_blocks = 0; | ||
| 64 | lun->vlun.nr_bad_blocks = 0; | ||
| 63 | } | 65 | } |
| 64 | return 0; | 66 | return 0; |
| 65 | } | 67 | } |
| 66 | 68 | ||
| 67 | static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | 69 | static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, |
| 68 | void *private) | 70 | void *private) |
| 69 | { | 71 | { |
| 70 | struct gen_nvm *gn = private; | 72 | struct gen_nvm *gn = private; |
| 71 | struct gen_lun *lun = &gn->luns[lun_id]; | 73 | struct nvm_dev *dev = gn->dev; |
| 74 | struct gen_lun *lun; | ||
| 72 | struct nvm_block *blk; | 75 | struct nvm_block *blk; |
| 73 | int i; | 76 | int i; |
| 74 | 77 | ||
| 75 | if (unlikely(bitmap_empty(bb_bitmap, nr_blocks))) | 78 | ppa = dev_to_generic_addr(gn->dev, ppa); |
| 76 | return 0; | 79 | lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; |
| 80 | |||
| 81 | for (i = 0; i < nr_blocks; i++) { | ||
| 82 | if (blks[i] == 0) | ||
| 83 | continue; | ||
| 77 | 84 | ||
| 78 | i = -1; | ||
| 79 | while ((i = find_next_bit(bb_bitmap, nr_blocks, i + 1)) < nr_blocks) { | ||
| 80 | blk = &lun->vlun.blocks[i]; | 85 | blk = &lun->vlun.blocks[i]; |
| 81 | if (!blk) { | 86 | if (!blk) { |
| 82 | pr_err("gennvm: BB data is out of bounds.\n"); | 87 | pr_err("gennvm: BB data is out of bounds.\n"); |
| @@ -84,6 +89,7 @@ static int gennvm_block_bb(u32 lun_id, void *bb_bitmap, unsigned int nr_blocks, | |||
| 84 | } | 89 | } |
| 85 | 90 | ||
| 86 | list_move_tail(&blk->list, &lun->bb_list); | 91 | list_move_tail(&blk->list, &lun->bb_list); |
| 92 | lun->vlun.nr_bad_blocks++; | ||
| 87 | } | 93 | } |
| 88 | 94 | ||
| 89 | return 0; | 95 | return 0; |
| @@ -136,6 +142,7 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private) | |||
| 136 | list_move_tail(&blk->list, &lun->used_list); | 142 | list_move_tail(&blk->list, &lun->used_list); |
| 137 | blk->type = 1; | 143 | blk->type = 1; |
| 138 | lun->vlun.nr_free_blocks--; | 144 | lun->vlun.nr_free_blocks--; |
| 145 | lun->vlun.nr_inuse_blocks++; | ||
| 139 | } | 146 | } |
| 140 | } | 147 | } |
| 141 | 148 | ||
| @@ -164,15 +171,25 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
| 164 | block->id = cur_block_id++; | 171 | block->id = cur_block_id++; |
| 165 | 172 | ||
| 166 | /* First block is reserved for device */ | 173 | /* First block is reserved for device */ |
| 167 | if (unlikely(lun_iter == 0 && blk_iter == 0)) | 174 | if (unlikely(lun_iter == 0 && blk_iter == 0)) { |
| 175 | lun->vlun.nr_free_blocks--; | ||
| 168 | continue; | 176 | continue; |
| 177 | } | ||
| 169 | 178 | ||
| 170 | list_add_tail(&block->list, &lun->free_list); | 179 | list_add_tail(&block->list, &lun->free_list); |
| 171 | } | 180 | } |
| 172 | 181 | ||
| 173 | if (dev->ops->get_bb_tbl) { | 182 | if (dev->ops->get_bb_tbl) { |
| 174 | ret = dev->ops->get_bb_tbl(dev->q, lun->vlun.id, | 183 | struct ppa_addr ppa; |
| 175 | dev->blks_per_lun, gennvm_block_bb, gn); | 184 | |
| 185 | ppa.ppa = 0; | ||
| 186 | ppa.g.ch = lun->vlun.chnl_id; | ||
| 187 | ppa.g.lun = lun->vlun.id; | ||
| 188 | ppa = generic_to_dev_addr(dev, ppa); | ||
| 189 | |||
| 190 | ret = dev->ops->get_bb_tbl(dev->q, ppa, | ||
| 191 | dev->blks_per_lun, | ||
| 192 | gennvm_block_bb, gn); | ||
| 176 | if (ret) | 193 | if (ret) |
| 177 | pr_err("gennvm: could not read BB table\n"); | 194 | pr_err("gennvm: could not read BB table\n"); |
| 178 | } | 195 | } |
| @@ -199,6 +216,7 @@ static int gennvm_register(struct nvm_dev *dev) | |||
| 199 | if (!gn) | 216 | if (!gn) |
| 200 | return -ENOMEM; | 217 | return -ENOMEM; |
| 201 | 218 | ||
| 219 | gn->dev = dev; | ||
| 202 | gn->nr_luns = dev->nr_luns; | 220 | gn->nr_luns = dev->nr_luns; |
| 203 | dev->mp = gn; | 221 | dev->mp = gn; |
| 204 | 222 | ||
| @@ -254,6 +272,7 @@ static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, | |||
| 254 | blk->type = 1; | 272 | blk->type = 1; |
| 255 | 273 | ||
| 256 | lun->vlun.nr_free_blocks--; | 274 | lun->vlun.nr_free_blocks--; |
| 275 | lun->vlun.nr_inuse_blocks++; | ||
| 257 | 276 | ||
| 258 | spin_unlock(&vlun->lock); | 277 | spin_unlock(&vlun->lock); |
| 259 | out: | 278 | out: |
| @@ -271,16 +290,21 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk) | |||
| 271 | case 1: | 290 | case 1: |
| 272 | list_move_tail(&blk->list, &lun->free_list); | 291 | list_move_tail(&blk->list, &lun->free_list); |
| 273 | lun->vlun.nr_free_blocks++; | 292 | lun->vlun.nr_free_blocks++; |
| 293 | lun->vlun.nr_inuse_blocks--; | ||
| 274 | blk->type = 0; | 294 | blk->type = 0; |
| 275 | break; | 295 | break; |
| 276 | case 2: | 296 | case 2: |
| 277 | list_move_tail(&blk->list, &lun->bb_list); | 297 | list_move_tail(&blk->list, &lun->bb_list); |
| 298 | lun->vlun.nr_bad_blocks++; | ||
| 299 | lun->vlun.nr_inuse_blocks--; | ||
| 278 | break; | 300 | break; |
| 279 | default: | 301 | default: |
| 280 | WARN_ON_ONCE(1); | 302 | WARN_ON_ONCE(1); |
| 281 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", | 303 | pr_err("gennvm: erroneous block type (%lu -> %u)\n", |
| 282 | blk->id, blk->type); | 304 | blk->id, blk->type); |
| 283 | list_move_tail(&blk->list, &lun->bb_list); | 305 | list_move_tail(&blk->list, &lun->bb_list); |
| 306 | lun->vlun.nr_bad_blocks++; | ||
| 307 | lun->vlun.nr_inuse_blocks--; | ||
| 284 | } | 308 | } |
| 285 | 309 | ||
| 286 | spin_unlock(&vlun->lock); | 310 | spin_unlock(&vlun->lock); |
| @@ -292,10 +316,10 @@ static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 292 | 316 | ||
| 293 | if (rqd->nr_pages > 1) { | 317 | if (rqd->nr_pages > 1) { |
| 294 | for (i = 0; i < rqd->nr_pages; i++) | 318 | for (i = 0; i < rqd->nr_pages; i++) |
| 295 | rqd->ppa_list[i] = addr_to_generic_mode(dev, | 319 | rqd->ppa_list[i] = dev_to_generic_addr(dev, |
| 296 | rqd->ppa_list[i]); | 320 | rqd->ppa_list[i]); |
| 297 | } else { | 321 | } else { |
| 298 | rqd->ppa_addr = addr_to_generic_mode(dev, rqd->ppa_addr); | 322 | rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr); |
| 299 | } | 323 | } |
| 300 | } | 324 | } |
| 301 | 325 | ||
| @@ -305,10 +329,10 @@ static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 305 | 329 | ||
| 306 | if (rqd->nr_pages > 1) { | 330 | if (rqd->nr_pages > 1) { |
| 307 | for (i = 0; i < rqd->nr_pages; i++) | 331 | for (i = 0; i < rqd->nr_pages; i++) |
| 308 | rqd->ppa_list[i] = generic_to_addr_mode(dev, | 332 | rqd->ppa_list[i] = generic_to_dev_addr(dev, |
| 309 | rqd->ppa_list[i]); | 333 | rqd->ppa_list[i]); |
| 310 | } else { | 334 | } else { |
| 311 | rqd->ppa_addr = generic_to_addr_mode(dev, rqd->ppa_addr); | 335 | rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr); |
| 312 | } | 336 | } |
| 313 | } | 337 | } |
| 314 | 338 | ||
| @@ -354,10 +378,10 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd) | |||
| 354 | { | 378 | { |
| 355 | int i; | 379 | int i; |
| 356 | 380 | ||
| 357 | if (!dev->ops->set_bb) | 381 | if (!dev->ops->set_bb_tbl) |
| 358 | return; | 382 | return; |
| 359 | 383 | ||
| 360 | if (dev->ops->set_bb(dev->q, rqd, 1)) | 384 | if (dev->ops->set_bb_tbl(dev->q, rqd, 1)) |
| 361 | return; | 385 | return; |
| 362 | 386 | ||
| 363 | gennvm_addr_to_generic_mode(dev, rqd); | 387 | gennvm_addr_to_generic_mode(dev, rqd); |
| @@ -440,15 +464,24 @@ static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid) | |||
| 440 | return &gn->luns[lunid].vlun; | 464 | return &gn->luns[lunid].vlun; |
| 441 | } | 465 | } |
| 442 | 466 | ||
| 443 | static void gennvm_free_blocks_print(struct nvm_dev *dev) | 467 | static void gennvm_lun_info_print(struct nvm_dev *dev) |
| 444 | { | 468 | { |
| 445 | struct gen_nvm *gn = dev->mp; | 469 | struct gen_nvm *gn = dev->mp; |
| 446 | struct gen_lun *lun; | 470 | struct gen_lun *lun; |
| 447 | unsigned int i; | 471 | unsigned int i; |
| 448 | 472 | ||
| 449 | gennvm_for_each_lun(gn, lun, i) | 473 | |
| 450 | pr_info("%s: lun%8u\t%u\n", | 474 | gennvm_for_each_lun(gn, lun, i) { |
| 451 | dev->name, i, lun->vlun.nr_free_blocks); | 475 | spin_lock(&lun->vlun.lock); |
| 476 | |||
| 477 | pr_info("%s: lun%8u\t%u\t%u\t%u\n", | ||
| 478 | dev->name, i, | ||
| 479 | lun->vlun.nr_free_blocks, | ||
| 480 | lun->vlun.nr_inuse_blocks, | ||
| 481 | lun->vlun.nr_bad_blocks); | ||
| 482 | |||
| 483 | spin_unlock(&lun->vlun.lock); | ||
| 484 | } | ||
| 452 | } | 485 | } |
| 453 | 486 | ||
| 454 | static struct nvmm_type gennvm = { | 487 | static struct nvmm_type gennvm = { |
| @@ -466,7 +499,7 @@ static struct nvmm_type gennvm = { | |||
| 466 | .erase_blk = gennvm_erase_blk, | 499 | .erase_blk = gennvm_erase_blk, |
| 467 | 500 | ||
| 468 | .get_lun = gennvm_get_lun, | 501 | .get_lun = gennvm_get_lun, |
| 469 | .free_blocks_print = gennvm_free_blocks_print, | 502 | .lun_info_print = gennvm_lun_info_print, |
| 470 | }; | 503 | }; |
| 471 | 504 | ||
| 472 | static int __init gennvm_module_init(void) | 505 | static int __init gennvm_module_init(void) |
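
In gennvm, the bad-block callback now receives a device-format PPA plus a byte-per-block table instead of a per-LUN bitmap, converting back with dev_to_generic_addr() before indexing its LUN array, and every block state change updates the new nr_free/nr_inuse/nr_bad counters in matched pairs so gennvm_lun_info_print() can report all three under the vlun lock. The counter discipline in miniature:

    enum blk_state { BLK_FREE, BLK_INUSE, BLK_BAD };

    /* Every transition decrements one counter and increments another,
     * keeping the per-LUN totals consistent. */
    static void move_block(unsigned int counts[3],
                           enum blk_state from, enum blk_state to)
    {
            counts[from]--;
            counts[to]++;
    }
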
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h index d23bd3501ddc..9c24b5b32dac 100644 --- a/drivers/lightnvm/gennvm.h +++ b/drivers/lightnvm/gennvm.h | |||
| @@ -35,6 +35,8 @@ struct gen_lun { | |||
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | struct gen_nvm { | 37 | struct gen_nvm { |
| 38 | struct nvm_dev *dev; | ||
| 39 | |||
| 38 | int nr_luns; | 40 | int nr_luns; |
| 39 | struct gen_lun *luns; | 41 | struct gen_lun *luns; |
| 40 | }; | 42 | }; |
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c index 7ba64c87ba1c..75e59c3a3f96 100644 --- a/drivers/lightnvm/rrpc.c +++ b/drivers/lightnvm/rrpc.c | |||
| @@ -123,12 +123,42 @@ static u64 block_to_addr(struct rrpc *rrpc, struct rrpc_block *rblk) | |||
| 123 | return blk->id * rrpc->dev->pgs_per_blk; | 123 | return blk->id * rrpc->dev->pgs_per_blk; |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | static struct ppa_addr linear_to_generic_addr(struct nvm_dev *dev, | ||
| 127 | struct ppa_addr r) | ||
| 128 | { | ||
| 129 | struct ppa_addr l; | ||
| 130 | int secs, pgs, blks, luns; | ||
| 131 | sector_t ppa = r.ppa; | ||
| 132 | |||
| 133 | l.ppa = 0; | ||
| 134 | |||
| 135 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | ||
| 136 | l.g.sec = secs; | ||
| 137 | |||
| 138 | sector_div(ppa, dev->sec_per_pg); | ||
| 139 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
| 140 | l.g.pg = pgs; | ||
| 141 | |||
| 142 | sector_div(ppa, dev->pgs_per_blk); | ||
| 143 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
| 144 | l.g.blk = blks; | ||
| 145 | |||
| 146 | sector_div(ppa, dev->blks_per_lun); | ||
| 147 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
| 148 | l.g.lun = luns; | ||
| 149 | |||
| 150 | sector_div(ppa, dev->luns_per_chnl); | ||
| 151 | l.g.ch = ppa; | ||
| 152 | |||
| 153 | return l; | ||
| 154 | } | ||
| 155 | |||
| 126 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) | 156 | static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr) |
| 127 | { | 157 | { |
| 128 | struct ppa_addr paddr; | 158 | struct ppa_addr paddr; |
| 129 | 159 | ||
| 130 | paddr.ppa = addr; | 160 | paddr.ppa = addr; |
| 131 | return __linear_to_generic_addr(dev, paddr); | 161 | return linear_to_generic_addr(dev, paddr); |
| 132 | } | 162 | } |
| 133 | 163 | ||
| 134 | /* requires lun->lock taken */ | 164 | /* requires lun->lock taken */ |
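
rrpc's new linear_to_generic_addr() is a mixed-radix decode: each div/mod step peels off one coordinate, least significant first (sector within page, page within block, and so on up to channel). A standalone version of the general scheme with made-up geometry, for intuition:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long ppa = 123456;     /* linear address */
            const unsigned int sec_per_pg = 4, pgs_per_blk = 128,
                               blks_per_lun = 1024, luns_per_chnl = 4;
            unsigned int sec, pg, blk, lun, ch;

            sec = ppa % sec_per_pg;    ppa /= sec_per_pg;
            pg  = ppa % pgs_per_blk;   ppa /= pgs_per_blk;
            blk = ppa % blks_per_lun;  ppa /= blks_per_lun;
            lun = ppa % luns_per_chnl; ppa /= luns_per_chnl;
            ch  = ppa;

            /* prints: sec=0 pg=16 blk=241 lun=0 ch=0 */
            printf("sec=%u pg=%u blk=%u lun=%u ch=%u\n", sec, pg, blk, lun, ch);
            return 0;
    }
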
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 917d47e290ae..3147c8d09ea8 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
| @@ -112,7 +112,8 @@ struct iv_tcw_private { | |||
| 112 | * and encrypts / decrypts at the same time. | 112 | * and encrypts / decrypts at the same time. |
| 113 | */ | 113 | */ |
| 114 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, | 114 | enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, |
| 115 | DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; | 115 | DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD, |
| 116 | DM_CRYPT_EXIT_THREAD}; | ||
| 116 | 117 | ||
| 117 | /* | 118 | /* |
| 118 | * The fields in here must be read only after initialization. | 119 | * The fields in here must be read only after initialization. |
| @@ -1203,20 +1204,18 @@ continue_locked: | |||
| 1203 | if (!RB_EMPTY_ROOT(&cc->write_tree)) | 1204 | if (!RB_EMPTY_ROOT(&cc->write_tree)) |
| 1204 | goto pop_from_list; | 1205 | goto pop_from_list; |
| 1205 | 1206 | ||
| 1207 | if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) { | ||
| 1208 | spin_unlock_irq(&cc->write_thread_wait.lock); | ||
| 1209 | break; | ||
| 1210 | } | ||
| 1211 | |||
| 1206 | __set_current_state(TASK_INTERRUPTIBLE); | 1212 | __set_current_state(TASK_INTERRUPTIBLE); |
| 1207 | __add_wait_queue(&cc->write_thread_wait, &wait); | 1213 | __add_wait_queue(&cc->write_thread_wait, &wait); |
| 1208 | 1214 | ||
| 1209 | spin_unlock_irq(&cc->write_thread_wait.lock); | 1215 | spin_unlock_irq(&cc->write_thread_wait.lock); |
| 1210 | 1216 | ||
| 1211 | if (unlikely(kthread_should_stop())) { | ||
| 1212 | set_task_state(current, TASK_RUNNING); | ||
| 1213 | remove_wait_queue(&cc->write_thread_wait, &wait); | ||
| 1214 | break; | ||
| 1215 | } | ||
| 1216 | |||
| 1217 | schedule(); | 1217 | schedule(); |
| 1218 | 1218 | ||
| 1219 | set_task_state(current, TASK_RUNNING); | ||
| 1220 | spin_lock_irq(&cc->write_thread_wait.lock); | 1219 | spin_lock_irq(&cc->write_thread_wait.lock); |
| 1221 | __remove_wait_queue(&cc->write_thread_wait, &wait); | 1220 | __remove_wait_queue(&cc->write_thread_wait, &wait); |
| 1222 | goto continue_locked; | 1221 | goto continue_locked; |
| @@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti) | |||
| 1531 | if (!cc) | 1530 | if (!cc) |
| 1532 | return; | 1531 | return; |
| 1533 | 1532 | ||
| 1534 | if (cc->write_thread) | 1533 | if (cc->write_thread) { |
| 1534 | spin_lock_irq(&cc->write_thread_wait.lock); | ||
| 1535 | set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags); | ||
| 1536 | wake_up_locked(&cc->write_thread_wait); | ||
| 1537 | spin_unlock_irq(&cc->write_thread_wait.lock); | ||
| 1535 | kthread_stop(cc->write_thread); | 1538 | kthread_stop(cc->write_thread); |
| 1539 | } | ||
| 1536 | 1540 | ||
| 1537 | if (cc->io_queue) | 1541 | if (cc->io_queue) |
| 1538 | destroy_workqueue(cc->io_queue); | 1542 | destroy_workqueue(cc->io_queue); |
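
The dm-crypt change closes a shutdown race: kthread_should_stop() was tested after dropping the waitqueue lock, so the stop request could slip in between the check and schedule(). Now crypt_dtr() sets DM_CRYPT_EXIT_THREAD and issues the wakeup while holding the waitqueue's own lock, and the worker tests the flag under that same lock before deciding to sleep, so it either sees the flag or receives the wakeup; missing both is impossible. The handshake sketched as fragments (real wait-queue API, abbreviated logic):

    /* requester, e.g. in the destructor: */
    spin_lock_irq(&cc->write_thread_wait.lock);
    set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
    wake_up_locked(&cc->write_thread_wait);
    spin_unlock_irq(&cc->write_thread_wait.lock);

    /* worker, already holding write_thread_wait.lock: */
    if (test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags)) {
            spin_unlock_irq(&cc->write_thread_wait.lock);
            break;          /* leave the worker loop */
    }
    /* otherwise: add self to the queue, drop the lock, schedule() */
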
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaa6caa46a9f..cfa29f574c2a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti, | |||
| 1537 | struct block_device **bdev, fmode_t *mode) | 1537 | struct block_device **bdev, fmode_t *mode) |
| 1538 | { | 1538 | { |
| 1539 | struct multipath *m = ti->private; | 1539 | struct multipath *m = ti->private; |
| 1540 | struct pgpath *pgpath; | ||
| 1541 | unsigned long flags; | 1540 | unsigned long flags; |
| 1542 | int r; | 1541 | int r; |
| 1543 | 1542 | ||
| 1544 | r = 0; | ||
| 1545 | |||
| 1546 | spin_lock_irqsave(&m->lock, flags); | 1543 | spin_lock_irqsave(&m->lock, flags); |
| 1547 | 1544 | ||
| 1548 | if (!m->current_pgpath) | 1545 | if (!m->current_pgpath) |
| 1549 | __choose_pgpath(m, 0); | 1546 | __choose_pgpath(m, 0); |
| 1550 | 1547 | ||
| 1551 | pgpath = m->current_pgpath; | 1548 | if (m->current_pgpath) { |
| 1552 | 1549 | if (!m->queue_io) { | |
| 1553 | if (pgpath) { | 1550 | *bdev = m->current_pgpath->path.dev->bdev; |
| 1554 | *bdev = pgpath->path.dev->bdev; | 1551 | *mode = m->current_pgpath->path.dev->mode; |
| 1555 | *mode = pgpath->path.dev->mode; | 1552 | r = 0; |
| 1553 | } else { | ||
| 1554 | /* pg_init has not started or completed */ | ||
| 1555 | r = -ENOTCONN; | ||
| 1556 | } | ||
| 1557 | } else { | ||
| 1558 | /* No path is available */ | ||
| 1559 | if (m->queue_if_no_path) | ||
| 1560 | r = -ENOTCONN; | ||
| 1561 | else | ||
| 1562 | r = -EIO; | ||
| 1556 | } | 1563 | } |
| 1557 | 1564 | ||
| 1558 | if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) | ||
| 1559 | r = -ENOTCONN; | ||
| 1560 | else if (!*bdev) | ||
| 1561 | r = -EIO; | ||
| 1562 | |||
| 1563 | spin_unlock_irqrestore(&m->lock, flags); | 1565 | spin_unlock_irqrestore(&m->lock, flags); |
| 1564 | 1566 | ||
| 1565 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { | 1567 | if (r == -ENOTCONN) { |
| 1566 | spin_lock_irqsave(&m->lock, flags); | 1568 | spin_lock_irqsave(&m->lock, flags); |
| 1567 | if (!m->current_pg) { | 1569 | if (!m->current_pg) { |
| 1568 | /* Path status changed, redo selection */ | 1570 | /* Path status changed, redo selection */ |
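
multipath_prepare_ioctl() now derives its result from explicit cases instead of the old compound condition, which makes the retry semantics readable at a glance (summarized from the branch above):

    current path   state                    result
    yes            pg_init complete         0, bdev/mode returned
    yes            queue_io (pg_init busy)  -ENOTCONN, caller retries
    no             queue_if_no_path set     -ENOTCONN, caller retries
    no             fail-fast                -EIO

Note the fatal_signal_pending() guard moved out of this function; the caller's retry loop in dm.c (next hunks) now owns it.
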
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 3897b90bd462..63903a5a5d9e 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) | |||
| 2432 | case PM_WRITE: | 2432 | case PM_WRITE: |
| 2433 | if (old_mode != new_mode) | 2433 | if (old_mode != new_mode) |
| 2434 | notify_of_pool_mode_change(pool, "write"); | 2434 | notify_of_pool_mode_change(pool, "write"); |
| 2435 | pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; | ||
| 2435 | dm_pool_metadata_read_write(pool->pmd); | 2436 | dm_pool_metadata_read_write(pool->pmd); |
| 2436 | pool->process_bio = process_bio; | 2437 | pool->process_bio = process_bio; |
| 2437 | pool->process_discard = process_discard_bio; | 2438 | pool->process_discard = process_discard_bio; |
| @@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
| 4249 | { | 4250 | { |
| 4250 | struct thin_c *tc = ti->private; | 4251 | struct thin_c *tc = ti->private; |
| 4251 | struct pool *pool = tc->pool; | 4252 | struct pool *pool = tc->pool; |
| 4252 | struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md); | ||
| 4253 | 4253 | ||
| 4254 | if (!pool_limits->discard_granularity) | 4254 | if (!pool->pf.discard_enabled) |
| 4255 | return; /* pool's discard support is disabled */ | 4255 | return; |
| 4256 | 4256 | ||
| 4257 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; | 4257 | limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; |
| 4258 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ | 4258 | limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */ |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e15f3565892..5df40480228b 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -591,7 +591,7 @@ retry: | |||
| 591 | 591 | ||
| 592 | out: | 592 | out: |
| 593 | dm_put_live_table(md, *srcu_idx); | 593 | dm_put_live_table(md, *srcu_idx); |
| 594 | if (r == -ENOTCONN) { | 594 | if (r == -ENOTCONN && !fatal_signal_pending(current)) { |
| 595 | msleep(10); | 595 | msleep(10); |
| 596 | goto retry; | 596 | goto retry; |
| 597 | } | 597 | } |
| @@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 603 | { | 603 | { |
| 604 | struct mapped_device *md = bdev->bd_disk->private_data; | 604 | struct mapped_device *md = bdev->bd_disk->private_data; |
| 605 | struct dm_target *tgt; | 605 | struct dm_target *tgt; |
| 606 | struct block_device *tgt_bdev = NULL; | ||
| 606 | int srcu_idx, r; | 607 | int srcu_idx, r; |
| 607 | 608 | ||
| 608 | r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx); | 609 | r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx); |
| 609 | if (r < 0) | 610 | if (r < 0) |
| 610 | return r; | 611 | return r; |
| 611 | 612 | ||
| @@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 620 | goto out; | 621 | goto out; |
| 621 | } | 622 | } |
| 622 | 623 | ||
| 623 | r = __blkdev_driver_ioctl(bdev, mode, cmd, arg); | 624 | r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg); |
| 624 | out: | 625 | out: |
| 625 | dm_put_live_table(md, srcu_idx); | 626 | dm_put_live_table(md, srcu_idx); |
| 626 | return r; | 627 | return r; |
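
Two fixes in dm.c: the ioctl retry loop now also gives up when a fatal signal is pending, keeping the caller killable while a path is transiently unavailable, and the target's block device is captured in its own local (tgt_bdev) instead of overwriting the bdev argument the caller passed in. The retry idiom by itself, with a hypothetical operation:

    retry:
            r = try_operation();                    /* stand-in for the lookup */
            if (r == -ENOTCONN && !fatal_signal_pending(current)) {
                    msleep(10);                     /* transient: back off */
                    goto retry;
            }
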
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c index 35759a91d47d..e8f847226a19 100644 --- a/drivers/media/pci/cx23885/cx23885-core.c +++ b/drivers/media/pci/cx23885/cx23885-core.c | |||
| @@ -1992,9 +1992,9 @@ static int cx23885_initdev(struct pci_dev *pci_dev, | |||
| 1992 | (unsigned long long)pci_resource_start(pci_dev, 0)); | 1992 | (unsigned long long)pci_resource_start(pci_dev, 0)); |
| 1993 | 1993 | ||
| 1994 | pci_set_master(pci_dev); | 1994 | pci_set_master(pci_dev); |
| 1995 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1995 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1996 | if (err) { | ||
| 1996 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1997 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1997 | err = -EIO; | ||
| 1998 | goto fail_context; | 1998 | goto fail_context; |
| 1999 | } | 1999 | } |
| 2000 | 2000 | ||
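
The cx23885 hunk above is the first of a series of identical fixes (cx25821, cx88, netup_unidvb, saa7134, saa7164, tw68, and pcnet32 below): pci_set_dma_mask() returns 0 on success and a negative errno on failure, so the old "if (!pci_set_dma_mask(...))" entered the error path precisely when the mask was accepted, and silently ignored real failures. The corrected shape also propagates the genuine errno:

    err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
    if (err) {
            dev_err(&pci_dev->dev, "no 32-bit PCI DMA\n");
            goto fail;      /* err holds the real errno */
    }
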
diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c index dbc695f32760..0042803a9de7 100644 --- a/drivers/media/pci/cx25821/cx25821-core.c +++ b/drivers/media/pci/cx25821/cx25821-core.c | |||
| @@ -1319,7 +1319,8 @@ static int cx25821_initdev(struct pci_dev *pci_dev, | |||
| 1319 | dev->pci_lat, (unsigned long long)dev->base_io_addr); | 1319 | dev->pci_lat, (unsigned long long)dev->base_io_addr); |
| 1320 | 1320 | ||
| 1321 | pci_set_master(pci_dev); | 1321 | pci_set_master(pci_dev); |
| 1322 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1322 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1323 | if (err) { | ||
| 1323 | pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1324 | pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1324 | err = -EIO; | 1325 | err = -EIO; |
| 1325 | goto fail_irq; | 1326 | goto fail_irq; |
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c index 0ed1b6530374..1b5268f9bb24 100644 --- a/drivers/media/pci/cx88/cx88-alsa.c +++ b/drivers/media/pci/cx88/cx88-alsa.c | |||
| @@ -890,9 +890,9 @@ static int snd_cx88_create(struct snd_card *card, struct pci_dev *pci, | |||
| 890 | return err; | 890 | return err; |
| 891 | } | 891 | } |
| 892 | 892 | ||
| 893 | if (!pci_set_dma_mask(pci,DMA_BIT_MASK(32))) { | 893 | err = pci_set_dma_mask(pci,DMA_BIT_MASK(32)); |
| 894 | if (err) { | ||
| 894 | dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); | 895 | dprintk(0, "%s/1: Oops: no 32bit PCI DMA ???\n",core->name); |
| 895 | err = -EIO; | ||
| 896 | cx88_core_put(core, pci); | 896 | cx88_core_put(core, pci); |
| 897 | return err; | 897 | return err; |
| 898 | } | 898 | } |
diff --git a/drivers/media/pci/cx88/cx88-mpeg.c b/drivers/media/pci/cx88/cx88-mpeg.c index 9db7767d1fe0..f34c229f9b37 100644 --- a/drivers/media/pci/cx88/cx88-mpeg.c +++ b/drivers/media/pci/cx88/cx88-mpeg.c | |||
| @@ -393,7 +393,8 @@ static int cx8802_init_common(struct cx8802_dev *dev) | |||
| 393 | if (pci_enable_device(dev->pci)) | 393 | if (pci_enable_device(dev->pci)) |
| 394 | return -EIO; | 394 | return -EIO; |
| 395 | pci_set_master(dev->pci); | 395 | pci_set_master(dev->pci); |
| 396 | if (!pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32))) { | 396 | err = pci_set_dma_mask(dev->pci,DMA_BIT_MASK(32)); |
| 397 | if (err) { | ||
| 397 | printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); | 398 | printk("%s/2: Oops: no 32bit PCI DMA ???\n",dev->core->name); |
| 398 | return -EIO; | 399 | return -EIO; |
| 399 | } | 400 | } |
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index 0de1ad5a977d..aef9acf351f6 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c | |||
| @@ -1314,9 +1314,9 @@ static int cx8800_initdev(struct pci_dev *pci_dev, | |||
| 1314 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); | 1314 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); |
| 1315 | 1315 | ||
| 1316 | pci_set_master(pci_dev); | 1316 | pci_set_master(pci_dev); |
| 1317 | if (!pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32))) { | 1317 | err = pci_set_dma_mask(pci_dev,DMA_BIT_MASK(32)); |
| 1318 | if (err) { | ||
| 1318 | printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); | 1319 | printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name); |
| 1319 | err = -EIO; | ||
| 1320 | goto fail_core; | 1320 | goto fail_core; |
| 1321 | } | 1321 | } |
| 1322 | dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); | 1322 | dev->alloc_ctx = vb2_dma_sg_init_ctx(&pci_dev->dev); |
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c index 60b2d462f98d..3fdbd81b5580 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c | |||
| @@ -810,7 +810,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev, | |||
| 810 | "%s(): board vendor 0x%x, revision 0x%x\n", | 810 | "%s(): board vendor 0x%x, revision 0x%x\n", |
| 811 | __func__, board_vendor, board_revision); | 811 | __func__, board_vendor, board_revision); |
| 812 | pci_set_master(pci_dev); | 812 | pci_set_master(pci_dev); |
| 813 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 813 | if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) { |
| 814 | dev_err(&pci_dev->dev, | 814 | dev_err(&pci_dev->dev, |
| 815 | "%s(): 32bit PCI DMA is not supported\n", __func__); | 815 | "%s(): 32bit PCI DMA is not supported\n", __func__); |
| 816 | goto pci_detect_err; | 816 | goto pci_detect_err; |
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c index e79d63eb774e..f720cea80e28 100644 --- a/drivers/media/pci/saa7134/saa7134-core.c +++ b/drivers/media/pci/saa7134/saa7134-core.c | |||
| @@ -951,9 +951,9 @@ static int saa7134_initdev(struct pci_dev *pci_dev, | |||
| 951 | pci_name(pci_dev), dev->pci_rev, pci_dev->irq, | 951 | pci_name(pci_dev), dev->pci_rev, pci_dev->irq, |
| 952 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); | 952 | dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0)); |
| 953 | pci_set_master(pci_dev); | 953 | pci_set_master(pci_dev); |
| 954 | if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { | 954 | err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); |
| 955 | if (err) { | ||
| 955 | pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); | 956 | pr_warn("%s: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 956 | err = -EIO; | ||
| 957 | goto fail1; | 957 | goto fail1; |
| 958 | } | 958 | } |
| 959 | 959 | ||
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c index 8f36b48ef733..8bbd092fbe1d 100644 --- a/drivers/media/pci/saa7164/saa7164-core.c +++ b/drivers/media/pci/saa7164/saa7164-core.c | |||
| @@ -1264,9 +1264,9 @@ static int saa7164_initdev(struct pci_dev *pci_dev, | |||
| 1264 | 1264 | ||
| 1265 | pci_set_master(pci_dev); | 1265 | pci_set_master(pci_dev); |
| 1266 | /* TODO */ | 1266 | /* TODO */ |
| 1267 | if (!pci_set_dma_mask(pci_dev, 0xffffffff)) { | 1267 | err = pci_set_dma_mask(pci_dev, 0xffffffff); |
| 1268 | if (err) { | ||
| 1268 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); | 1269 | printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 1269 | err = -EIO; | ||
| 1270 | goto fail_irq; | 1270 | goto fail_irq; |
| 1271 | } | 1271 | } |
| 1272 | 1272 | ||
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c index 8c5655d351d3..4e77618fbb2b 100644 --- a/drivers/media/pci/tw68/tw68-core.c +++ b/drivers/media/pci/tw68/tw68-core.c | |||
| @@ -257,9 +257,9 @@ static int tw68_initdev(struct pci_dev *pci_dev, | |||
| 257 | dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, | 257 | dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq, |
| 258 | dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); | 258 | dev->pci_lat, (u64)pci_resource_start(pci_dev, 0)); |
| 259 | pci_set_master(pci_dev); | 259 | pci_set_master(pci_dev); |
| 260 | if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { | 260 | err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); |
| 261 | if (err) { | ||
| 261 | pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); | 262 | pr_info("%s: Oops: no 32bit PCI DMA ???\n", dev->name); |
| 262 | err = -EIO; | ||
| 263 | goto fail1; | 263 | goto fail1; |
| 264 | } | 264 | } |
| 265 | 265 | ||
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index dc4e8446f1ff..5a99a93ed025 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #include <linux/gpio.h> | 26 | #include <linux/gpio.h> |
| 27 | 27 | ||
| 28 | #include <asm/mach-jz4740/gpio.h> | ||
| 28 | #include <asm/mach-jz4740/jz4740_nand.h> | 29 | #include <asm/mach-jz4740/jz4740_nand.h> |
| 29 | 30 | ||
| 30 | #define JZ_REG_NAND_CTRL 0x50 | 31 | #define JZ_REG_NAND_CTRL 0x50 |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index cc74142938b0..ece544efccc3 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
| @@ -3110,7 +3110,7 @@ static void nand_resume(struct mtd_info *mtd) | |||
| 3110 | */ | 3110 | */ |
| 3111 | static void nand_shutdown(struct mtd_info *mtd) | 3111 | static void nand_shutdown(struct mtd_info *mtd) |
| 3112 | { | 3112 | { |
| 3113 | nand_get_device(mtd, FL_SHUTDOWN); | 3113 | nand_get_device(mtd, FL_PM_SUSPENDED); |
| 3114 | } | 3114 | } |
| 3115 | 3115 | ||
| 3116 | /* Set default functions */ | 3116 | /* Set default functions */ |
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index e2afabf3a465..7ccebae9cb48 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c | |||
| @@ -1500,10 +1500,11 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1500 | return -ENODEV; | 1500 | return -ENODEV; |
| 1501 | } | 1501 | } |
| 1502 | 1502 | ||
| 1503 | if (!pci_set_dma_mask(pdev, PCNET32_DMA_MASK)) { | 1503 | err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); |
| 1504 | if (err) { | ||
| 1504 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1505 | if (pcnet32_debug & NETIF_MSG_PROBE) |
| 1505 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); | 1506 | pr_err("architecture does not support 32bit PCI busmaster DMA\n"); |
| 1506 | return -ENODEV; | 1507 | return err; |
| 1507 | } | 1508 | } |
| 1508 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { | 1509 | if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { |
| 1509 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1510 | if (pcnet32_debug & NETIF_MSG_PROBE) |
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index e0b7b95813bc..9202d1a468d0 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
| @@ -93,7 +93,7 @@ struct nvme_nvm_l2ptbl { | |||
| 93 | __le16 cdw14[6]; | 93 | __le16 cdw14[6]; |
| 94 | }; | 94 | }; |
| 95 | 95 | ||
| 96 | struct nvme_nvm_bbtbl { | 96 | struct nvme_nvm_getbbtbl { |
| 97 | __u8 opcode; | 97 | __u8 opcode; |
| 98 | __u8 flags; | 98 | __u8 flags; |
| 99 | __u16 command_id; | 99 | __u16 command_id; |
| @@ -101,10 +101,23 @@ struct nvme_nvm_bbtbl { | |||
| 101 | __u64 rsvd[2]; | 101 | __u64 rsvd[2]; |
| 102 | __le64 prp1; | 102 | __le64 prp1; |
| 103 | __le64 prp2; | 103 | __le64 prp2; |
| 104 | __le32 prp1_len; | 104 | __le64 spba; |
| 105 | __le32 prp2_len; | 105 | __u32 rsvd4[4]; |
| 106 | __le32 lbb; | 106 | }; |
| 107 | __u32 rsvd11[3]; | 107 | |
| 108 | struct nvme_nvm_setbbtbl { | ||
| 109 | __u8 opcode; | ||
| 110 | __u8 flags; | ||
| 111 | __u16 command_id; | ||
| 112 | __le32 nsid; | ||
| 113 | __le64 rsvd[2]; | ||
| 114 | __le64 prp1; | ||
| 115 | __le64 prp2; | ||
| 116 | __le64 spba; | ||
| 117 | __le16 nlb; | ||
| 118 | __u8 value; | ||
| 119 | __u8 rsvd3; | ||
| 120 | __u32 rsvd4[3]; | ||
| 108 | }; | 121 | }; |
| 109 | 122 | ||
| 110 | struct nvme_nvm_erase_blk { | 123 | struct nvme_nvm_erase_blk { |
| @@ -129,8 +142,8 @@ struct nvme_nvm_command { | |||
| 129 | struct nvme_nvm_hb_rw hb_rw; | 142 | struct nvme_nvm_hb_rw hb_rw; |
| 130 | struct nvme_nvm_ph_rw ph_rw; | 143 | struct nvme_nvm_ph_rw ph_rw; |
| 131 | struct nvme_nvm_l2ptbl l2p; | 144 | struct nvme_nvm_l2ptbl l2p; |
| 132 | struct nvme_nvm_bbtbl get_bb; | 145 | struct nvme_nvm_getbbtbl get_bb; |
| 133 | struct nvme_nvm_bbtbl set_bb; | 146 | struct nvme_nvm_setbbtbl set_bb; |
| 134 | struct nvme_nvm_erase_blk erase; | 147 | struct nvme_nvm_erase_blk erase; |
| 135 | }; | 148 | }; |
| 136 | }; | 149 | }; |
| @@ -142,11 +155,13 @@ struct nvme_nvm_id_group { | |||
| 142 | __u8 num_ch; | 155 | __u8 num_ch; |
| 143 | __u8 num_lun; | 156 | __u8 num_lun; |
| 144 | __u8 num_pln; | 157 | __u8 num_pln; |
| 158 | __u8 rsvd1; | ||
| 145 | __le16 num_blk; | 159 | __le16 num_blk; |
| 146 | __le16 num_pg; | 160 | __le16 num_pg; |
| 147 | __le16 fpg_sz; | 161 | __le16 fpg_sz; |
| 148 | __le16 csecs; | 162 | __le16 csecs; |
| 149 | __le16 sos; | 163 | __le16 sos; |
| 164 | __le16 rsvd2; | ||
| 150 | __le32 trdt; | 165 | __le32 trdt; |
| 151 | __le32 trdm; | 166 | __le32 trdm; |
| 152 | __le32 tprt; | 167 | __le32 tprt; |
| @@ -154,8 +169,9 @@ struct nvme_nvm_id_group { | |||
| 154 | __le32 tbet; | 169 | __le32 tbet; |
| 155 | __le32 tbem; | 170 | __le32 tbem; |
| 156 | __le32 mpos; | 171 | __le32 mpos; |
| 172 | __le32 mccap; | ||
| 157 | __le16 cpar; | 173 | __le16 cpar; |
| 158 | __u8 reserved[913]; | 174 | __u8 reserved[906]; |
| 159 | } __packed; | 175 | } __packed; |
| 160 | 176 | ||
| 161 | struct nvme_nvm_addr_format { | 177 | struct nvme_nvm_addr_format { |
| @@ -178,15 +194,28 @@ struct nvme_nvm_id { | |||
| 178 | __u8 ver_id; | 194 | __u8 ver_id; |
| 179 | __u8 vmnt; | 195 | __u8 vmnt; |
| 180 | __u8 cgrps; | 196 | __u8 cgrps; |
| 181 | __u8 res[5]; | 197 | __u8 res; |
| 182 | __le32 cap; | 198 | __le32 cap; |
| 183 | __le32 dom; | 199 | __le32 dom; |
| 184 | struct nvme_nvm_addr_format ppaf; | 200 | struct nvme_nvm_addr_format ppaf; |
| 185 | __u8 ppat; | 201 | __u8 resv[228]; |
| 186 | __u8 resv[223]; | ||
| 187 | struct nvme_nvm_id_group groups[4]; | 202 | struct nvme_nvm_id_group groups[4]; |
| 188 | } __packed; | 203 | } __packed; |
| 189 | 204 | ||
| 205 | struct nvme_nvm_bb_tbl { | ||
| 206 | __u8 tblid[4]; | ||
| 207 | __le16 verid; | ||
| 208 | __le16 revid; | ||
| 209 | __le32 rvsd1; | ||
| 210 | __le32 tblks; | ||
| 211 | __le32 tfact; | ||
| 212 | __le32 tgrown; | ||
| 213 | __le32 tdresv; | ||
| 214 | __le32 thresv; | ||
| 215 | __le32 rsvd2[8]; | ||
| 216 | __u8 blk[0]; | ||
| 217 | }; | ||
| 218 | |||
| 190 | /* | 219 | /* |
| 191 | * Check we didn't inadvertently grow the command struct | 220 | * Check we didn't inadvertently grow the command struct |
| 192 | */ | 221 | */ |
| @@ -195,12 +224,14 @@ static inline void _nvme_nvm_check_size(void) | |||
| 195 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); | 224 | BUILD_BUG_ON(sizeof(struct nvme_nvm_identity) != 64); |
| 196 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); | 225 | BUILD_BUG_ON(sizeof(struct nvme_nvm_hb_rw) != 64); |
| 197 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); | 226 | BUILD_BUG_ON(sizeof(struct nvme_nvm_ph_rw) != 64); |
| 198 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bbtbl) != 64); | 227 | BUILD_BUG_ON(sizeof(struct nvme_nvm_getbbtbl) != 64); |
| 228 | BUILD_BUG_ON(sizeof(struct nvme_nvm_setbbtbl) != 64); | ||
| 199 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); | 229 | BUILD_BUG_ON(sizeof(struct nvme_nvm_l2ptbl) != 64); |
| 200 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); | 230 | BUILD_BUG_ON(sizeof(struct nvme_nvm_erase_blk) != 64); |
| 201 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); | 231 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id_group) != 960); |
| 202 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); | 232 | BUILD_BUG_ON(sizeof(struct nvme_nvm_addr_format) != 128); |
| 203 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); | 233 | BUILD_BUG_ON(sizeof(struct nvme_nvm_id) != 4096); |
| 234 | BUILD_BUG_ON(sizeof(struct nvme_nvm_bb_tbl) != 512); | ||
| 204 | } | 235 | } |
| 205 | 236 | ||
| 206 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | 237 | static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) |
| @@ -234,6 +265,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
| 234 | dst->tbet = le32_to_cpu(src->tbet); | 265 | dst->tbet = le32_to_cpu(src->tbet); |
| 235 | dst->tbem = le32_to_cpu(src->tbem); | 266 | dst->tbem = le32_to_cpu(src->tbem); |
| 236 | dst->mpos = le32_to_cpu(src->mpos); | 267 | dst->mpos = le32_to_cpu(src->mpos); |
| 268 | dst->mccap = le32_to_cpu(src->mccap); | ||
| 237 | 269 | ||
| 238 | dst->cpar = le16_to_cpu(src->cpar); | 270 | dst->cpar = le16_to_cpu(src->cpar); |
| 239 | } | 271 | } |
| @@ -244,6 +276,7 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id) | |||
| 244 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | 276 | static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) |
| 245 | { | 277 | { |
| 246 | struct nvme_ns *ns = q->queuedata; | 278 | struct nvme_ns *ns = q->queuedata; |
| 279 | struct nvme_dev *dev = ns->dev; | ||
| 247 | struct nvme_nvm_id *nvme_nvm_id; | 280 | struct nvme_nvm_id *nvme_nvm_id; |
| 248 | struct nvme_nvm_command c = {}; | 281 | struct nvme_nvm_command c = {}; |
| 249 | int ret; | 282 | int ret; |
| @@ -256,8 +289,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
| 256 | if (!nvme_nvm_id) | 289 | if (!nvme_nvm_id) |
| 257 | return -ENOMEM; | 290 | return -ENOMEM; |
| 258 | 291 | ||
| 259 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, nvme_nvm_id, | 292 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
| 260 | sizeof(struct nvme_nvm_id)); | 293 | nvme_nvm_id, sizeof(struct nvme_nvm_id)); |
| 261 | if (ret) { | 294 | if (ret) { |
| 262 | ret = -EIO; | 295 | ret = -EIO; |
| 263 | goto out; | 296 | goto out; |
| @@ -268,6 +301,8 @@ static int nvme_nvm_identity(struct request_queue *q, struct nvm_id *nvm_id) | |||
| 268 | nvm_id->cgrps = nvme_nvm_id->cgrps; | 301 | nvm_id->cgrps = nvme_nvm_id->cgrps; |
| 269 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); | 302 | nvm_id->cap = le32_to_cpu(nvme_nvm_id->cap); |
| 270 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); | 303 | nvm_id->dom = le32_to_cpu(nvme_nvm_id->dom); |
| 304 | memcpy(&nvm_id->ppaf, &nvme_nvm_id->ppaf, | ||
| 305 | sizeof(struct nvme_nvm_addr_format)); | ||
| 271 | 306 | ||
| 272 | ret = init_grps(nvm_id, nvme_nvm_id); | 307 | ret = init_grps(nvm_id, nvme_nvm_id); |
| 273 | out: | 308 | out: |
| @@ -281,7 +316,7 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
| 281 | struct nvme_ns *ns = q->queuedata; | 316 | struct nvme_ns *ns = q->queuedata; |
| 282 | struct nvme_dev *dev = ns->dev; | 317 | struct nvme_dev *dev = ns->dev; |
| 283 | struct nvme_nvm_command c = {}; | 318 | struct nvme_nvm_command c = {}; |
| 284 | u32 len = queue_max_hw_sectors(q) << 9; | 319 | u32 len = queue_max_hw_sectors(dev->admin_q) << 9; |
| 285 | u32 nlb_pr_rq = len / sizeof(u64); | 320 | u32 nlb_pr_rq = len / sizeof(u64); |
| 286 | u64 cmd_slba = slba; | 321 | u64 cmd_slba = slba; |
| 287 | void *entries; | 322 | void *entries; |
| @@ -299,8 +334,8 @@ static int nvme_nvm_get_l2p_tbl(struct request_queue *q, u64 slba, u32 nlb, | |||
| 299 | c.l2p.slba = cpu_to_le64(cmd_slba); | 334 | c.l2p.slba = cpu_to_le64(cmd_slba); |
| 300 | c.l2p.nlb = cpu_to_le32(cmd_nlb); | 335 | c.l2p.nlb = cpu_to_le32(cmd_nlb); |
| 301 | 336 | ||
| 302 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, | 337 | ret = nvme_submit_sync_cmd(dev->admin_q, |
| 303 | entries, len); | 338 | (struct nvme_command *)&c, entries, len); |
| 304 | if (ret) { | 339 | if (ret) { |
| 305 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", | 340 | dev_err(dev->dev, "L2P table transfer failed (%d)\n", |
| 306 | ret); | 341 | ret); |
| @@ -322,43 +357,82 @@ out: | |||
| 322 | return ret; | 357 | return ret; |
| 323 | } | 358 | } |
| 324 | 359 | ||
| 325 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, int lunid, | 360 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, |
| 326 | unsigned int nr_blocks, | 361 | int nr_blocks, nvm_bb_update_fn *update_bbtbl, |
| 327 | nvm_bb_update_fn *update_bbtbl, void *priv) | 362 | void *priv) |
| 328 | { | 363 | { |
| 329 | struct nvme_ns *ns = q->queuedata; | 364 | struct nvme_ns *ns = q->queuedata; |
| 330 | struct nvme_dev *dev = ns->dev; | 365 | struct nvme_dev *dev = ns->dev; |
| 331 | struct nvme_nvm_command c = {}; | 366 | struct nvme_nvm_command c = {}; |
| 332 | void *bb_bitmap; | 367 | struct nvme_nvm_bb_tbl *bb_tbl; |
| 333 | u16 bb_bitmap_size; | 368 | int tblsz = sizeof(struct nvme_nvm_bb_tbl) + nr_blocks; |
| 334 | int ret = 0; | 369 | int ret = 0; |
| 335 | 370 | ||
| 336 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; | 371 | c.get_bb.opcode = nvme_nvm_admin_get_bb_tbl; |
| 337 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); | 372 | c.get_bb.nsid = cpu_to_le32(ns->ns_id); |
| 338 | c.get_bb.lbb = cpu_to_le32(lunid); | 373 | c.get_bb.spba = cpu_to_le64(ppa.ppa); |
| 339 | bb_bitmap_size = ((nr_blocks >> 15) + 1) * PAGE_SIZE; | ||
| 340 | bb_bitmap = kmalloc(bb_bitmap_size, GFP_KERNEL); | ||
| 341 | if (!bb_bitmap) | ||
| 342 | return -ENOMEM; | ||
| 343 | 374 | ||
| 344 | bitmap_zero(bb_bitmap, nr_blocks); | 375 | bb_tbl = kzalloc(tblsz, GFP_KERNEL); |
| 376 | if (!bb_tbl) | ||
| 377 | return -ENOMEM; | ||
| 345 | 378 | ||
| 346 | ret = nvme_submit_sync_cmd(q, (struct nvme_command *)&c, bb_bitmap, | 379 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, |
| 347 | bb_bitmap_size); | 380 | bb_tbl, tblsz); |
| 348 | if (ret) { | 381 | if (ret) { |
| 349 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); | 382 | dev_err(dev->dev, "get bad block table failed (%d)\n", ret); |
| 350 | ret = -EIO; | 383 | ret = -EIO; |
| 351 | goto out; | 384 | goto out; |
| 352 | } | 385 | } |
| 353 | 386 | ||
| 354 | ret = update_bbtbl(lunid, bb_bitmap, nr_blocks, priv); | 387 | if (bb_tbl->tblid[0] != 'B' || bb_tbl->tblid[1] != 'B' || |
| 388 | bb_tbl->tblid[2] != 'L' || bb_tbl->tblid[3] != 'T') { | ||
| 389 | dev_err(dev->dev, "bbt format mismatch\n"); | ||
| 390 | ret = -EINVAL; | ||
| 391 | goto out; | ||
| 392 | } | ||
| 393 | |||
| 394 | if (le16_to_cpu(bb_tbl->verid) != 1) { | ||
| 395 | ret = -EINVAL; | ||
| 396 | dev_err(dev->dev, "bbt version not supported\n"); | ||
| 397 | goto out; | ||
| 398 | } | ||
| 399 | |||
| 400 | if (le32_to_cpu(bb_tbl->tblks) != nr_blocks) { | ||
| 401 | ret = -EINVAL; | ||
| 402 | dev_err(dev->dev, "bbt unexpected blocks returned (%u!=%u)", | ||
| 403 | le32_to_cpu(bb_tbl->tblks), nr_blocks); | ||
| 404 | goto out; | ||
| 405 | } | ||
| 406 | |||
| 407 | ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); | ||
| 355 | if (ret) { | 408 | if (ret) { |
| 356 | ret = -EINTR; | 409 | ret = -EINTR; |
| 357 | goto out; | 410 | goto out; |
| 358 | } | 411 | } |
| 359 | 412 | ||
| 360 | out: | 413 | out: |
| 361 | kfree(bb_bitmap); | 414 | kfree(bb_tbl); |
| 415 | return ret; | ||
| 416 | } | ||
| 417 | |||
| 418 | static int nvme_nvm_set_bb_tbl(struct request_queue *q, struct nvm_rq *rqd, | ||
| 419 | int type) | ||
| 420 | { | ||
| 421 | struct nvme_ns *ns = q->queuedata; | ||
| 422 | struct nvme_dev *dev = ns->dev; | ||
| 423 | struct nvme_nvm_command c = {}; | ||
| 424 | int ret = 0; | ||
| 425 | |||
| 426 | c.set_bb.opcode = nvme_nvm_admin_set_bb_tbl; | ||
| 427 | c.set_bb.nsid = cpu_to_le32(ns->ns_id); | ||
| 428 | c.set_bb.spba = cpu_to_le64(rqd->ppa_addr.ppa); | ||
| 429 | c.set_bb.nlb = cpu_to_le16(rqd->nr_pages - 1); | ||
| 430 | c.set_bb.value = type; | ||
| 431 | |||
| 432 | ret = nvme_submit_sync_cmd(dev->admin_q, (struct nvme_command *)&c, | ||
| 433 | NULL, 0); | ||
| 434 | if (ret) | ||
| 435 | dev_err(dev->dev, "set bad block table failed (%d)\n", ret); | ||
| 362 | return ret; | 436 | return ret; |
| 363 | } | 437 | } |
| 364 | 438 | ||
| @@ -474,6 +548,7 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { | |||
| 474 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, | 548 | .get_l2p_tbl = nvme_nvm_get_l2p_tbl, |
| 475 | 549 | ||
| 476 | .get_bb_tbl = nvme_nvm_get_bb_tbl, | 550 | .get_bb_tbl = nvme_nvm_get_bb_tbl, |
| 551 | .set_bb_tbl = nvme_nvm_set_bb_tbl, | ||
| 477 | 552 | ||
| 478 | .submit_io = nvme_nvm_submit_io, | 553 | .submit_io = nvme_nvm_submit_io, |
| 479 | .erase_block = nvme_nvm_erase_block, | 554 | .erase_block = nvme_nvm_erase_block, |
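
Besides splitting the old nvme_nvm_bbtbl command struct into get/set variants, the new nvme_nvm_get_bb_tbl() stops trusting the controller's reply blindly: it verifies the 'BBLT' magic, the table version, and that the reported block count matches what was asked for before handing bb_tbl->blk to the update callback. A condensed sketch of that validation, assuming the nvme_nvm_bb_tbl layout introduced above (the helper name is hypothetical):

    /* Hypothetical helper mirroring the checks in nvme_nvm_get_bb_tbl();
     * 'expected' is the block count the caller requested. */
    static int example_validate_bb_tbl(struct device *dev,
                                       struct nvme_nvm_bb_tbl *tbl,
                                       u32 expected)
    {
            /* the table must start with the ASCII magic "BBLT" */
            if (tbl->tblid[0] != 'B' || tbl->tblid[1] != 'B' ||
                tbl->tblid[2] != 'L' || tbl->tblid[3] != 'T') {
                    dev_err(dev, "bbt format mismatch\n");
                    return -EINVAL;
            }

            /* only version 1 of the on-wire layout is understood */
            if (le16_to_cpu(tbl->verid) != 1) {
                    dev_err(dev, "bbt version not supported\n");
                    return -EINVAL;
            }

            /* the controller must describe exactly the blocks asked about */
            if (le32_to_cpu(tbl->tblks) != expected) {
                    dev_err(dev, "bbt block count mismatch (%u != %u)\n",
                            le32_to_cpu(tbl->tblks), expected);
                    return -EINVAL;
            }

            return 0;
    }

The BUILD_BUG_ON additions serve the same goal from the other side: they pin the new structs to the 64-byte command and 512-byte table sizes at compile time, so a layout regression fails the build instead of corrupting commands on the wire.
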
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8187df204695..f3b53af789ef 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 896 | goto retry_cmd; | 896 | goto retry_cmd; |
| 897 | } | 897 | } |
| 898 | if (blk_integrity_rq(req)) { | 898 | if (blk_integrity_rq(req)) { |
| 899 | if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) | 899 | if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) { |
| 900 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 901 | dma_dir); | ||
| 900 | goto error_cmd; | 902 | goto error_cmd; |
| 903 | } | ||
| 901 | 904 | ||
| 902 | sg_init_table(iod->meta_sg, 1); | 905 | sg_init_table(iod->meta_sg, 1); |
| 903 | if (blk_rq_map_integrity_sg( | 906 | if (blk_rq_map_integrity_sg( |
| 904 | req->q, req->bio, iod->meta_sg) != 1) | 907 | req->q, req->bio, iod->meta_sg) != 1) { |
| 908 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 909 | dma_dir); | ||
| 905 | goto error_cmd; | 910 | goto error_cmd; |
| 911 | } | ||
| 906 | 912 | ||
| 907 | if (rq_data_dir(req)) | 913 | if (rq_data_dir(req)) |
| 908 | nvme_dif_remap(req, nvme_dif_prep); | 914 | nvme_dif_remap(req, nvme_dif_prep); |
| 909 | 915 | ||
| 910 | if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) | 916 | if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) { |
| 917 | dma_unmap_sg(dev->dev, iod->sg, iod->nents, | ||
| 918 | dma_dir); | ||
| 911 | goto error_cmd; | 919 | goto error_cmd; |
| 920 | } | ||
| 912 | } | 921 | } |
| 913 | } | 922 | } |
| 914 | 923 | ||
| @@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag) | |||
| 968 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) | 977 | if (head == nvmeq->cq_head && phase == nvmeq->cq_phase) |
| 969 | return; | 978 | return; |
| 970 | 979 | ||
| 971 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | 980 | if (likely(nvmeq->cq_vector >= 0)) |
| 981 | writel(head, nvmeq->q_db + nvmeq->dev->db_stride); | ||
| 972 | nvmeq->cq_head = head; | 982 | nvmeq->cq_head = head; |
| 973 | nvmeq->cq_phase = phase; | 983 | nvmeq->cq_phase = phase; |
| 974 | 984 | ||
| @@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
| 1727 | u32 aqa; | 1737 | u32 aqa; |
| 1728 | u64 cap = lo_hi_readq(&dev->bar->cap); | 1738 | u64 cap = lo_hi_readq(&dev->bar->cap); |
| 1729 | struct nvme_queue *nvmeq; | 1739 | struct nvme_queue *nvmeq; |
| 1730 | unsigned page_shift = PAGE_SHIFT; | 1740 | /* |
| 1741 | * default to a 4K page size, with the intention to update this | ||
| 1742 | * path in the future to accommodate architectures with differing | ||
| 1743 | * kernel and IO page sizes. | ||
| 1744 | */ | ||
| 1745 | unsigned page_shift = 12; | ||
| 1731 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; | 1746 | unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12; |
| 1732 | unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12; | ||
| 1733 | 1747 | ||
| 1734 | if (page_shift < dev_page_min) { | 1748 | if (page_shift < dev_page_min) { |
| 1735 | dev_err(dev->dev, | 1749 | dev_err(dev->dev, |
| @@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
| 1738 | 1 << page_shift); | 1752 | 1 << page_shift); |
| 1739 | return -ENODEV; | 1753 | return -ENODEV; |
| 1740 | } | 1754 | } |
| 1741 | if (page_shift > dev_page_max) { | ||
| 1742 | dev_info(dev->dev, | ||
| 1743 | "Device maximum page size (%u) smaller than " | ||
| 1744 | "host (%u); enabling work-around\n", | ||
| 1745 | 1 << dev_page_max, 1 << page_shift); | ||
| 1746 | page_shift = dev_page_max; | ||
| 1747 | } | ||
| 1748 | 1755 | ||
| 1749 | dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? | 1756 | dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ? |
| 1750 | NVME_CAP_NSSRC(cap) : 0; | 1757 | NVME_CAP_NSSRC(cap) : 0; |
| @@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) | |||
| 2268 | if (dev->max_hw_sectors) { | 2275 | if (dev->max_hw_sectors) { |
| 2269 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); | 2276 | blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); |
| 2270 | blk_queue_max_segments(ns->queue, | 2277 | blk_queue_max_segments(ns->queue, |
| 2271 | ((dev->max_hw_sectors << 9) / dev->page_size) + 1); | 2278 | (dev->max_hw_sectors / (dev->page_size >> 9)) + 1); |
| 2272 | } | 2279 | } |
| 2273 | if (dev->stripe_size) | 2280 | if (dev->stripe_size) |
| 2274 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); | 2281 | blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9); |
| @@ -2787,6 +2794,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq) | |||
| 2787 | { | 2794 | { |
| 2788 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; | 2795 | struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; |
| 2789 | nvme_put_dq(dq); | 2796 | nvme_put_dq(dq); |
| 2797 | |||
| 2798 | spin_lock_irq(&nvmeq->q_lock); | ||
| 2799 | nvme_process_cq(nvmeq); | ||
| 2800 | spin_unlock_irq(&nvmeq->q_lock); | ||
| 2790 | } | 2801 | } |
| 2791 | 2802 | ||
| 2792 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, | 2803 | static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode, |
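
Two of the pci.c hunks are arithmetic rather than control flow. The max_segments change replaces ((max_hw_sectors << 9) / page_size) with (max_hw_sectors / (page_size >> 9)): both compute the transfer size in pages, but the first form shifts a 32-bit sector count into bytes before dividing and can wrap, while the second divides sectors by sectors-per-page and cannot. A small standalone illustration, with made-up numbers chosen to show the wraparound:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t max_hw_sectors = 0x1000000;    /* illustrative: 16M sectors */
            uint32_t page_size = 4096;

            /* old form: 0x1000000 << 9 == 2^33, which wraps to 0 in 32 bits */
            uint32_t seg_old = ((max_hw_sectors << 9) / page_size) + 1;

            /* new form: divide first; 0x1000000 / 8 + 1 == 2097153 */
            uint32_t seg_new = (max_hw_sectors / (page_size >> 9)) + 1;

            printf("old=%u new=%u\n", seg_old, seg_new);    /* old=1, new=2097153 */
            return 0;
    }

The page_shift hunk above it is the same spirit in reverse: rather than silently capping the host page size to the device maximum, the driver now fixes the I/O page size at 4K (shift 12) and drops the dev_page_max work-around, leaving a comment about revisiting this for architectures whose kernel page size differs.
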
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e735c728e3b3..edb1984201e9 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev) | |||
| 1685 | { | 1685 | { |
| 1686 | struct device *bridge = pci_get_host_bridge_device(dev); | 1686 | struct device *bridge = pci_get_host_bridge_device(dev); |
| 1687 | 1687 | ||
| 1688 | if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) { | 1688 | if (IS_ENABLED(CONFIG_OF) && |
| 1689 | if (bridge->parent) | 1689 | bridge->parent && bridge->parent->of_node) { |
| 1690 | of_dma_configure(&dev->dev, bridge->parent->of_node); | 1690 | of_dma_configure(&dev->dev, bridge->parent->of_node); |
| 1691 | } else if (has_acpi_companion(bridge)) { | 1691 | } else if (has_acpi_companion(bridge)) { |
| 1692 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); | 1692 | struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); |
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 25abd4eb7d10..91a003011acf 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
| @@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = { | |||
| 34 | 34 | ||
| 35 | static int __init sh_pm_runtime_init(void) | 35 | static int __init sh_pm_runtime_init(void) |
| 36 | { | 36 | { |
| 37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | 37 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { |
| 38 | if (!of_find_compatible_node(NULL, NULL, | 38 | if (!of_find_compatible_node(NULL, NULL, |
| 39 | "renesas,cpg-mstp-clocks")) | 39 | "renesas,cpg-mstp-clocks")) |
| 40 | return 0; | 40 | return 0; |
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index 6d5b38d69578..9d7f0004d2d7 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig | |||
| @@ -18,7 +18,8 @@ source "drivers/staging/iio/resolver/Kconfig" | |||
| 18 | source "drivers/staging/iio/trigger/Kconfig" | 18 | source "drivers/staging/iio/trigger/Kconfig" |
| 19 | 19 | ||
| 20 | config IIO_DUMMY_EVGEN | 20 | config IIO_DUMMY_EVGEN |
| 21 | tristate | 21 | tristate |
| 22 | select IRQ_WORK | ||
| 22 | 23 | ||
| 23 | config IIO_SIMPLE_DUMMY | 24 | config IIO_SIMPLE_DUMMY |
| 24 | tristate "An example driver with no hardware requirements" | 25 | tristate "An example driver with no hardware requirements" |
diff --git a/drivers/staging/iio/adc/lpc32xx_adc.c b/drivers/staging/iio/adc/lpc32xx_adc.c index d11c54b72186..b51f237cd817 100644 --- a/drivers/staging/iio/adc/lpc32xx_adc.c +++ b/drivers/staging/iio/adc/lpc32xx_adc.c | |||
| @@ -76,7 +76,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, | |||
| 76 | 76 | ||
| 77 | if (mask == IIO_CHAN_INFO_RAW) { | 77 | if (mask == IIO_CHAN_INFO_RAW) { |
| 78 | mutex_lock(&indio_dev->mlock); | 78 | mutex_lock(&indio_dev->mlock); |
| 79 | clk_enable(info->clk); | 79 | clk_prepare_enable(info->clk); |
| 80 | /* Measurement setup */ | 80 | /* Measurement setup */ |
| 81 | __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, | 81 | __raw_writel(AD_INTERNAL | (chan->address) | AD_REFp | AD_REFm, |
| 82 | LPC32XX_ADC_SELECT(info->adc_base)); | 82 | LPC32XX_ADC_SELECT(info->adc_base)); |
| @@ -84,7 +84,7 @@ static int lpc32xx_read_raw(struct iio_dev *indio_dev, | |||
| 84 | __raw_writel(AD_PDN_CTRL | AD_STROBE, | 84 | __raw_writel(AD_PDN_CTRL | AD_STROBE, |
| 85 | LPC32XX_ADC_CTRL(info->adc_base)); | 85 | LPC32XX_ADC_CTRL(info->adc_base)); |
| 86 | wait_for_completion(&info->completion); /* set by ISR */ | 86 | wait_for_completion(&info->completion); /* set by ISR */ |
| 87 | clk_disable(info->clk); | 87 | clk_disable_unprepare(info->clk); |
| 88 | *val = info->value; | 88 | *val = info->value; |
| 89 | mutex_unlock(&indio_dev->mlock); | 89 | mutex_unlock(&indio_dev->mlock); |
| 90 | 90 | ||
diff --git a/drivers/staging/wilc1000/coreconfigurator.c b/drivers/staging/wilc1000/coreconfigurator.c index e10c6ffa698a..9568bdb6319b 100644 --- a/drivers/staging/wilc1000/coreconfigurator.c +++ b/drivers/staging/wilc1000/coreconfigurator.c | |||
| @@ -13,12 +13,8 @@ | |||
| 13 | #include "wilc_wlan.h" | 13 | #include "wilc_wlan.h" |
| 14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/etherdevice.h> | ||
| 17 | #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ | 16 | #define TAG_PARAM_OFFSET (MAC_HDR_LEN + TIME_STAMP_LEN + \ |
| 18 | BEACON_INTERVAL_LEN + CAP_INFO_LEN) | 17 | BEACON_INTERVAL_LEN + CAP_INFO_LEN) |
| 19 | #define ADDR1 4 | ||
| 20 | #define ADDR2 10 | ||
| 21 | #define ADDR3 16 | ||
| 22 | 18 | ||
| 23 | /* Basic Frame Type Codes (2-bit) */ | 19 | /* Basic Frame Type Codes (2-bit) */ |
| 24 | enum basic_frame_type { | 20 | enum basic_frame_type { |
| @@ -175,32 +171,38 @@ static inline u8 get_from_ds(u8 *header) | |||
| 175 | return ((header[1] & 0x02) >> 1); | 171 | return ((header[1] & 0x02) >> 1); |
| 176 | } | 172 | } |
| 177 | 173 | ||
| 174 | /* This function extracts the MAC Address in 'address1' field of the MAC */ | ||
| 175 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 176 | static inline void get_address1(u8 *pu8msa, u8 *addr) | ||
| 177 | { | ||
| 178 | memcpy(addr, pu8msa + 4, 6); | ||
| 179 | } | ||
| 180 | |||
| 181 | /* This function extracts the MAC Address in 'address2' field of the MAC */ | ||
| 182 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 183 | static inline void get_address2(u8 *pu8msa, u8 *addr) | ||
| 184 | { | ||
| 185 | memcpy(addr, pu8msa + 10, 6); | ||
| 186 | } | ||
| 187 | |||
| 188 | /* This function extracts the MAC Address in 'address3' field of the MAC */ | ||
| 189 | /* header and updates the MAC Address in the allocated 'addr' variable. */ | ||
| 190 | static inline void get_address3(u8 *pu8msa, u8 *addr) | ||
| 191 | { | ||
| 192 | memcpy(addr, pu8msa + 16, 6); | ||
| 193 | } | ||
| 194 | |||
| 178 | /* This function extracts the BSSID from the incoming WLAN packet based on */ | 195 | /* This function extracts the BSSID from the incoming WLAN packet based on */ |
| 179 | /* the 'from ds' bit, and updates the MAC Address in the allocated 'data' */ | 196 | /* the 'from ds' bit, and updates the MAC Address in the allocated 'addr' */ |
| 180 | /* variable. */ | 197 | /* variable. */ |
| 181 | static inline void get_BSSID(u8 *data, u8 *bssid) | 198 | static inline void get_BSSID(u8 *data, u8 *bssid) |
| 182 | { | 199 | { |
| 183 | if (get_from_ds(data) == 1) | 200 | if (get_from_ds(data) == 1) |
| 184 | /* | 201 | get_address2(data, bssid); |
| 185 | * Extract the MAC Address in 'address2' field of the MAC | ||
| 186 | * header and update the MAC Address in the allocated 'data' | ||
| 187 | * variable. | ||
| 188 | */ | ||
| 189 | ether_addr_copy(data, bssid + ADDR2); | ||
| 190 | else if (get_to_ds(data) == 1) | 202 | else if (get_to_ds(data) == 1) |
| 191 | /* | 203 | get_address1(data, bssid); |
| 192 | * Extract the MAC Address in 'address1' field of the MAC | ||
| 193 | * header and update the MAC Address in the allocated 'data' | ||
| 194 | * variable. | ||
| 195 | */ | ||
| 196 | ether_addr_copy(data, bssid + ADDR1); | ||
| 197 | else | 204 | else |
| 198 | /* | 205 | get_address3(data, bssid); |
| 199 | * Extract the MAC Address in 'address3' field of the MAC | ||
| 200 | * header and update the MAC Address in the allocated 'data' | ||
| 201 | * variable. | ||
| 202 | */ | ||
| 203 | ether_addr_copy(data, bssid + ADDR3); | ||
| 204 | } | 206 | } |
| 205 | 207 | ||
| 206 | /* This function extracts the SSID from a beacon/probe response frame */ | 208 | /* This function extracts the SSID from a beacon/probe response frame */ |
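
The rewritten get_BSSID() also fixes the argument order: the old ether_addr_copy(data, bssid + ADDRn) calls copied *into* the received frame from past the end of the caller's 6-byte bssid buffer, the exact inverse of what was intended. The new helpers read from the frame at the proper offsets and write into bssid. A short usage sketch under the 802.11 header layout the helpers assume (beacon_frame is a hypothetical pointer to a received MAC header):

    /* 802.11 MAC header layout relied on above:
     *   bytes  0..1   frame control (byte 1: ToDS = 0x01, FromDS = 0x02)
     *   bytes  2..3   duration/ID
     *   bytes  4..9   Address 1  -> BSSID when ToDS is set
     *   bytes 10..15  Address 2  -> BSSID when FromDS is set
     *   bytes 16..21  Address 3  -> BSSID otherwise (beacons, probe responses)
     */
    u8 bssid[6];

    get_BSSID(beacon_frame, bssid); /* beacon: ToDS = FromDS = 0, copies bytes 16..21 */
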
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 13844261cd5f..ed776149261e 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
| @@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty, | |||
| 169 | { | 169 | { |
| 170 | struct n_tty_data *ldata = tty->disc_data; | 170 | struct n_tty_data *ldata = tty->disc_data; |
| 171 | 171 | ||
| 172 | tty_audit_add_data(tty, to, n, ldata->icanon); | 172 | tty_audit_add_data(tty, from, n, ldata->icanon); |
| 173 | return copy_to_user(to, from, n); | 173 | return copy_to_user(to, from, n); |
| 174 | } | 174 | } |
| 175 | 175 | ||
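
The one-word n_tty fix is subtler than it looks: `to` is the userspace destination pointer, so the old call audited whatever that __user address happened to alias instead of the data being delivered, while `from` is the kernel-side buffer that actually holds those bytes. It pairs with the tty_audit_add_data() prototype change a few hunks below, which relaxes the data argument to `const void *`. A sketch of the corrected helper with the types spelled out (a condensed restatement, not new behaviour):

    static inline int example_tty_copy_to_user(struct tty_struct *tty,
                                               void __user *to,
                                               const void *from, size_t n)
    {
            struct n_tty_data *ldata = tty->disc_data;

            /* audit the kernel buffer; 'to' cannot be dereferenced here */
            tty_audit_add_data(tty, from, n, ldata->icanon);
            return copy_to_user(to, from, n);
    }
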
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c index c0533a57ec53..910bfee5a88b 100644 --- a/drivers/tty/serial/8250/8250_fsl.c +++ b/drivers/tty/serial/8250/8250_fsl.c | |||
| @@ -60,3 +60,4 @@ int fsl8250_handle_irq(struct uart_port *port) | |||
| 60 | spin_unlock_irqrestore(&up->port.lock, flags); | 60 | spin_unlock_irqrestore(&up->port.lock, flags); |
| 61 | return 1; | 61 | return 1; |
| 62 | } | 62 | } |
| 63 | EXPORT_SYMBOL_GPL(fsl8250_handle_irq); | ||
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index e6f5e12a2d83..6412f1455beb 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig | |||
| @@ -373,6 +373,7 @@ config SERIAL_8250_MID | |||
| 373 | depends on SERIAL_8250 && PCI | 373 | depends on SERIAL_8250 && PCI |
| 374 | select HSU_DMA if SERIAL_8250_DMA | 374 | select HSU_DMA if SERIAL_8250_DMA |
| 375 | select HSU_DMA_PCI if X86_INTEL_MID | 375 | select HSU_DMA_PCI if X86_INTEL_MID |
| 376 | select RATIONAL | ||
| 376 | help | 377 | help |
| 377 | Selecting this option will enable handling of the extra features | 378 | Selecting this option will enable handling of the extra features |
| 378 | present on the UART found on Intel Medfield SOC and various other | 379 | present on the UART found on Intel Medfield SOC and various other |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 1aec4404062d..f38beb28e7ae 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
| @@ -1539,7 +1539,6 @@ config SERIAL_FSL_LPUART | |||
| 1539 | tristate "Freescale lpuart serial port support" | 1539 | tristate "Freescale lpuart serial port support" |
| 1540 | depends on HAS_DMA | 1540 | depends on HAS_DMA |
| 1541 | select SERIAL_CORE | 1541 | select SERIAL_CORE |
| 1542 | select SERIAL_EARLYCON | ||
| 1543 | help | 1542 | help |
| 1544 | Support for the on-chip lpuart on some Freescale SOCs. | 1543 | Support for the on-chip lpuart on some Freescale SOCs. |
| 1545 | 1544 | ||
| @@ -1547,6 +1546,7 @@ config SERIAL_FSL_LPUART_CONSOLE | |||
| 1547 | bool "Console on Freescale lpuart serial port" | 1546 | bool "Console on Freescale lpuart serial port" |
| 1548 | depends on SERIAL_FSL_LPUART=y | 1547 | depends on SERIAL_FSL_LPUART=y |
| 1549 | select SERIAL_CORE_CONSOLE | 1548 | select SERIAL_CORE_CONSOLE |
| 1549 | select SERIAL_EARLYCON | ||
| 1550 | help | 1550 | help |
| 1551 | If you have enabled the lpuart serial port on the Freescale SoCs, | 1551 | If you have enabled the lpuart serial port on the Freescale SoCs, |
| 1552 | you can make it the console by answering Y to this option. | 1552 | you can make it the console by answering Y to this option. |
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c index 681e0f3d5e0e..a1c0a89d9c7f 100644 --- a/drivers/tty/serial/bcm63xx_uart.c +++ b/drivers/tty/serial/bcm63xx_uart.c | |||
| @@ -474,7 +474,7 @@ static int bcm_uart_startup(struct uart_port *port) | |||
| 474 | 474 | ||
| 475 | /* register irq and enable rx interrupts */ | 475 | /* register irq and enable rx interrupts */ |
| 476 | ret = request_irq(port->irq, bcm_uart_interrupt, 0, | 476 | ret = request_irq(port->irq, bcm_uart_interrupt, 0, |
| 477 | bcm_uart_type(port), port); | 477 | dev_name(port->dev), port); |
| 478 | if (ret) | 478 | if (ret) |
| 479 | return ret; | 479 | return ret; |
| 480 | bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); | 480 | bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); |
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index 6813e316e9ff..2f80bc7e44fb 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c | |||
| @@ -894,7 +894,7 @@ static int etraxfs_uart_probe(struct platform_device *pdev) | |||
| 894 | up->regi_ser = of_iomap(np, 0); | 894 | up->regi_ser = of_iomap(np, 0); |
| 895 | up->port.dev = &pdev->dev; | 895 | up->port.dev = &pdev->dev; |
| 896 | 896 | ||
| 897 | up->gpios = mctrl_gpio_init(&pdev->dev, 0); | 897 | up->gpios = mctrl_gpio_init_noauto(&pdev->dev, 0); |
| 898 | if (IS_ERR(up->gpios)) | 898 | if (IS_ERR(up->gpios)) |
| 899 | return PTR_ERR(up->gpios); | 899 | return PTR_ERR(up->gpios); |
| 900 | 900 | ||
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c index 90ca082935f6..3d245cd3d8e6 100644 --- a/drivers/tty/tty_audit.c +++ b/drivers/tty/tty_audit.c | |||
| @@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, | |||
| 265 | * | 265 | * |
| 266 | * Audit @data of @size from @tty, if necessary. | 266 | * Audit @data of @size from @tty, if necessary. |
| 267 | */ | 267 | */ |
| 268 | void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 268 | void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 269 | size_t size, unsigned icanon) | 269 | size_t size, unsigned icanon) |
| 270 | { | 270 | { |
| 271 | struct tty_audit_buf *buf; | 271 | struct tty_audit_buf *buf; |
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 0c41dbcb90b8..bcc8e1e8bb72 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c | |||
| @@ -1282,18 +1282,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch) | |||
| 1282 | int was_stopped = tty->stopped; | 1282 | int was_stopped = tty->stopped; |
| 1283 | 1283 | ||
| 1284 | if (tty->ops->send_xchar) { | 1284 | if (tty->ops->send_xchar) { |
| 1285 | down_read(&tty->termios_rwsem); | ||
| 1285 | tty->ops->send_xchar(tty, ch); | 1286 | tty->ops->send_xchar(tty, ch); |
| 1287 | up_read(&tty->termios_rwsem); | ||
| 1286 | return 0; | 1288 | return 0; |
| 1287 | } | 1289 | } |
| 1288 | 1290 | ||
| 1289 | if (tty_write_lock(tty, 0) < 0) | 1291 | if (tty_write_lock(tty, 0) < 0) |
| 1290 | return -ERESTARTSYS; | 1292 | return -ERESTARTSYS; |
| 1291 | 1293 | ||
| 1294 | down_read(&tty->termios_rwsem); | ||
| 1292 | if (was_stopped) | 1295 | if (was_stopped) |
| 1293 | start_tty(tty); | 1296 | start_tty(tty); |
| 1294 | tty->ops->write(tty, &ch, 1); | 1297 | tty->ops->write(tty, &ch, 1); |
| 1295 | if (was_stopped) | 1298 | if (was_stopped) |
| 1296 | stop_tty(tty); | 1299 | stop_tty(tty); |
| 1300 | up_read(&tty->termios_rwsem); | ||
| 1297 | tty_write_unlock(tty); | 1301 | tty_write_unlock(tty); |
| 1298 | return 0; | 1302 | return 0; |
| 1299 | } | 1303 | } |
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 9c5aebfe7053..1445dd39aa62 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
| @@ -1147,16 +1147,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | |||
| 1147 | spin_unlock_irq(&tty->flow_lock); | 1147 | spin_unlock_irq(&tty->flow_lock); |
| 1148 | break; | 1148 | break; |
| 1149 | case TCIOFF: | 1149 | case TCIOFF: |
| 1150 | down_read(&tty->termios_rwsem); | ||
| 1151 | if (STOP_CHAR(tty) != __DISABLED_CHAR) | 1150 | if (STOP_CHAR(tty) != __DISABLED_CHAR) |
| 1152 | retval = tty_send_xchar(tty, STOP_CHAR(tty)); | 1151 | retval = tty_send_xchar(tty, STOP_CHAR(tty)); |
| 1153 | up_read(&tty->termios_rwsem); | ||
| 1154 | break; | 1152 | break; |
| 1155 | case TCION: | 1153 | case TCION: |
| 1156 | down_read(&tty->termios_rwsem); | ||
| 1157 | if (START_CHAR(tty) != __DISABLED_CHAR) | 1154 | if (START_CHAR(tty) != __DISABLED_CHAR) |
| 1158 | retval = tty_send_xchar(tty, START_CHAR(tty)); | 1155 | retval = tty_send_xchar(tty, START_CHAR(tty)); |
| 1159 | up_read(&tty->termios_rwsem); | ||
| 1160 | break; | 1156 | break; |
| 1161 | default: | 1157 | default: |
| 1162 | return -EINVAL; | 1158 | return -EINVAL; |
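
Read together, the tty_io.c and tty_ioctl.c hunks move ownership of termios_rwsem: tty_send_xchar() now takes the read lock itself around both the driver send_xchar() path and the generic write path, and the TCIOFF/TCION callers drop their own down_read() so the semaphore is no longer acquired recursively (a nested down_read() can deadlock against a queued writer). A condensed sketch of the resulting locking shape (the stopped/start_tty handling of the original is elided):

    int example_send_xchar(struct tty_struct *tty, char ch)
    {
            if (tty->ops->send_xchar) {
                    /* driver path: hold the lock only across the call */
                    down_read(&tty->termios_rwsem);
                    tty->ops->send_xchar(tty, ch);
                    up_read(&tty->termios_rwsem);
                    return 0;
            }

            /* generic path: write lock for the queue, read lock for termios */
            if (tty_write_lock(tty, 0) < 0)
                    return -ERESTARTSYS;
            down_read(&tty->termios_rwsem);
            tty->ops->write(tty, &ch, 1);
            up_read(&tty->termios_rwsem);
            tty_write_unlock(tty);
            return 0;
    }
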
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 5af8f1874c1a..629e3c865072 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -592,7 +592,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) | |||
| 592 | 592 | ||
| 593 | /* Restart the work queue in case no characters kick it off. Safe if | 593 | /* Restart the work queue in case no characters kick it off. Safe if |
| 594 | already running */ | 594 | already running */ |
| 595 | schedule_work(&tty->port->buf.work); | 595 | tty_buffer_restart_work(tty->port); |
| 596 | 596 | ||
| 597 | tty_unlock(tty); | 597 | tty_unlock(tty); |
| 598 | return retval; | 598 | return retval; |
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 6ccbf60cdd5c..5a048b7b92e8 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c | |||
| @@ -84,6 +84,12 @@ struct ci_hdrc_imx_data { | |||
| 84 | struct imx_usbmisc_data *usbmisc_data; | 84 | struct imx_usbmisc_data *usbmisc_data; |
| 85 | bool supports_runtime_pm; | 85 | bool supports_runtime_pm; |
| 86 | bool in_lpm; | 86 | bool in_lpm; |
| 87 | /* SoCs before i.MX6 (except imx23/imx28) need three clks */ | ||
| 88 | bool need_three_clks; | ||
| 89 | struct clk *clk_ipg; | ||
| 90 | struct clk *clk_ahb; | ||
| 91 | struct clk *clk_per; | ||
| 92 | /* --------------------------------- */ | ||
| 87 | }; | 93 | }; |
| 88 | 94 | ||
| 89 | /* Common functions shared by usbmisc drivers */ | 95 | /* Common functions shared by usbmisc drivers */ |
| @@ -135,6 +141,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) | |||
| 135 | } | 141 | } |
| 136 | 142 | ||
| 137 | /* End of common functions shared by usbmisc drivers*/ | 143 | /* End of common functions shared by usbmisc drivers*/ |
| 144 | static int imx_get_clks(struct device *dev) | ||
| 145 | { | ||
| 146 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 147 | int ret = 0; | ||
| 148 | |||
| 149 | data->clk_ipg = devm_clk_get(dev, "ipg"); | ||
| 150 | if (IS_ERR(data->clk_ipg)) { | ||
| 151 | /* If the platform only needs one clock */ | ||
| 152 | data->clk = devm_clk_get(dev, NULL); | ||
| 153 | if (IS_ERR(data->clk)) { | ||
| 154 | ret = PTR_ERR(data->clk); | ||
| 155 | dev_err(dev, | ||
| 156 | "Failed to get clks, err=%ld,%ld\n", | ||
| 157 | PTR_ERR(data->clk), PTR_ERR(data->clk_ipg)); | ||
| 158 | return ret; | ||
| 159 | } | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | data->clk_ahb = devm_clk_get(dev, "ahb"); | ||
| 164 | if (IS_ERR(data->clk_ahb)) { | ||
| 165 | ret = PTR_ERR(data->clk_ahb); | ||
| 166 | dev_err(dev, | ||
| 167 | "Failed to get ahb clock, err=%d\n", ret); | ||
| 168 | return ret; | ||
| 169 | } | ||
| 170 | |||
| 171 | data->clk_per = devm_clk_get(dev, "per"); | ||
| 172 | if (IS_ERR(data->clk_per)) { | ||
| 173 | ret = PTR_ERR(data->clk_per); | ||
| 174 | dev_err(dev, | ||
| 175 | "Failed to get per clock, err=%d\n", ret); | ||
| 176 | return ret; | ||
| 177 | } | ||
| 178 | |||
| 179 | data->need_three_clks = true; | ||
| 180 | return ret; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int imx_prepare_enable_clks(struct device *dev) | ||
| 184 | { | ||
| 185 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 186 | int ret = 0; | ||
| 187 | |||
| 188 | if (data->need_three_clks) { | ||
| 189 | ret = clk_prepare_enable(data->clk_ipg); | ||
| 190 | if (ret) { | ||
| 191 | dev_err(dev, | ||
| 192 | "Failed to prepare/enable ipg clk, err=%d\n", | ||
| 193 | ret); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | ret = clk_prepare_enable(data->clk_ahb); | ||
| 198 | if (ret) { | ||
| 199 | dev_err(dev, | ||
| 200 | "Failed to prepare/enable ahb clk, err=%d\n", | ||
| 201 | ret); | ||
| 202 | clk_disable_unprepare(data->clk_ipg); | ||
| 203 | return ret; | ||
| 204 | } | ||
| 205 | |||
| 206 | ret = clk_prepare_enable(data->clk_per); | ||
| 207 | if (ret) { | ||
| 208 | dev_err(dev, | ||
| 209 | "Failed to prepare/enable per clk, err=%d\n", | ||
| 210 | ret); | ||
| 211 | clk_disable_unprepare(data->clk_ahb); | ||
| 212 | clk_disable_unprepare(data->clk_ipg); | ||
| 213 | return ret; | ||
| 214 | } | ||
| 215 | } else { | ||
| 216 | ret = clk_prepare_enable(data->clk); | ||
| 217 | if (ret) { | ||
| 218 | dev_err(dev, | ||
| 219 | "Failed to prepare/enable clk, err=%d\n", | ||
| 220 | ret); | ||
| 221 | return ret; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | return ret; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void imx_disable_unprepare_clks(struct device *dev) | ||
| 229 | { | ||
| 230 | struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); | ||
| 231 | |||
| 232 | if (data->need_three_clks) { | ||
| 233 | clk_disable_unprepare(data->clk_per); | ||
| 234 | clk_disable_unprepare(data->clk_ahb); | ||
| 235 | clk_disable_unprepare(data->clk_ipg); | ||
| 236 | } else { | ||
| 237 | clk_disable_unprepare(data->clk); | ||
| 238 | } | ||
| 239 | } | ||
| 138 | 240 | ||
| 139 | static int ci_hdrc_imx_probe(struct platform_device *pdev) | 241 | static int ci_hdrc_imx_probe(struct platform_device *pdev) |
| 140 | { | 242 | { |
| @@ -145,31 +247,31 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 145 | .flags = CI_HDRC_SET_NON_ZERO_TTHA, | 247 | .flags = CI_HDRC_SET_NON_ZERO_TTHA, |
| 146 | }; | 248 | }; |
| 147 | int ret; | 249 | int ret; |
| 148 | const struct of_device_id *of_id = | 250 | const struct of_device_id *of_id; |
| 149 | of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); | 251 | const struct ci_hdrc_imx_platform_flag *imx_platform_flag; |
| 150 | const struct ci_hdrc_imx_platform_flag *imx_platform_flag = of_id->data; | 252 | |
| 253 | of_id = of_match_device(ci_hdrc_imx_dt_ids, &pdev->dev); | ||
| 254 | if (!of_id) | ||
| 255 | return -ENODEV; | ||
| 256 | |||
| 257 | imx_platform_flag = of_id->data; | ||
| 151 | 258 | ||
| 152 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | 259 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); |
| 153 | if (!data) | 260 | if (!data) |
| 154 | return -ENOMEM; | 261 | return -ENOMEM; |
| 155 | 262 | ||
| 263 | platform_set_drvdata(pdev, data); | ||
| 156 | data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); | 264 | data->usbmisc_data = usbmisc_get_init_data(&pdev->dev); |
| 157 | if (IS_ERR(data->usbmisc_data)) | 265 | if (IS_ERR(data->usbmisc_data)) |
| 158 | return PTR_ERR(data->usbmisc_data); | 266 | return PTR_ERR(data->usbmisc_data); |
| 159 | 267 | ||
| 160 | data->clk = devm_clk_get(&pdev->dev, NULL); | 268 | ret = imx_get_clks(&pdev->dev); |
| 161 | if (IS_ERR(data->clk)) { | 269 | if (ret) |
| 162 | dev_err(&pdev->dev, | 270 | return ret; |
| 163 | "Failed to get clock, err=%ld\n", PTR_ERR(data->clk)); | ||
| 164 | return PTR_ERR(data->clk); | ||
| 165 | } | ||
| 166 | 271 | ||
| 167 | ret = clk_prepare_enable(data->clk); | 272 | ret = imx_prepare_enable_clks(&pdev->dev); |
| 168 | if (ret) { | 273 | if (ret) |
| 169 | dev_err(&pdev->dev, | ||
| 170 | "Failed to prepare or enable clock, err=%d\n", ret); | ||
| 171 | return ret; | 274 | return ret; |
| 172 | } | ||
| 173 | 275 | ||
| 174 | data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); | 276 | data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0); |
| 175 | if (IS_ERR(data->phy)) { | 277 | if (IS_ERR(data->phy)) { |
| @@ -212,8 +314,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 212 | goto disable_device; | 314 | goto disable_device; |
| 213 | } | 315 | } |
| 214 | 316 | ||
| 215 | platform_set_drvdata(pdev, data); | ||
| 216 | |||
| 217 | if (data->supports_runtime_pm) { | 317 | if (data->supports_runtime_pm) { |
| 218 | pm_runtime_set_active(&pdev->dev); | 318 | pm_runtime_set_active(&pdev->dev); |
| 219 | pm_runtime_enable(&pdev->dev); | 319 | pm_runtime_enable(&pdev->dev); |
| @@ -226,7 +326,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) | |||
| 226 | disable_device: | 326 | disable_device: |
| 227 | ci_hdrc_remove_device(data->ci_pdev); | 327 | ci_hdrc_remove_device(data->ci_pdev); |
| 228 | err_clk: | 328 | err_clk: |
| 229 | clk_disable_unprepare(data->clk); | 329 | imx_disable_unprepare_clks(&pdev->dev); |
| 230 | return ret; | 330 | return ret; |
| 231 | } | 331 | } |
| 232 | 332 | ||
| @@ -240,7 +340,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) | |||
| 240 | pm_runtime_put_noidle(&pdev->dev); | 340 | pm_runtime_put_noidle(&pdev->dev); |
| 241 | } | 341 | } |
| 242 | ci_hdrc_remove_device(data->ci_pdev); | 342 | ci_hdrc_remove_device(data->ci_pdev); |
| 243 | clk_disable_unprepare(data->clk); | 343 | imx_disable_unprepare_clks(&pdev->dev); |
| 244 | 344 | ||
| 245 | return 0; | 345 | return 0; |
| 246 | } | 346 | } |
| @@ -252,7 +352,7 @@ static int imx_controller_suspend(struct device *dev) | |||
| 252 | 352 | ||
| 253 | dev_dbg(dev, "at %s\n", __func__); | 353 | dev_dbg(dev, "at %s\n", __func__); |
| 254 | 354 | ||
| 255 | clk_disable_unprepare(data->clk); | 355 | imx_disable_unprepare_clks(dev); |
| 256 | data->in_lpm = true; | 356 | data->in_lpm = true; |
| 257 | 357 | ||
| 258 | return 0; | 358 | return 0; |
| @@ -270,7 +370,7 @@ static int imx_controller_resume(struct device *dev) | |||
| 270 | return 0; | 370 | return 0; |
| 271 | } | 371 | } |
| 272 | 372 | ||
| 273 | ret = clk_prepare_enable(data->clk); | 373 | ret = imx_prepare_enable_clks(dev); |
| 274 | if (ret) | 374 | if (ret) |
| 275 | return ret; | 375 | return ret; |
| 276 | 376 | ||
| @@ -285,7 +385,7 @@ static int imx_controller_resume(struct device *dev) | |||
| 285 | return 0; | 385 | return 0; |
| 286 | 386 | ||
| 287 | clk_disable: | 387 | clk_disable: |
| 288 | clk_disable_unprepare(data->clk); | 388 | imx_disable_unprepare_clks(dev); |
| 289 | return ret; | 389 | return ret; |
| 290 | } | 390 | } |
| 291 | 391 | ||
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index 080b7be3daf0..58c8485a0715 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c | |||
| @@ -322,8 +322,10 @@ static ssize_t ci_role_write(struct file *file, const char __user *ubuf, | |||
| 322 | return -EINVAL; | 322 | return -EINVAL; |
| 323 | 323 | ||
| 324 | pm_runtime_get_sync(ci->dev); | 324 | pm_runtime_get_sync(ci->dev); |
| 325 | disable_irq(ci->irq); | ||
| 325 | ci_role_stop(ci); | 326 | ci_role_stop(ci); |
| 326 | ret = ci_role_start(ci, role); | 327 | ret = ci_role_start(ci, role); |
| 328 | enable_irq(ci->irq); | ||
| 327 | pm_runtime_put_sync(ci->dev); | 329 | pm_runtime_put_sync(ci->dev); |
| 328 | 330 | ||
| 329 | return ret ? ret : count; | 331 | return ret ? ret : count; |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 8223fe73ea85..391a1225b0ba 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
| @@ -1751,6 +1751,22 @@ static int ci_udc_start(struct usb_gadget *gadget, | |||
| 1751 | return retval; | 1751 | return retval; |
| 1752 | } | 1752 | } |
| 1753 | 1753 | ||
| 1754 | static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci) | ||
| 1755 | { | ||
| 1756 | if (!ci_otg_is_fsm_mode(ci)) | ||
| 1757 | return; | ||
| 1758 | |||
| 1759 | mutex_lock(&ci->fsm.lock); | ||
| 1760 | if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) { | ||
| 1761 | ci->fsm.a_bidl_adis_tmout = 1; | ||
| 1762 | ci_hdrc_otg_fsm_start(ci); | ||
| 1763 | } else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) { | ||
| 1764 | ci->fsm.protocol = PROTO_UNDEF; | ||
| 1765 | ci->fsm.otg->state = OTG_STATE_UNDEFINED; | ||
| 1766 | } | ||
| 1767 | mutex_unlock(&ci->fsm.lock); | ||
| 1768 | } | ||
| 1769 | |||
| 1754 | /** | 1770 | /** |
| 1755 | * ci_udc_stop: unregister a gadget driver | 1771 | * ci_udc_stop: unregister a gadget driver |
| 1756 | */ | 1772 | */ |
| @@ -1775,6 +1791,7 @@ static int ci_udc_stop(struct usb_gadget *gadget) | |||
| 1775 | ci->driver = NULL; | 1791 | ci->driver = NULL; |
| 1776 | spin_unlock_irqrestore(&ci->lock, flags); | 1792 | spin_unlock_irqrestore(&ci->lock, flags); |
| 1777 | 1793 | ||
| 1794 | ci_udc_stop_for_otg_fsm(ci); | ||
| 1778 | return 0; | 1795 | return 0; |
| 1779 | } | 1796 | } |
| 1780 | 1797 | ||
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index fcea4eb36eee..ab8b027e8cc8 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c | |||
| @@ -500,7 +500,11 @@ static int usbmisc_imx_probe(struct platform_device *pdev) | |||
| 500 | { | 500 | { |
| 501 | struct resource *res; | 501 | struct resource *res; |
| 502 | struct imx_usbmisc *data; | 502 | struct imx_usbmisc *data; |
| 503 | struct of_device_id *tmp_dev; | 503 | const struct of_device_id *of_id; |
| 504 | |||
| 505 | of_id = of_match_device(usbmisc_imx_dt_ids, &pdev->dev); | ||
| 506 | if (!of_id) | ||
| 507 | return -ENODEV; | ||
| 504 | 508 | ||
| 505 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); | 509 | data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); |
| 506 | if (!data) | 510 | if (!data) |
| @@ -513,9 +517,7 @@ static int usbmisc_imx_probe(struct platform_device *pdev) | |||
| 513 | if (IS_ERR(data->base)) | 517 | if (IS_ERR(data->base)) |
| 514 | return PTR_ERR(data->base); | 518 | return PTR_ERR(data->base); |
| 515 | 519 | ||
| 516 | tmp_dev = (struct of_device_id *) | 520 | data->ops = (const struct usbmisc_ops *)of_id->data; |
| 517 | of_match_device(usbmisc_imx_dt_ids, &pdev->dev); | ||
| 518 | data->ops = (const struct usbmisc_ops *)tmp_dev->data; | ||
| 519 | platform_set_drvdata(pdev, data); | 521 | platform_set_drvdata(pdev, data); |
| 520 | 522 | ||
| 521 | return 0; | 523 | return 0; |
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 433bbc34a8a4..071964c7847f 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c | |||
| @@ -884,11 +884,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock) | |||
| 884 | 884 | ||
| 885 | add_wait_queue(&usblp->wwait, &waita); | 885 | add_wait_queue(&usblp->wwait, &waita); |
| 886 | for (;;) { | 886 | for (;;) { |
| 887 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 888 | if (mutex_lock_interruptible(&usblp->mut)) { | 887 | if (mutex_lock_interruptible(&usblp->mut)) { |
| 889 | rc = -EINTR; | 888 | rc = -EINTR; |
| 890 | break; | 889 | break; |
| 891 | } | 890 | } |
| 891 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 892 | rc = usblp_wtest(usblp, nonblock); | 892 | rc = usblp_wtest(usblp, nonblock); |
| 893 | mutex_unlock(&usblp->mut); | 893 | mutex_unlock(&usblp->mut); |
| 894 | if (rc <= 0) | 894 | if (rc <= 0) |
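
The usblp reorder fixes a classic sleeping-primitive ordering bug: mutex_lock_interruptible() may block, and calling it after set_current_state(TASK_INTERRUPTIBLE) both trips the kernel's "do not call blocking ops when !TASK_RUNNING" warning and risks losing the armed sleep state. The safe shape is to take the mutex first and mark the task interruptible immediately before the condition check. A stripped-down sketch of the corrected loop (error plumbing simplified):

    static int example_wwait(struct usblp *usblp, int nonblock)
    {
            DECLARE_WAITQUEUE(waita, current);
            int rc;

            add_wait_queue(&usblp->wwait, &waita);
            for (;;) {
                    /* may sleep, so the task must still be TASK_RUNNING */
                    if (mutex_lock_interruptible(&usblp->mut)) {
                            rc = -EINTR;
                            break;
                    }
                    /* arm the sleep state right before testing the condition */
                    set_current_state(TASK_INTERRUPTIBLE);
                    rc = usblp_wtest(usblp, nonblock);
                    mutex_unlock(&usblp->mut);
                    if (rc <= 0)
                            break;
                    schedule();
            }
            set_current_state(TASK_RUNNING);
            remove_wait_queue(&usblp->wwait, &waita);
            return rc;
    }
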
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index a99c89e78126..dd280108758f 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig | |||
| @@ -77,8 +77,7 @@ config USB_OTG_BLACKLIST_HUB | |||
| 77 | 77 | ||
| 78 | config USB_OTG_FSM | 78 | config USB_OTG_FSM |
| 79 | tristate "USB 2.0 OTG FSM implementation" | 79 | tristate "USB 2.0 OTG FSM implementation" |
| 80 | depends on USB | 80 | depends on USB && USB_OTG |
| 81 | select USB_OTG | ||
| 82 | select USB_PHY | 81 | select USB_PHY |
| 83 | help | 82 | help |
| 84 | Implements OTG Finite State Machine as specified in On-The-Go | 83 | Implements OTG Finite State Machine as specified in On-The-Go |
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index e79baf73c234..571c21727ff9 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c | |||
| @@ -324,12 +324,13 @@ void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg) | |||
| 324 | */ | 324 | */ |
| 325 | static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) | 325 | static void dwc2_hcd_rem_wakeup(struct dwc2_hsotg *hsotg) |
| 326 | { | 326 | { |
| 327 | if (hsotg->lx_state == DWC2_L2) { | 327 | if (hsotg->bus_suspended) { |
| 328 | hsotg->flags.b.port_suspend_change = 1; | 328 | hsotg->flags.b.port_suspend_change = 1; |
| 329 | usb_hcd_resume_root_hub(hsotg->priv); | 329 | usb_hcd_resume_root_hub(hsotg->priv); |
| 330 | } else { | ||
| 331 | hsotg->flags.b.port_l1_change = 1; | ||
| 332 | } | 330 | } |
| 331 | |||
| 332 | if (hsotg->lx_state == DWC2_L1) | ||
| 333 | hsotg->flags.b.port_l1_change = 1; | ||
| 333 | } | 334 | } |
| 334 | 335 | ||
| 335 | /** | 336 | /** |
| @@ -1428,8 +1429,8 @@ static void dwc2_wakeup_detected(unsigned long data) | |||
| 1428 | dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", | 1429 | dev_dbg(hsotg->dev, "Clear Resume: HPRT0=%0x\n", |
| 1429 | dwc2_readl(hsotg->regs + HPRT0)); | 1430 | dwc2_readl(hsotg->regs + HPRT0)); |
| 1430 | 1431 | ||
| 1431 | hsotg->bus_suspended = 0; | ||
| 1432 | dwc2_hcd_rem_wakeup(hsotg); | 1432 | dwc2_hcd_rem_wakeup(hsotg); |
| 1433 | hsotg->bus_suspended = 0; | ||
| 1433 | 1434 | ||
| 1434 | /* Change to L0 state */ | 1435 | /* Change to L0 state */ |
| 1435 | hsotg->lx_state = DWC2_L0; | 1436 | hsotg->lx_state = DWC2_L0; |
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 5859b0fa19ee..e61d773cf65e 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c | |||
| @@ -108,7 +108,8 @@ static const struct dwc2_core_params params_rk3066 = { | |||
| 108 | .host_ls_low_power_phy_clk = -1, | 108 | .host_ls_low_power_phy_clk = -1, |
| 109 | .ts_dline = -1, | 109 | .ts_dline = -1, |
| 110 | .reload_ctl = -1, | 110 | .reload_ctl = -1, |
| 111 | .ahbcfg = 0x7, /* INCR16 */ | 111 | .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << |
| 112 | GAHBCFG_HBSTLEN_SHIFT, | ||
| 112 | .uframe_sched = -1, | 113 | .uframe_sched = -1, |
| 113 | .external_id_pin_ctl = -1, | 114 | .external_id_pin_ctl = -1, |
| 114 | .hibernation = -1, | 115 | .hibernation = -1, |
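
The rk3066 .ahbcfg change is a magic-number repair: HBSTLEN is a multi-bit field inside GAHBCFG, not the register's low bits, so the raw 0x7 did not program INCR16 at all. Assuming the field layout in dwc2/hw.h (burst-length field starting at bit 1, with bit 0 the global interrupt enable), the arithmetic works out as follows:

    /* Assumed from dwc2/hw.h: HBSTLEN occupies bits [4:1] of GAHBCFG,
     * INCR16 is field encoding 7, and bit 0 is the global interrupt enable. */
    #define GAHBCFG_HBSTLEN_SHIFT   1
    #define GAHBCFG_HBSTLEN_INCR16  7

    /* old: raw 0x7 = 0b0111 -> HBSTLEN field = 0b011 (INCR4), and bit 0
     *      set as a side effect
     * new: 7 << 1 = 0xe     -> HBSTLEN field = 0b0111 (INCR16), bit 0
     *      left untouched */
    static const u32 example_ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
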
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 77a622cb48ab..009d83048c8c 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
| @@ -34,6 +34,8 @@ | |||
| 34 | #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 | 34 | #define PCI_DEVICE_ID_INTEL_BSW 0x22b7 |
| 35 | #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 | 35 | #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 |
| 36 | #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 | 36 | #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 |
| 37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa | ||
| 38 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | ||
| 37 | 39 | ||
| 38 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | 40 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
| 39 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; | 41 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
| @@ -210,6 +212,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
| 210 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, | 212 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, |
| 211 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, | 213 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, |
| 212 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, | 214 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, |
| 215 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, | ||
| 216 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | ||
| 213 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 217 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
| 214 | { } /* Terminating Entry */ | 218 | { } /* Terminating Entry */ |
| 215 | }; | 219 | }; |
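The BXT and APL lines above are the entire enabling change: PCI drivers match on an ID table and the core handles binding. A minimal sketch of the same pattern, using hypothetical EXAMPLE_* IDs rather than real vendor assignments:

#include <linux/module.h>
#include <linux/pci.h>

/* EXAMPLE_* IDs are hypothetical placeholders, not real assignments. */
#define EXAMPLE_VENDOR_ID	0x1234
#define EXAMPLE_DEVICE_ID	0x5678

static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID), },
	{ }	/* terminating entry */
};
/* Emits modalias data so userspace can autoload the module on hotplug. */
MODULE_DEVICE_TABLE(pci, example_id_table);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* managed enable: automatically undone when the device goes away */
	return pcim_enable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example-pci",
	.id_table	= example_id_table,
	.probe		= example_probe,
};
module_pci_driver(example_driver);

MODULE_LICENSE("GPL");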
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 55ba447fdf8b..e24a01cc98df 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -2744,12 +2744,34 @@ int dwc3_gadget_init(struct dwc3 *dwc) | |||
| 2744 | } | 2744 | } |
| 2745 | 2745 | ||
| 2746 | dwc->gadget.ops = &dwc3_gadget_ops; | 2746 | dwc->gadget.ops = &dwc3_gadget_ops; |
| 2747 | dwc->gadget.max_speed = USB_SPEED_SUPER; | ||
| 2748 | dwc->gadget.speed = USB_SPEED_UNKNOWN; | 2747 | dwc->gadget.speed = USB_SPEED_UNKNOWN; |
| 2749 | dwc->gadget.sg_supported = true; | 2748 | dwc->gadget.sg_supported = true; |
| 2750 | dwc->gadget.name = "dwc3-gadget"; | 2749 | dwc->gadget.name = "dwc3-gadget"; |
| 2751 | 2750 | ||
| 2752 | /* | 2751 | /* |
| 2752 | * FIXME: We might be setting max_speed to less than SUPER; however, | ||
| 2753 | * versions < 2.20a of dwc3 have an issue with metastability (documented | ||
| 2754 | * elsewhere in this driver) which means we can't set max_speed to | ||
| 2755 | * anything lower than SUPER. | ||
| 2756 | * | ||
| 2757 | * Because gadget.max_speed is only used by composite.c and function | ||
| 2758 | * drivers (i.e. it won't go into dwc3's registers), we allow this | ||
| 2759 | * to happen so we avoid sending a SuperSpeed Capability descriptor | ||
| 2760 | * together with our BOS descriptor, as that could confuse the host into | ||
| 2761 | * thinking we can handle super speed. | ||
| 2762 | * | ||
| 2763 | * Note that, in fact, we won't even support GetBOS requests when speed | ||
| 2764 | * is less than super speed, because we don't yet have a means to tell | ||
| 2765 | * composite.c that we are USB 2.0 + LPM ECN. | ||
| 2766 | */ | ||
| 2767 | if (dwc->revision < DWC3_REVISION_220A) | ||
| 2768 | dwc3_trace(trace_dwc3_gadget, | ||
| 2769 | "Changing max_speed on rev %08x\n", | ||
| 2770 | dwc->revision); | ||
| 2771 | |||
| 2772 | dwc->gadget.max_speed = dwc->maximum_speed; | ||
| 2773 | |||
| 2774 | /* | ||
| 2753 | * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize | 2775 | * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize |
| 2754 | * on ep out. | 2776 | * on ep out. |
| 2755 | */ | 2777 | */ |
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c index 23933bdf2d9d..ddc3aad886b7 100644 --- a/drivers/usb/gadget/function/f_loopback.c +++ b/drivers/usb/gadget/function/f_loopback.c | |||
| @@ -329,7 +329,7 @@ static int alloc_requests(struct usb_composite_dev *cdev, | |||
| 329 | for (i = 0; i < loop->qlen && result == 0; i++) { | 329 | for (i = 0; i < loop->qlen && result == 0; i++) { |
| 330 | result = -ENOMEM; | 330 | result = -ENOMEM; |
| 331 | 331 | ||
| 332 | in_req = usb_ep_alloc_request(loop->in_ep, GFP_KERNEL); | 332 | in_req = usb_ep_alloc_request(loop->in_ep, GFP_ATOMIC); |
| 333 | if (!in_req) | 333 | if (!in_req) |
| 334 | goto fail; | 334 | goto fail; |
| 335 | 335 | ||
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index f0f2b066ac08..f92f5aff0dd5 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c | |||
| @@ -1633,7 +1633,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) | |||
| 1633 | spin_lock(&udc->lock); | 1633 | spin_lock(&udc->lock); |
| 1634 | 1634 | ||
| 1635 | int_enb = usba_int_enb_get(udc); | 1635 | int_enb = usba_int_enb_get(udc); |
| 1636 | status = usba_readl(udc, INT_STA) & int_enb; | 1636 | status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED); |
| 1637 | DBG(DBG_INT, "irq, status=%#08x\n", status); | 1637 | DBG(DBG_INT, "irq, status=%#08x\n", status); |
| 1638 | 1638 | ||
| 1639 | if (status & USBA_DET_SUSPEND) { | 1639 | if (status & USBA_DET_SUSPEND) { |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 5d2d7e954bd4..0230965fb78c 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -782,12 +782,15 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 782 | status |= USB_PORT_STAT_SUSPEND; | 782 | status |= USB_PORT_STAT_SUSPEND; |
| 783 | } | 783 | } |
| 784 | } | 784 | } |
| 785 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 | 785 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0 && |
| 786 | && (raw_port_status & PORT_POWER) | 786 | (raw_port_status & PORT_POWER)) { |
| 787 | && (bus_state->suspended_ports & (1 << wIndex))) { | 787 | if (bus_state->suspended_ports & (1 << wIndex)) { |
| 788 | bus_state->suspended_ports &= ~(1 << wIndex); | 788 | bus_state->suspended_ports &= ~(1 << wIndex); |
| 789 | if (hcd->speed < HCD_USB3) | 789 | if (hcd->speed < HCD_USB3) |
| 790 | bus_state->port_c_suspend |= 1 << wIndex; | 790 | bus_state->port_c_suspend |= 1 << wIndex; |
| 791 | } | ||
| 792 | bus_state->resume_done[wIndex] = 0; | ||
| 793 | clear_bit(wIndex, &bus_state->resuming_ports); | ||
| 791 | } | 794 | } |
| 792 | if (raw_port_status & PORT_CONNECT) { | 795 | if (raw_port_status & PORT_CONNECT) { |
| 793 | status |= USB_PORT_STAT_CONNECTION; | 796 | status |= USB_PORT_STAT_CONNECTION; |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index fa836251ca21..6c5e8133cf87 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -3896,28 +3896,6 @@ cleanup: | |||
| 3896 | return ret; | 3896 | return ret; |
| 3897 | } | 3897 | } |
| 3898 | 3898 | ||
| 3899 | static int ep_ring_is_processing(struct xhci_hcd *xhci, | ||
| 3900 | int slot_id, unsigned int ep_index) | ||
| 3901 | { | ||
| 3902 | struct xhci_virt_device *xdev; | ||
| 3903 | struct xhci_ring *ep_ring; | ||
| 3904 | struct xhci_ep_ctx *ep_ctx; | ||
| 3905 | struct xhci_virt_ep *xep; | ||
| 3906 | dma_addr_t hw_deq; | ||
| 3907 | |||
| 3908 | xdev = xhci->devs[slot_id]; | ||
| 3909 | xep = &xhci->devs[slot_id]->eps[ep_index]; | ||
| 3910 | ep_ring = xep->ring; | ||
| 3911 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); | ||
| 3912 | |||
| 3913 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) != EP_STATE_RUNNING) | ||
| 3914 | return 0; | ||
| 3915 | |||
| 3916 | hw_deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; | ||
| 3917 | return (hw_deq != | ||
| 3918 | xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue)); | ||
| 3919 | } | ||
| 3920 | |||
| 3921 | /* | 3899 | /* |
| 3922 | * Check transfer ring to guarantee there is enough room for the urb. | 3900 | * Check transfer ring to guarantee there is enough room for the urb. |
| 3923 | * Update ISO URB start_frame and interval. | 3901 | * Update ISO URB start_frame and interval. |
| @@ -3983,10 +3961,12 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
| 3983 | } | 3961 | } |
| 3984 | 3962 | ||
| 3985 | /* Calculate the start frame and put it in urb->start_frame. */ | 3963 | /* Calculate the start frame and put it in urb->start_frame. */ |
| 3986 | if (HCC_CFC(xhci->hcc_params) && | 3964 | if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { |
| 3987 | ep_ring_is_processing(xhci, slot_id, ep_index)) { | 3965 | if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) == |
| 3988 | urb->start_frame = xep->next_frame_id; | 3966 | EP_STATE_RUNNING) { |
| 3989 | goto skip_start_over; | 3967 | urb->start_frame = xep->next_frame_id; |
| 3968 | goto skip_start_over; | ||
| 3969 | } | ||
| 3990 | } | 3970 | } |
| 3991 | 3971 | ||
| 3992 | start_frame = readl(&xhci->run_regs->microframe_index); | 3972 | start_frame = readl(&xhci->run_regs->microframe_index); |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6e7dc6f93978..dfa44d3e8eee 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci) | |||
| 175 | command |= CMD_RESET; | 175 | command |= CMD_RESET; |
| 176 | writel(command, &xhci->op_regs->command); | 176 | writel(command, &xhci->op_regs->command); |
| 177 | 177 | ||
| 178 | /* Existing Intel xHCI controllers require a delay of 1 ms | ||
| 179 | * after setting the CMD_RESET bit and before accessing any | ||
| 180 | * HC registers. This allows the HC to complete the | ||
| 181 | * reset operation and be ready for HC register access. | ||
| 182 | * Without this delay, the subsequent HC register access | ||
| 183 | * may, in rare cases, result in a system hang. | ||
| 184 | */ | ||
| 185 | if (xhci->quirks & XHCI_INTEL_HOST) | ||
| 186 | udelay(1000); | ||
| 187 | |||
| 178 | ret = xhci_handshake(&xhci->op_regs->command, | 188 | ret = xhci_handshake(&xhci->op_regs->command, |
| 179 | CMD_RESET, 0, 10 * 1000 * 1000); | 189 | CMD_RESET, 0, 10 * 1000 * 1000); |
| 180 | if (ret) | 190 | if (ret) |
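The new settle time is gated on the XHCI_INTEL_HOST quirk bit, so unaffected controllers pay nothing for the workaround. A sketch of the quirk-flag idiom in isolation, with hypothetical names throughout:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>

/* Hypothetical names; real drivers keep one quirk bit per erratum. */
#define EXAMPLE_QUIRK_RESET_SETTLE	BIT(0)
#define EXAMPLE_CMD_RESET		BIT(1)

struct example_hc {
	void __iomem	*cmd_reg;
	unsigned long	quirks;
};

static void example_hc_reset(struct example_hc *hc)
{
	writel(readl(hc->cmd_reg) | EXAMPLE_CMD_RESET, hc->cmd_reg);

	/* Erratum workaround: give the controller time to settle before
	 * the next register access; skipped on unaffected hardware. */
	if (hc->quirks & EXAMPLE_QUIRK_RESET_SETTLE)
		udelay(1000);

	/* ...then poll for the reset bit to self-clear... */
}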
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index ba13529cbd52..18cfc0a361cb 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev) | |||
| 132 | /*-------------------------------------------------------------------------*/ | 132 | /*-------------------------------------------------------------------------*/ |
| 133 | 133 | ||
| 134 | #ifndef CONFIG_BLACKFIN | 134 | #ifndef CONFIG_BLACKFIN |
| 135 | static int musb_ulpi_read(struct usb_phy *phy, u32 offset) | 135 | static int musb_ulpi_read(struct usb_phy *phy, u32 reg) |
| 136 | { | 136 | { |
| 137 | void __iomem *addr = phy->io_priv; | 137 | void __iomem *addr = phy->io_priv; |
| 138 | int i = 0; | 138 | int i = 0; |
| @@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset) | |||
| 151 | * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. | 151 | * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. |
| 152 | */ | 152 | */ |
| 153 | 153 | ||
| 154 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); | 154 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); |
| 155 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, | 155 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, |
| 156 | MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); | 156 | MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); |
| 157 | 157 | ||
| @@ -176,7 +176,7 @@ out: | |||
| 176 | return ret; | 176 | return ret; |
| 177 | } | 177 | } |
| 178 | 178 | ||
| 179 | static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) | 179 | static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg) |
| 180 | { | 180 | { |
| 181 | void __iomem *addr = phy->io_priv; | 181 | void __iomem *addr = phy->io_priv; |
| 182 | int i = 0; | 182 | int i = 0; |
| @@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) | |||
| 191 | power &= ~MUSB_POWER_SUSPENDM; | 191 | power &= ~MUSB_POWER_SUSPENDM; |
| 192 | musb_writeb(addr, MUSB_POWER, power); | 192 | musb_writeb(addr, MUSB_POWER, power); |
| 193 | 193 | ||
| 194 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); | 194 | musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg); |
| 195 | musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); | 195 | musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val); |
| 196 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); | 196 | musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); |
| 197 | 197 | ||
| 198 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) | 198 | while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) |
| @@ -1668,7 +1668,7 @@ EXPORT_SYMBOL_GPL(musb_interrupt); | |||
| 1668 | static bool use_dma = 1; | 1668 | static bool use_dma = 1; |
| 1669 | 1669 | ||
| 1670 | /* "modprobe ... use_dma=0" etc */ | 1670 | /* "modprobe ... use_dma=0" etc */ |
| 1671 | module_param(use_dma, bool, 0); | 1671 | module_param(use_dma, bool, 0644); |
| 1672 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); | 1672 | MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); |
| 1673 | 1673 | ||
| 1674 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) | 1674 | void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) |
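The module_param() permission change from 0 to 0644 does not affect parsing at load time; it only controls sysfs exposure. A minimal sketch:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool use_dma = true;

/* Mode 0 hides the parameter from sysfs entirely; 0644 publishes it at
 * /sys/module/<name>/parameters/use_dma, world-readable, root-writable. */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

MODULE_LICENSE("GPL");

A sysfs write updates the variable directly, with no callback into the driver (that would require module_param_cb()), so the code must tolerate use_dma changing between reads.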
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 26c65e66cc0f..795a45b1b25b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -112,22 +112,32 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | |||
| 112 | struct musb *musb = ep->musb; | 112 | struct musb *musb = ep->musb; |
| 113 | void __iomem *epio = ep->regs; | 113 | void __iomem *epio = ep->regs; |
| 114 | u16 csr; | 114 | u16 csr; |
| 115 | u16 lastcsr = 0; | ||
| 116 | int retries = 1000; | 115 | int retries = 1000; |
| 117 | 116 | ||
| 118 | csr = musb_readw(epio, MUSB_TXCSR); | 117 | csr = musb_readw(epio, MUSB_TXCSR); |
| 119 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 118 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
| 120 | if (csr != lastcsr) | ||
| 121 | dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | ||
| 122 | lastcsr = csr; | ||
| 123 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; | 119 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY; |
| 124 | musb_writew(epio, MUSB_TXCSR, csr); | 120 | musb_writew(epio, MUSB_TXCSR, csr); |
| 125 | csr = musb_readw(epio, MUSB_TXCSR); | 121 | csr = musb_readw(epio, MUSB_TXCSR); |
| 126 | if (WARN(retries-- < 1, | 122 | |
| 123 | /* | ||
| 124 | * FIXME: the tx fifo flush sometimes fails; this has been | ||
| 125 | * observed during device disconnect on AM335x. | ||
| 126 | * | ||
| 127 | * To reproduce the issue, ensure tx urb(s) are queued when | ||
| 128 | * unplugging the usb device connected to the AM335x usb | ||
| 129 | * host port. | ||
| 130 | * | ||
| 131 | * Using a usb-ethernet device and running iperf | ||
| 132 | * (client on AM335x) has a very high chance of triggering it. | ||
| 133 | * | ||
| 134 | * To see the issue when aborting the tx channel, turn on | ||
| 135 | * dev_dbg() in musb_cleanup_urb() with CPPI enabled. | ||
| 136 | */ | ||
| 137 | if (dev_WARN_ONCE(musb->controller, retries-- < 1, | ||
| 127 | "Could not flush host TX%d fifo: csr: %04x\n", | 138 | "Could not flush host TX%d fifo: csr: %04x\n", |
| 128 | ep->epnum, csr)) | 139 | ep->epnum, csr)) |
| 129 | return; | 140 | return; |
| 130 | mdelay(1); | ||
| 131 | } | 141 | } |
| 132 | } | 142 | } |
| 133 | 143 | ||
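The rewritten flush loop drops the mdelay() and the per-iteration debug print in favor of a tight bounded spin that warns once on exhaustion. The idiom in isolation, with hypothetical register bits:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/types.h>

/* EX_* bits and the register layout are hypothetical. */
#define EX_FIFO_NOT_EMPTY	0x0002
#define EX_FLUSH		0x0008

static void example_flush_fifo(struct device *dev, void __iomem *csr_reg)
{
	int retries = 1000;
	u16 csr = readw(csr_reg);

	while (csr & EX_FIFO_NOT_EMPTY) {
		writew(csr | EX_FLUSH, csr_reg);
		csr = readw(csr_reg);
		/* Warn once instead of flooding the log, and give up
		 * instead of spinning forever on stuck hardware. */
		if (dev_WARN_ONCE(dev, retries-- < 1,
				  "could not flush fifo: csr %04x\n", csr))
			return;
	}
}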
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 173132416170..22e8ecb6bfbd 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
| @@ -21,7 +21,6 @@ config AB8500_USB | |||
| 21 | config FSL_USB2_OTG | 21 | config FSL_USB2_OTG |
| 22 | bool "Freescale USB OTG Transceiver Driver" | 22 | bool "Freescale USB OTG Transceiver Driver" |
| 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM | 23 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM |
| 24 | select USB_OTG | ||
| 25 | select USB_PHY | 24 | select USB_PHY |
| 26 | help | 25 | help |
| 27 | Enable this to support Freescale USB OTG transceiver. | 26 | Enable this to support Freescale USB OTG transceiver. |
| @@ -168,8 +167,7 @@ config USB_QCOM_8X16_PHY | |||
| 168 | 167 | ||
| 169 | config USB_MV_OTG | 168 | config USB_MV_OTG |
| 170 | tristate "Marvell USB OTG support" | 169 | tristate "Marvell USB OTG support" |
| 171 | depends on USB_EHCI_MV && USB_MV_UDC && PM | 170 | depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG |
| 172 | select USB_OTG | ||
| 173 | select USB_PHY | 171 | select USB_PHY |
| 174 | help | 172 | help |
| 175 | Say Y here if you want to build Marvell USB OTG transceiver | 173 | Say Y here if you want to build Marvell USB OTG transceiver |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 4d863ebc117c..b7536af777ab 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
| @@ -452,10 +452,13 @@ static int mxs_phy_probe(struct platform_device *pdev) | |||
| 452 | struct clk *clk; | 452 | struct clk *clk; |
| 453 | struct mxs_phy *mxs_phy; | 453 | struct mxs_phy *mxs_phy; |
| 454 | int ret; | 454 | int ret; |
| 455 | const struct of_device_id *of_id = | 455 | const struct of_device_id *of_id; |
| 456 | of_match_device(mxs_phy_dt_ids, &pdev->dev); | ||
| 457 | struct device_node *np = pdev->dev.of_node; | 456 | struct device_node *np = pdev->dev.of_node; |
| 458 | 457 | ||
| 458 | of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev); | ||
| 459 | if (!of_id) | ||
| 460 | return -ENODEV; | ||
| 461 | |||
| 459 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 462 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 460 | base = devm_ioremap_resource(&pdev->dev, res); | 463 | base = devm_ioremap_resource(&pdev->dev, res); |
| 461 | if (IS_ERR(base)) | 464 | if (IS_ERR(base)) |
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 1270906ccb95..c4bf2de6d14e 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c | |||
| @@ -105,7 +105,6 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
| 105 | extcon = extcon_get_extcon_dev(config->extcon); | 105 | extcon = extcon_get_extcon_dev(config->extcon); |
| 106 | if (!extcon) | 106 | if (!extcon) |
| 107 | return -EPROBE_DEFER; | 107 | return -EPROBE_DEFER; |
| 108 | otg_dev->extcon = extcon; | ||
| 109 | 108 | ||
| 110 | otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); | 109 | otg_dev = devm_kzalloc(&pdev->dev, sizeof(*otg_dev), GFP_KERNEL); |
| 111 | if (!otg_dev) | 110 | if (!otg_dev) |
| @@ -115,6 +114,7 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
| 115 | if (IS_ERR(otg_dev->base)) | 114 | if (IS_ERR(otg_dev->base)) |
| 116 | return PTR_ERR(otg_dev->base); | 115 | return PTR_ERR(otg_dev->base); |
| 117 | 116 | ||
| 117 | otg_dev->extcon = extcon; | ||
| 118 | otg_dev->id_nb.notifier_call = omap_otg_id_notifier; | 118 | otg_dev->id_nb.notifier_call = omap_otg_id_notifier; |
| 119 | otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; | 119 | otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; |
| 120 | 120 | ||
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 685fef71d3d1..f2280606b73c 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -161,6 +161,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 161 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 | 161 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 |
| 162 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 | 162 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 |
| 163 | #define NOVATELWIRELESS_PRODUCT_E371 0x9011 | 163 | #define NOVATELWIRELESS_PRODUCT_E371 0x9011 |
| 164 | #define NOVATELWIRELESS_PRODUCT_U620L 0x9022 | ||
| 164 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 | 165 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 |
| 165 | #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 | 166 | #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 |
| 166 | 167 | ||
| @@ -354,6 +355,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 354 | /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * | 355 | /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick * |
| 355 | * It seems to contain a Qualcomm QSC6240/6290 chipset */ | 356 | * It seems to contain a Qualcomm QSC6240/6290 chipset */ |
| 356 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 | 357 | #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 |
| 358 | #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01 | ||
| 357 | 359 | ||
| 358 | /* iBall 3.5G connect wireless modem */ | 360 | /* iBall 3.5G connect wireless modem */ |
| 359 | #define IBALL_3_5G_CONNECT 0x9605 | 361 | #define IBALL_3_5G_CONNECT 0x9605 |
| @@ -519,6 +521,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = { | |||
| 519 | .sendsetup = BIT(0) | BIT(1), | 521 | .sendsetup = BIT(0) | BIT(1), |
| 520 | }; | 522 | }; |
| 521 | 523 | ||
| 524 | static const struct option_blacklist_info four_g_w100_blacklist = { | ||
| 525 | .sendsetup = BIT(1) | BIT(2), | ||
| 526 | .reserved = BIT(3), | ||
| 527 | }; | ||
| 528 | |||
| 522 | static const struct option_blacklist_info alcatel_x200_blacklist = { | 529 | static const struct option_blacklist_info alcatel_x200_blacklist = { |
| 523 | .sendsetup = BIT(0) | BIT(1), | 530 | .sendsetup = BIT(0) | BIT(1), |
| 524 | .reserved = BIT(4), | 531 | .reserved = BIT(4), |
| @@ -1052,6 +1059,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 1052 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, | 1059 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, |
| 1053 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, | 1060 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, |
| 1054 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, | 1061 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) }, |
| 1062 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) }, | ||
| 1055 | 1063 | ||
| 1056 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, | 1064 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, |
| 1057 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, | 1065 | { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, |
| @@ -1641,6 +1649,9 @@ static const struct usb_device_id option_ids[] = { | |||
| 1641 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), | 1649 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), |
| 1642 | .driver_info = (kernel_ulong_t)&four_g_w14_blacklist | 1650 | .driver_info = (kernel_ulong_t)&four_g_w14_blacklist |
| 1643 | }, | 1651 | }, |
| 1652 | { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100), | ||
| 1653 | .driver_info = (kernel_ulong_t)&four_g_w100_blacklist | ||
| 1654 | }, | ||
| 1644 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, | 1655 | { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) }, |
| 1645 | { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, | 1656 | { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, |
| 1646 | { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, | 1657 | { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 5022fcfa0260..9919d2a9faf2 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | #define DRIVER_AUTHOR "Qualcomm Inc" | 22 | #define DRIVER_AUTHOR "Qualcomm Inc" |
| 23 | #define DRIVER_DESC "Qualcomm USB Serial driver" | 23 | #define DRIVER_DESC "Qualcomm USB Serial driver" |
| 24 | 24 | ||
| 25 | #define QUECTEL_EC20_PID 0x9215 | ||
| 26 | |||
| 25 | /* standard device layouts supported by this driver */ | 27 | /* standard device layouts supported by this driver */ |
| 26 | enum qcserial_layouts { | 28 | enum qcserial_layouts { |
| 27 | QCSERIAL_G2K = 0, /* Gobi 2000 */ | 29 | QCSERIAL_G2K = 0, /* Gobi 2000 */ |
| @@ -171,6 +173,38 @@ static const struct usb_device_id id_table[] = { | |||
| 171 | }; | 173 | }; |
| 172 | MODULE_DEVICE_TABLE(usb, id_table); | 174 | MODULE_DEVICE_TABLE(usb, id_table); |
| 173 | 175 | ||
| 176 | static int handle_quectel_ec20(struct device *dev, int ifnum) | ||
| 177 | { | ||
| 178 | int altsetting = 0; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * Quectel EC20 Mini PCIe LTE module layout: | ||
| 182 | * 0: DM/DIAG (use libqcdm from ModemManager for communication) | ||
| 183 | * 1: NMEA | ||
| 184 | * 2: AT-capable modem port | ||
| 185 | * 3: Modem interface | ||
| 186 | * 4: NDIS | ||
| 187 | */ | ||
| 188 | switch (ifnum) { | ||
| 189 | case 0: | ||
| 190 | dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n"); | ||
| 191 | break; | ||
| 192 | case 1: | ||
| 193 | dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n"); | ||
| 194 | break; | ||
| 195 | case 2: | ||
| 196 | case 3: | ||
| 197 | dev_dbg(dev, "Quectel EC20 Modem port found\n"); | ||
| 198 | break; | ||
| 199 | case 4: | ||
| 200 | /* Don't claim the QMI/net interface */ | ||
| 201 | altsetting = -1; | ||
| 202 | break; | ||
| 203 | } | ||
| 204 | |||
| 205 | return altsetting; | ||
| 206 | } | ||
| 207 | |||
| 174 | static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | 208 | static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
| 175 | { | 209 | { |
| 176 | struct usb_host_interface *intf = serial->interface->cur_altsetting; | 210 | struct usb_host_interface *intf = serial->interface->cur_altsetting; |
| @@ -181,6 +215,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 181 | int altsetting = -1; | 215 | int altsetting = -1; |
| 182 | bool sendsetup = false; | 216 | bool sendsetup = false; |
| 183 | 217 | ||
| 218 | /* we only support vendor specific functions */ | ||
| 219 | if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) | ||
| 220 | goto done; | ||
| 221 | |||
| 184 | nintf = serial->dev->actconfig->desc.bNumInterfaces; | 222 | nintf = serial->dev->actconfig->desc.bNumInterfaces; |
| 185 | dev_dbg(dev, "Num Interfaces = %d\n", nintf); | 223 | dev_dbg(dev, "Num Interfaces = %d\n", nintf); |
| 186 | ifnum = intf->desc.bInterfaceNumber; | 224 | ifnum = intf->desc.bInterfaceNumber; |
| @@ -240,6 +278,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 240 | altsetting = -1; | 278 | altsetting = -1; |
| 241 | break; | 279 | break; |
| 242 | case QCSERIAL_G2K: | 280 | case QCSERIAL_G2K: |
| 281 | /* handle non-standard layouts */ | ||
| 282 | if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) { | ||
| 283 | altsetting = handle_quectel_ec20(dev, ifnum); | ||
| 284 | goto done; | ||
| 285 | } | ||
| 286 | |||
| 243 | /* | 287 | /* |
| 244 | * Gobi 2K+ USB layout: | 288 | * Gobi 2K+ USB layout: |
| 245 | * 0: QMI/net | 289 | * 0: QMI/net |
| @@ -301,29 +345,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) | |||
| 301 | break; | 345 | break; |
| 302 | case QCSERIAL_HWI: | 346 | case QCSERIAL_HWI: |
| 303 | /* | 347 | /* |
| 304 | * Huawei layout: | 348 | * Huawei devices map functions by subclass + protocol |
| 305 | * 0: AT-capable modem port | 349 | * instead of interface numbers. The protocol identifies |
| 306 | * 1: DM/DIAG | 350 | * a specific function, while the subclass indicates a |
| 307 | * 2: AT-capable modem port | 351 | * specific firmware source |
| 308 | * 3: CCID-compatible PCSC interface | 352 | * |
| 309 | * 4: QMI/net | 353 | * This is a blacklist of functions known to be |
| 310 | * 5: NMEA | 354 | * non-serial. The rest are assumed to be serial and |
| 355 | * will be handled by this driver | ||
| 311 | */ | 356 | */ |
| 312 | switch (ifnum) { | 357 | switch (intf->desc.bInterfaceProtocol) { |
| 313 | case 0: | 358 | /* QMI combined (qmi_wwan) */ |
| 314 | case 2: | 359 | case 0x07: |
| 315 | dev_dbg(dev, "Modem port found\n"); | 360 | case 0x37: |
| 316 | break; | 361 | case 0x67: |
| 317 | case 1: | 362 | /* QMI data (qmi_wwan) */ |
| 318 | dev_dbg(dev, "DM/DIAG interface found\n"); | 363 | case 0x08: |
| 319 | break; | 364 | case 0x38: |
| 320 | case 5: | 365 | case 0x68: |
| 321 | dev_dbg(dev, "NMEA GPS interface found\n"); | 366 | /* QMI control (qmi_wwan) */ |
| 322 | break; | 367 | case 0x09: |
| 323 | default: | 368 | case 0x39: |
| 324 | /* don't claim any unsupported interface */ | 369 | case 0x69: |
| 370 | /* NCM like (huawei_cdc_ncm) */ | ||
| 371 | case 0x16: | ||
| 372 | case 0x46: | ||
| 373 | case 0x76: | ||
| 325 | altsetting = -1; | 374 | altsetting = -1; |
| 326 | break; | 375 | break; |
| 376 | default: | ||
| 377 | dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n", | ||
| 378 | intf->desc.bInterfaceClass, | ||
| 379 | intf->desc.bInterfaceSubClass, | ||
| 380 | intf->desc.bInterfaceProtocol); | ||
| 327 | } | 381 | } |
| 328 | break; | 382 | break; |
| 329 | default: | 383 | default: |
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index e9da41d9fe7f..2694df2f4559 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
| @@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = { | |||
| 159 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, | 159 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, |
| 160 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, | 160 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
| 161 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, | 161 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
| 162 | { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, | ||
| 162 | { } /* terminator */ | 163 | { } /* terminator */ |
| 163 | }; | 164 | }; |
| 164 | 165 | ||
| @@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = { | |||
| 191 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, | 192 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, |
| 192 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, | 193 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
| 193 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, | 194 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
| 195 | { USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) }, | ||
| 194 | { } /* terminator */ | 196 | { } /* terminator */ |
| 195 | }; | 197 | }; |
| 196 | 198 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index 4a2423e84d55..98f35c656c02 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h | |||
| @@ -56,6 +56,10 @@ | |||
| 56 | #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID | 56 | #define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID |
| 57 | #define ABBOTT_STRIP_PORT_ID 0x3420 | 57 | #define ABBOTT_STRIP_PORT_ID 0x3420 |
| 58 | 58 | ||
| 59 | /* Honeywell vendor and product IDs */ | ||
| 60 | #define HONEYWELL_VENDOR_ID 0x10ac | ||
| 61 | #define HONEYWELL_HGI80_PRODUCT_ID 0x0102 /* Honeywell HGI80 */ | ||
| 62 | |||
| 59 | /* Commands */ | 63 | /* Commands */ |
| 60 | #define TI_GET_VERSION 0x01 | 64 | #define TI_GET_VERSION 0x01 |
| 61 | #define TI_GET_PORT_STATUS 0x02 | 65 | #define TI_GET_PORT_STATUS 0x02 |
diff --git a/fs/Kconfig b/fs/Kconfig index da3f32f1a4e4..6ce72d8d1ee1 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
| @@ -46,6 +46,12 @@ config FS_DAX | |||
| 46 | or if unsure, say N. Saying Y will increase the size of the kernel | 46 | or if unsure, say N. Saying Y will increase the size of the kernel |
| 47 | by about 5kB. | 47 | by about 5kB. |
| 48 | 48 | ||
| 49 | config FS_DAX_PMD | ||
| 50 | bool | ||
| 51 | default FS_DAX | ||
| 52 | depends on FS_DAX | ||
| 53 | depends on BROKEN | ||
| 54 | |||
| 49 | endif # BLOCK | 55 | endif # BLOCK |
| 50 | 56 | ||
| 51 | # Posix ACL utility routines | 57 | # Posix ACL utility routines |
diff --git a/fs/block_dev.c b/fs/block_dev.c index bb0dfb1c7af1..c25639e907bd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -390,9 +390,17 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, | |||
| 390 | struct page *page) | 390 | struct page *page) |
| 391 | { | 391 | { |
| 392 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 392 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
| 393 | int result = -EOPNOTSUPP; | ||
| 394 | |||
| 393 | if (!ops->rw_page || bdev_get_integrity(bdev)) | 395 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
| 394 | return -EOPNOTSUPP; | 396 | return result; |
| 395 | return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); | 397 | |
| 398 | result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); | ||
| 399 | if (result) | ||
| 400 | return result; | ||
| 401 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ); | ||
| 402 | blk_queue_exit(bdev->bd_queue); | ||
| 403 | return result; | ||
| 396 | } | 404 | } |
| 397 | EXPORT_SYMBOL_GPL(bdev_read_page); | 405 | EXPORT_SYMBOL_GPL(bdev_read_page); |
| 398 | 406 | ||
| @@ -421,14 +429,20 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, | |||
| 421 | int result; | 429 | int result; |
| 422 | int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; | 430 | int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE; |
| 423 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 431 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
| 432 | |||
| 424 | if (!ops->rw_page || bdev_get_integrity(bdev)) | 433 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
| 425 | return -EOPNOTSUPP; | 434 | return -EOPNOTSUPP; |
| 435 | result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL); | ||
| 436 | if (result) | ||
| 437 | return result; | ||
| 438 | |||
| 426 | set_page_writeback(page); | 439 | set_page_writeback(page); |
| 427 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); | 440 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw); |
| 428 | if (result) | 441 | if (result) |
| 429 | end_page_writeback(page); | 442 | end_page_writeback(page); |
| 430 | else | 443 | else |
| 431 | unlock_page(page); | 444 | unlock_page(page); |
| 445 | blk_queue_exit(bdev->bd_queue); | ||
| 432 | return result; | 446 | return result; |
| 433 | } | 447 | } |
| 434 | EXPORT_SYMBOL_GPL(bdev_write_page); | 448 | EXPORT_SYMBOL_GPL(bdev_write_page); |
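Both rw_page paths now take a queue usage reference before calling into the driver, so the request queue cannot be torn down mid-call. The shape of the bracket, reduced to its essentials:

#include <linux/blkdev.h>

/* Sketch only: the real callers also handle set_page_writeback() and
 * friends; the point is the reference bracket around ->rw_page(). */
static int example_rw_page(struct block_device *bdev, sector_t sector,
			   struct page *page, int rw)
{
	int ret = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);

	if (ret)
		return ret;	/* queue is going away; don't call the driver */

	ret = bdev->bd_disk->fops->rw_page(bdev, sector, page, rw);
	blk_queue_exit(bdev->bd_queue);
	return ret;
}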
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index c81ce7f200a6..a7a1b218f308 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
| @@ -1636,6 +1636,116 @@ const struct file_operations configfs_dir_operations = { | |||
| 1636 | .iterate = configfs_readdir, | 1636 | .iterate = configfs_readdir, |
| 1637 | }; | 1637 | }; |
| 1638 | 1638 | ||
| 1639 | /** | ||
| 1640 | * configfs_register_group - creates a parent-child relation between two groups | ||
| 1641 | * @parent_group: parent group | ||
| 1642 | * @group: child group | ||
| 1643 | * | ||
| 1644 | * Links the groups, creates a dentry for the child and attaches it | ||
| 1645 | * to the parent dentry. | ||
| 1646 | * | ||
| 1647 | * Return: 0 on success, negative errno code on error | ||
| 1648 | */ | ||
| 1649 | int configfs_register_group(struct config_group *parent_group, | ||
| 1650 | struct config_group *group) | ||
| 1651 | { | ||
| 1652 | struct configfs_subsystem *subsys = parent_group->cg_subsys; | ||
| 1653 | struct dentry *parent; | ||
| 1654 | int ret; | ||
| 1655 | |||
| 1656 | mutex_lock(&subsys->su_mutex); | ||
| 1657 | link_group(parent_group, group); | ||
| 1658 | mutex_unlock(&subsys->su_mutex); | ||
| 1659 | |||
| 1660 | parent = parent_group->cg_item.ci_dentry; | ||
| 1661 | |||
| 1662 | mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); | ||
| 1663 | ret = create_default_group(parent_group, group); | ||
| 1664 | if (!ret) { | ||
| 1665 | spin_lock(&configfs_dirent_lock); | ||
| 1666 | configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata); | ||
| 1667 | spin_unlock(&configfs_dirent_lock); | ||
| 1668 | } | ||
| 1669 | mutex_unlock(&d_inode(parent)->i_mutex); | ||
| 1670 | return ret; | ||
| 1671 | } | ||
| 1672 | EXPORT_SYMBOL(configfs_register_group); | ||
| 1673 | |||
| 1674 | /** | ||
| 1675 | * configfs_unregister_group() - unregisters a child group from its parent | ||
| 1676 | * @group: child group to be unregistered | ||
| 1677 | * | ||
| 1678 | * Undoes configfs_register_group() | ||
| 1679 | */ | ||
| 1680 | void configfs_unregister_group(struct config_group *group) | ||
| 1681 | { | ||
| 1682 | struct configfs_subsystem *subsys = group->cg_subsys; | ||
| 1683 | struct dentry *dentry = group->cg_item.ci_dentry; | ||
| 1684 | struct dentry *parent = group->cg_item.ci_parent->ci_dentry; | ||
| 1685 | |||
| 1686 | mutex_lock_nested(&d_inode(parent)->i_mutex, I_MUTEX_PARENT); | ||
| 1687 | spin_lock(&configfs_dirent_lock); | ||
| 1688 | configfs_detach_prep(dentry, NULL); | ||
| 1689 | spin_unlock(&configfs_dirent_lock); | ||
| 1690 | |||
| 1691 | configfs_detach_group(&group->cg_item); | ||
| 1692 | d_inode(dentry)->i_flags |= S_DEAD; | ||
| 1693 | dont_mount(dentry); | ||
| 1694 | d_delete(dentry); | ||
| 1695 | mutex_unlock(&d_inode(parent)->i_mutex); | ||
| 1696 | |||
| 1697 | dput(dentry); | ||
| 1698 | |||
| 1699 | mutex_lock(&subsys->su_mutex); | ||
| 1700 | unlink_group(group); | ||
| 1701 | mutex_unlock(&subsys->su_mutex); | ||
| 1702 | } | ||
| 1703 | EXPORT_SYMBOL(configfs_unregister_group); | ||
| 1704 | |||
| 1705 | /** | ||
| 1706 | * configfs_register_default_group() - allocates and registers a child group | ||
| 1707 | * @parent_group: parent group | ||
| 1708 | * @name: child group name | ||
| 1709 | * @item_type: child item type description | ||
| 1710 | * | ||
| 1711 | * Boilerplate to allocate and register a child group with its parent. We need | ||
| 1712 | * kzalloc'ed memory because the child's default_group is initially empty. | ||
| 1713 | * | ||
| 1714 | * Return: allocated config group or ERR_PTR() on error | ||
| 1715 | */ | ||
| 1716 | struct config_group * | ||
| 1717 | configfs_register_default_group(struct config_group *parent_group, | ||
| 1718 | const char *name, | ||
| 1719 | struct config_item_type *item_type) | ||
| 1720 | { | ||
| 1721 | int ret; | ||
| 1722 | struct config_group *group; | ||
| 1723 | |||
| 1724 | group = kzalloc(sizeof(*group), GFP_KERNEL); | ||
| 1725 | if (!group) | ||
| 1726 | return ERR_PTR(-ENOMEM); | ||
| 1727 | config_group_init_type_name(group, name, item_type); | ||
| 1728 | |||
| 1729 | ret = configfs_register_group(parent_group, group); | ||
| 1730 | if (ret) { | ||
| 1731 | kfree(group); | ||
| 1732 | return ERR_PTR(ret); | ||
| 1733 | } | ||
| 1734 | return group; | ||
| 1735 | } | ||
| 1736 | EXPORT_SYMBOL(configfs_register_default_group); | ||
| 1737 | |||
| 1738 | /** | ||
| 1739 | * configfs_unregister_default_group() - unregisters and frees a child group | ||
| 1740 | * @group: the group to act on | ||
| 1741 | */ | ||
| 1742 | void configfs_unregister_default_group(struct config_group *group) | ||
| 1743 | { | ||
| 1744 | configfs_unregister_group(group); | ||
| 1745 | kfree(group); | ||
| 1746 | } | ||
| 1747 | EXPORT_SYMBOL(configfs_unregister_default_group); | ||
| 1748 | |||
| 1639 | int configfs_register_subsystem(struct configfs_subsystem *subsys) | 1749 | int configfs_register_subsystem(struct configfs_subsystem *subsys) |
| 1640 | { | 1750 | { |
| 1641 | int err; | 1751 | int err; |
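A caller's view of the new helpers, sketched with an invented subsystem; "stats" and stats_item_type are placeholder names:

#include <linux/configfs.h>
#include <linux/err.h>

/* A real user fills in ct_owner, attributes and operations. */
static struct config_item_type stats_item_type;
static struct config_group *stats_group;

static int example_attach_stats(struct config_group *parent)
{
	stats_group = configfs_register_default_group(parent, "stats",
						      &stats_item_type);
	return PTR_ERR_OR_ZERO(stats_group);
}

static void example_detach_stats(void)
{
	/* unlinks from the parent and frees the kzalloc'ed group */
	configfs_unregister_default_group(stats_group);
}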
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
| @@ -541,6 +541,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 541 | unsigned long pfn; | 541 | unsigned long pfn; |
| 542 | int result = 0; | 542 | int result = 0; |
| 543 | 543 | ||
| 544 | /* dax pmd mappings are broken wrt gup and fork */ | ||
| 545 | if (!IS_ENABLED(CONFIG_FS_DAX_PMD)) | ||
| 546 | return VM_FAULT_FALLBACK; | ||
| 547 | |||
| 544 | /* Fall back to PTEs if we're going to COW */ | 548 | /* Fall back to PTEs if we're going to COW */ |
| 545 | if (write && !(vma->vm_flags & VM_SHARED)) | 549 | if (write && !(vma->vm_flags & VM_SHARED)) |
| 546 | return VM_FAULT_FALLBACK; | 550 | return VM_FAULT_FALLBACK; |
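The two hunks work as a kill switch: FS_DAX_PMD defaults on whenever FS_DAX is set, but "depends on BROKEN" keeps it unselectable in any normal build, so the runtime test always falls back until the gup/fork problems are fixed. A sketch of the IS_ENABLED() side:

#include <linux/kconfig.h>
#include <linux/mm.h>

static int example_pmd_fault(void)
{
	/* IS_ENABLED() folds to a constant, so the disabled branch is
	 * discarded by the compiler yet still parsed and type-checked,
	 * unlike code hidden behind #ifdef. */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* ...the real PMD fault path would continue here... */
	return 0;
}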
diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 3a71cea68420..748d35afc902 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c | |||
| @@ -569,6 +569,8 @@ static int parse_options(char *options, struct super_block *sb) | |||
| 569 | /* Fall through */ | 569 | /* Fall through */ |
| 570 | case Opt_dax: | 570 | case Opt_dax: |
| 571 | #ifdef CONFIG_FS_DAX | 571 | #ifdef CONFIG_FS_DAX |
| 572 | ext2_msg(sb, KERN_WARNING, | ||
| 573 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); | ||
| 572 | set_opt(sbi->s_mount_opt, DAX); | 574 | set_opt(sbi->s_mount_opt, DAX); |
| 573 | #else | 575 | #else |
| 574 | ext2_msg(sb, KERN_INFO, "dax option not supported"); | 576 | ext2_msg(sb, KERN_INFO, "dax option not supported"); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 753f4e68b820..c9ab67da6e5a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -1664,8 +1664,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, | |||
| 1664 | } | 1664 | } |
| 1665 | sbi->s_jquota_fmt = m->mount_opt; | 1665 | sbi->s_jquota_fmt = m->mount_opt; |
| 1666 | #endif | 1666 | #endif |
| 1667 | #ifndef CONFIG_FS_DAX | ||
| 1668 | } else if (token == Opt_dax) { | 1667 | } else if (token == Opt_dax) { |
| 1668 | #ifdef CONFIG_FS_DAX | ||
| 1669 | ext4_msg(sb, KERN_WARNING, | ||
| 1670 | "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); | ||
| 1671 | sbi->s_mount_opt |= m->mount_opt; | ||
| 1672 | #else | ||
| 1669 | ext4_msg(sb, KERN_INFO, "dax option not supported"); | 1673 | ext4_msg(sb, KERN_INFO, "dax option not supported"); |
| 1670 | return -1; | 1674 | return -1; |
| 1671 | #endif | 1675 | #endif |
diff --git a/fs/fat/dir.c b/fs/fat/dir.c index 4afc4d9d2e41..8b2127ffb226 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c | |||
| @@ -610,9 +610,9 @@ parse_record: | |||
| 610 | int status = fat_parse_long(inode, &cpos, &bh, &de, | 610 | int status = fat_parse_long(inode, &cpos, &bh, &de, |
| 611 | &unicode, &nr_slots); | 611 | &unicode, &nr_slots); |
| 612 | if (status < 0) { | 612 | if (status < 0) { |
| 613 | ctx->pos = cpos; | 613 | bh = NULL; |
| 614 | ret = status; | 614 | ret = status; |
| 615 | goto out; | 615 | goto end_of_dir; |
| 616 | } else if (status == PARSE_INVALID) | 616 | } else if (status == PARSE_INVALID) |
| 617 | goto record_end; | 617 | goto record_end; |
| 618 | else if (status == PARSE_NOT_LONGNAME) | 618 | else if (status == PARSE_NOT_LONGNAME) |
| @@ -654,8 +654,9 @@ parse_record: | |||
| 654 | fill_len = short_len; | 654 | fill_len = short_len; |
| 655 | 655 | ||
| 656 | start_filldir: | 656 | start_filldir: |
| 657 | if (!fake_offset) | 657 | ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); |
| 658 | ctx->pos = cpos - (nr_slots + 1) * sizeof(struct msdos_dir_entry); | 658 | if (fake_offset && ctx->pos < 2) |
| 659 | ctx->pos = 2; | ||
| 659 | 660 | ||
| 660 | if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { | 661 | if (!memcmp(de->name, MSDOS_DOT, MSDOS_NAME)) { |
| 661 | if (!dir_emit_dot(file, ctx)) | 662 | if (!dir_emit_dot(file, ctx)) |
| @@ -681,14 +682,19 @@ record_end: | |||
| 681 | fake_offset = 0; | 682 | fake_offset = 0; |
| 682 | ctx->pos = cpos; | 683 | ctx->pos = cpos; |
| 683 | goto get_new; | 684 | goto get_new; |
| 685 | |||
| 684 | end_of_dir: | 686 | end_of_dir: |
| 685 | ctx->pos = cpos; | 687 | if (fake_offset && cpos < 2) |
| 688 | ctx->pos = 2; | ||
| 689 | else | ||
| 690 | ctx->pos = cpos; | ||
| 686 | fill_failed: | 691 | fill_failed: |
| 687 | brelse(bh); | 692 | brelse(bh); |
| 688 | if (unicode) | 693 | if (unicode) |
| 689 | __putname(unicode); | 694 | __putname(unicode); |
| 690 | out: | 695 | out: |
| 691 | mutex_unlock(&sbi->s_lock); | 696 | mutex_unlock(&sbi->s_lock); |
| 697 | |||
| 692 | return ret; | 698 | return ret; |
| 693 | } | 699 | } |
| 694 | 700 | ||
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 316adb968b65..de4bdfac0cec 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
| @@ -332,12 +332,17 @@ static void remove_huge_page(struct page *page) | |||
| 332 | * truncation is indicated by end of range being LLONG_MAX | 332 | * truncation is indicated by end of range being LLONG_MAX |
| 333 | * In this case, we first scan the range and release found pages. | 333 | * In this case, we first scan the range and release found pages. |
| 334 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv | 334 | * After releasing pages, hugetlb_unreserve_pages cleans up region/reserv |
| 335 | * maps and global counts. | 335 | * maps and global counts. Page faults cannot race with truncation |
| 336 | * in this routine. hugetlb_no_page() prevents page faults in the | ||
| 337 | * truncated range. It checks i_size before allocation, and again after | ||
| 338 | * with the page table lock for the page held. The same lock must be | ||
| 339 | * acquired to unmap a page. | ||
| 336 | * hole punch is indicated if end is not LLONG_MAX | 340 | * hole punch is indicated if end is not LLONG_MAX |
| 337 | * In the hole punch case we scan the range and release found pages. | 341 | * In the hole punch case we scan the range and release found pages. |
| 338 | * Only when releasing a page is the associated region/reserv map | 342 | * Only when releasing a page is the associated region/reserv map |
| 339 | * deleted. The region/reserv map for ranges without associated | 343 | * deleted. The region/reserv map for ranges without associated |
| 340 | * pages are not modified. | 344 | * pages are not modified. Page faults can race with hole punch. |
| 345 | * This is indicated if we find a mapped page. | ||
| 341 | * Note: If the passed end of range value is beyond the end of file, but | 346 | * Note: If the passed end of range value is beyond the end of file, but |
| 342 | * not LLONG_MAX this routine still performs a hole punch operation. | 347 | * not LLONG_MAX this routine still performs a hole punch operation. |
| 343 | */ | 348 | */ |
| @@ -361,46 +366,37 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
| 361 | next = start; | 366 | next = start; |
| 362 | while (next < end) { | 367 | while (next < end) { |
| 363 | /* | 368 | /* |
| 364 | * Make sure to never grab more pages that we | 369 | * Don't grab more pages than the number left in the range. |
| 365 | * might possibly need. | ||
| 366 | */ | 370 | */ |
| 367 | if (end - next < lookup_nr) | 371 | if (end - next < lookup_nr) |
| 368 | lookup_nr = end - next; | 372 | lookup_nr = end - next; |
| 369 | 373 | ||
| 370 | /* | 374 | /* |
| 371 | * This pagevec_lookup() may return pages past 'end', | 375 | * When no more pages are found, we are done. |
| 372 | * so we must check for page->index > end. | ||
| 373 | */ | 376 | */ |
| 374 | if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) { | 377 | if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) |
| 375 | if (next == start) | 378 | break; |
| 376 | break; | ||
| 377 | next = start; | ||
| 378 | continue; | ||
| 379 | } | ||
| 380 | 379 | ||
| 381 | for (i = 0; i < pagevec_count(&pvec); ++i) { | 380 | for (i = 0; i < pagevec_count(&pvec); ++i) { |
| 382 | struct page *page = pvec.pages[i]; | 381 | struct page *page = pvec.pages[i]; |
| 383 | u32 hash; | 382 | u32 hash; |
| 384 | 383 | ||
| 384 | /* | ||
| 385 | * The page (index) could be beyond end. This is | ||
| 386 | * only possible in the punch hole case, as end is the | ||
| 387 | * maximum page offset in the truncate case. | ||
| 388 | */ | ||
| 389 | next = page->index; | ||
| 390 | if (next >= end) | ||
| 391 | break; | ||
| 392 | |||
| 385 | hash = hugetlb_fault_mutex_hash(h, current->mm, | 393 | hash = hugetlb_fault_mutex_hash(h, current->mm, |
| 386 | &pseudo_vma, | 394 | &pseudo_vma, |
| 387 | mapping, next, 0); | 395 | mapping, next, 0); |
| 388 | mutex_lock(&hugetlb_fault_mutex_table[hash]); | 396 | mutex_lock(&hugetlb_fault_mutex_table[hash]); |
| 389 | 397 | ||
| 390 | lock_page(page); | 398 | lock_page(page); |
| 391 | if (page->index >= end) { | 399 | if (likely(!page_mapped(page))) { |
| 392 | unlock_page(page); | ||
| 393 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | ||
| 394 | next = end; /* we are done */ | ||
| 395 | break; | ||
| 396 | } | ||
| 397 | |||
| 398 | /* | ||
| 399 | * If page is mapped, it was faulted in after being | ||
| 400 | * unmapped. Do nothing in this race case. In the | ||
| 401 | * normal case page is not mapped. | ||
| 402 | */ | ||
| 403 | if (!page_mapped(page)) { | ||
| 404 | bool rsv_on_error = !PagePrivate(page); | 400 | bool rsv_on_error = !PagePrivate(page); |
| 405 | /* | 401 | /* |
| 406 | * We must free the huge page and remove | 402 | * We must free the huge page and remove |
| @@ -421,17 +417,23 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, | |||
| 421 | hugetlb_fix_reserve_counts( | 417 | hugetlb_fix_reserve_counts( |
| 422 | inode, rsv_on_error); | 418 | inode, rsv_on_error); |
| 423 | } | 419 | } |
| 420 | } else { | ||
| 421 | /* | ||
| 422 | * If page is mapped, it was faulted in after | ||
| 423 | * being unmapped. It indicates a race between | ||
| 424 | * hole punch and page fault. Do nothing in | ||
| 425 | * this case. Getting here in a truncate | ||
| 426 | * operation is a bug. | ||
| 427 | */ | ||
| 428 | BUG_ON(truncate_op); | ||
| 424 | } | 429 | } |
| 425 | 430 | ||
| 426 | if (page->index > next) | ||
| 427 | next = page->index; | ||
| 428 | |||
| 429 | ++next; | ||
| 430 | unlock_page(page); | 431 | unlock_page(page); |
| 431 | |||
| 432 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); | 432 | mutex_unlock(&hugetlb_fault_mutex_table[hash]); |
| 433 | } | 433 | } |
| 434 | ++next; | ||
| 434 | huge_pagevec_release(&pvec); | 435 | huge_pagevec_release(&pvec); |
| 436 | cond_resched(); | ||
| 435 | } | 437 | } |
| 436 | 438 | ||
| 437 | if (truncate_op) | 439 | if (truncate_op) |
| @@ -647,9 +649,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, | |||
| 647 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) | 649 | if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) |
| 648 | i_size_write(inode, offset + len); | 650 | i_size_write(inode, offset + len); |
| 649 | inode->i_ctime = CURRENT_TIME; | 651 | inode->i_ctime = CURRENT_TIME; |
| 650 | spin_lock(&inode->i_lock); | ||
| 651 | inode->i_private = NULL; | ||
| 652 | spin_unlock(&inode->i_lock); | ||
| 653 | out: | 652 | out: |
| 654 | mutex_unlock(&inode->i_mutex); | 653 | mutex_unlock(&inode->i_mutex); |
| 655 | return error; | 654 | return error; |
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index 79b113048eac..0a3f9b594602 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c | |||
| @@ -525,6 +525,8 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg | |||
| 525 | switch (rqdata.cmd) { | 525 | switch (rqdata.cmd) { |
| 526 | case NCP_LOCK_EX: | 526 | case NCP_LOCK_EX: |
| 527 | case NCP_LOCK_SH: | 527 | case NCP_LOCK_SH: |
| 528 | if (rqdata.timeout < 0) | ||
| 529 | return -EINVAL; | ||
| 528 | if (rqdata.timeout == 0) | 530 | if (rqdata.timeout == 0) |
| 529 | rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; | 531 | rqdata.timeout = NCP_LOCK_DEFAULT_TIMEOUT; |
| 530 | else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) | 532 | else if (rqdata.timeout > NCP_LOCK_MAX_TIMEOUT) |
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 3b48ac25d8a7..a03f6f433075 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
| @@ -372,6 +372,8 @@ static int ocfs2_mknod(struct inode *dir, | |||
| 372 | mlog_errno(status); | 372 | mlog_errno(status); |
| 373 | goto leave; | 373 | goto leave; |
| 374 | } | 374 | } |
| 375 | /* update inode->i_mode after it has been masked with the umask */ | ||
| 376 | inode->i_mode = mode; | ||
| 375 | 377 | ||
| 376 | handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, | 378 | handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, |
| 377 | S_ISDIR(mode), | 379 | S_ISDIR(mode), |
diff --git a/fs/splice.c b/fs/splice.c index 801c21cd77fe..4cf700d50b40 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
| @@ -809,6 +809,13 @@ static int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_des | |||
| 809 | */ | 809 | */ |
| 810 | static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) | 810 | static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) |
| 811 | { | 811 | { |
| 812 | /* | ||
| 813 | * Check for a pending signal early so the process stays killable | ||
| 814 | * even when buffers are always available | ||
| 815 | */ | ||
| 816 | if (signal_pending(current)) | ||
| 817 | return -ERESTARTSYS; | ||
| 818 | |||
| 812 | while (!pipe->nrbufs) { | 819 | while (!pipe->nrbufs) { |
| 813 | if (!pipe->writers) | 820 | if (!pipe->writers) |
| 814 | return 0; | 821 | return 0; |
| @@ -884,6 +891,7 @@ ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd, | |||
| 884 | 891 | ||
| 885 | splice_from_pipe_begin(sd); | 892 | splice_from_pipe_begin(sd); |
| 886 | do { | 893 | do { |
| 894 | cond_resched(); | ||
| 887 | ret = splice_from_pipe_next(pipe, sd); | 895 | ret = splice_from_pipe_next(pipe, sd); |
| 888 | if (ret > 0) | 896 | if (ret > 0) |
| 889 | ret = splice_from_pipe_feed(pipe, sd, actor); | 897 | ret = splice_from_pipe_feed(pipe, sd, actor); |
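Two small additions make a long splice both killable and scheduler-friendly: a signal check before the wait loop, which otherwise never runs when buffers are always available, plus a cond_resched() per iteration. The idiom in isolation, with a hypothetical do_one_chunk():

#include <linux/errno.h>
#include <linux/sched.h>

static long do_one_chunk(void);	/* hypothetical: >0 moved, 0 done, <0 error */

static long example_copy_loop(void)
{
	long total = 0, ret = 0;

	for (;;) {
		/* Check first: if data is always available the loop never
		 * blocks, so this is the only point that sees the signal. */
		if (signal_pending(current))
			return total ? total : -ERESTARTSYS;

		ret = do_one_chunk();
		if (ret <= 0)
			break;
		total += ret;

		cond_resched();	/* yield periodically on long copies */
	}
	return total ? total : ret;
}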
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c index 590ad9206e3f..02fa1dcc5969 100644 --- a/fs/sysv/inode.c +++ b/fs/sysv/inode.c | |||
| @@ -162,15 +162,8 @@ void sysv_set_inode(struct inode *inode, dev_t rdev) | |||
| 162 | inode->i_fop = &sysv_dir_operations; | 162 | inode->i_fop = &sysv_dir_operations; |
| 163 | inode->i_mapping->a_ops = &sysv_aops; | 163 | inode->i_mapping->a_ops = &sysv_aops; |
| 164 | } else if (S_ISLNK(inode->i_mode)) { | 164 | } else if (S_ISLNK(inode->i_mode)) { |
| 165 | if (inode->i_blocks) { | 165 | inode->i_op = &sysv_symlink_inode_operations; |
| 166 | inode->i_op = &sysv_symlink_inode_operations; | 166 | inode->i_mapping->a_ops = &sysv_aops; |
| 167 | inode->i_mapping->a_ops = &sysv_aops; | ||
| 168 | } else { | ||
| 169 | inode->i_op = &simple_symlink_inode_operations; | ||
| 170 | inode->i_link = (char *)SYSV_I(inode)->i_data; | ||
| 171 | nd_terminate_link(inode->i_link, inode->i_size, | ||
| 172 | sizeof(SYSV_I(inode)->i_data) - 1); | ||
| 173 | } | ||
| 174 | } else | 167 | } else |
| 175 | init_special_inode(inode, inode->i_mode, rdev); | 168 | init_special_inode(inode, inode->i_mode, rdev); |
| 176 | } | 169 | } |
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index e67aeac2aee0..4b74c97d297a 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h | |||
| @@ -136,6 +136,9 @@ drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, | |||
| 136 | 136 | ||
| 137 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state); | 137 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state); |
| 138 | 138 | ||
| 139 | void | ||
| 140 | drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret); | ||
| 141 | |||
| 139 | int __must_check drm_atomic_check_only(struct drm_atomic_state *state); | 142 | int __must_check drm_atomic_check_only(struct drm_atomic_state *state); |
| 140 | int __must_check drm_atomic_commit(struct drm_atomic_state *state); | 143 | int __must_check drm_atomic_commit(struct drm_atomic_state *state); |
| 141 | int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); | 144 | int __must_check drm_atomic_async_commit(struct drm_atomic_state *state); |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 9c747cb14ad8..d2f41477f8ae 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
| @@ -342,10 +342,10 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, | |||
| 342 | struct irq_phys_map *map, bool level); | 342 | struct irq_phys_map *map, bool level); |
| 343 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); | 343 | void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg); |
| 344 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); | 344 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); |
| 345 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu); | ||
| 346 | struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, | 345 | struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, |
| 347 | int virt_irq, int irq); | 346 | int virt_irq, int irq); |
| 348 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); | 347 | int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map); |
| 348 | bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map); | ||
| 349 | 349 | ||
| 350 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) | 350 | #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) |
| 351 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) | 351 | #define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus)) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 3fe27f8d91f0..c0d2b7927c1f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -794,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
| 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 795 | struct scsi_ioctl_command __user *); | 795 | struct scsi_ioctl_command __user *); |
| 796 | 796 | ||
| 797 | extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
| 798 | extern void blk_queue_exit(struct request_queue *q); | ||
| 797 | extern void blk_start_queue(struct request_queue *q); | 799 | extern void blk_start_queue(struct request_queue *q); |
| 798 | extern void blk_stop_queue(struct request_queue *q); | 800 | extern void blk_stop_queue(struct request_queue *q); |
| 799 | extern void blk_sync_queue(struct request_queue *q); | 801 | extern void blk_sync_queue(struct request_queue *q); |
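Exporting blk_queue_enter()/blk_queue_exit() lets code outside blk-core pin a request queue for the duration of a submission. A hedged sketch of the pairing, assuming q is a live queue the driver owns (example_submit is an illustrative name):

    #include <linux/blkdev.h>

    static int example_submit(struct request_queue *q, gfp_t gfp)
    {
        int ret;

        ret = blk_queue_enter(q, gfp);  /* fails once the queue is dying */
        if (ret)
            return ret;
        /* ... safe to allocate and queue requests against q here ... */
        blk_queue_exit(q);              /* drop the usage reference */
        return 0;
    }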
diff --git a/include/linux/configfs.h b/include/linux/configfs.h index a8a335b7fce0..758a029011b1 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h | |||
| @@ -197,6 +197,16 @@ static inline struct configfs_subsystem *to_configfs_subsystem(struct config_gro | |||
| 197 | int configfs_register_subsystem(struct configfs_subsystem *subsys); | 197 | int configfs_register_subsystem(struct configfs_subsystem *subsys); |
| 198 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); | 198 | void configfs_unregister_subsystem(struct configfs_subsystem *subsys); |
| 199 | 199 | ||
| 200 | int configfs_register_group(struct config_group *parent_group, | ||
| 201 | struct config_group *group); | ||
| 202 | void configfs_unregister_group(struct config_group *group); | ||
| 203 | |||
| 204 | struct config_group * | ||
| 205 | configfs_register_default_group(struct config_group *parent_group, | ||
| 206 | const char *name, | ||
| 207 | struct config_item_type *item_type); | ||
| 208 | void configfs_unregister_default_group(struct config_group *group); | ||
| 209 | |||
| 200 | /* These functions can sleep and can alloc with GFP_KERNEL */ | 210 | /* These functions can sleep and can alloc with GFP_KERNEL */ |
| 201 | /* WARNING: These cannot be called underneath configfs callbacks!! */ | 211 | /* WARNING: These cannot be called underneath configfs callbacks!! */ |
| 202 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); | 212 | int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target); |
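configfs_register_default_group() bundles the common allocate-init-register sequence for a group that should simply exist under a parent. A sketch under the assumption that the helper reports failure as an ERR_PTR, as its configfs siblings do; example_subsys and example_type are illustrative placeholders, not part of this diff:

    #include <linux/configfs.h>
    #include <linux/err.h>

    static struct configfs_subsystem example_subsys; /* assumed set up elsewhere */
    static struct config_item_type example_type;     /* assumed set up elsewhere */
    static struct config_group *defaults;

    static int example_register(void)
    {
        defaults = configfs_register_default_group(&example_subsys.su_group,
                                                   "defaults", &example_type);
        return PTR_ERR_OR_ZERO(defaults);
    }

    static void example_unregister(void)
    {
        configfs_unregister_default_group(defaults);
    }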
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 6523109e136d..8942af0813e3 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -271,7 +271,7 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) | |||
| 271 | 271 | ||
| 272 | static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) | 272 | static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) |
| 273 | { | 273 | { |
| 274 | return gfp_flags & __GFP_DIRECT_RECLAIM; | 274 | return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | #ifdef CONFIG_HIGHMEM | 277 | #ifdef CONFIG_HIGHMEM |
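The (bool __force) cast changes nothing at run time; it only tells sparse that collapsing a __bitwise gfp_t into a plain bool is intentional. For context, the helper's typical caller pattern (the snippet is illustrative, not from this diff):

    /* branch on whether this allocation context may sleep */
    if (gfpflags_allow_blocking(gfp))
        might_sleep();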
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5706a2108f0a..c923350ca20a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -460,6 +460,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) | |||
| 460 | (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ | 460 | (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \ |
| 461 | idx++) | 461 | idx++) |
| 462 | 462 | ||
| 463 | static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) | ||
| 464 | { | ||
| 465 | struct kvm_vcpu *vcpu; | ||
| 466 | int i; | ||
| 467 | |||
| 468 | kvm_for_each_vcpu(i, vcpu, kvm) | ||
| 469 | if (vcpu->vcpu_id == id) | ||
| 470 | return vcpu; | ||
| 471 | return NULL; | ||
| 472 | } | ||
| 473 | |||
| 463 | #define kvm_for_each_memslot(memslot, slots) \ | 474 | #define kvm_for_each_memslot(memslot, slots) \ |
| 464 | for (memslot = &slots->memslots[0]; \ | 475 | for (memslot = &slots->memslots[0]; \ |
| 465 | memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ | 476 | memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\ |
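kvm_get_vcpu_by_id() looks a vcpu up by its architectural id, which need not equal its index in the vcpus[] array. A short sketch of a caller (example_kick_by_id is an illustrative name):

    static int example_kick_by_id(struct kvm *kvm, int id)
    {
        struct kvm_vcpu *vcpu = kvm_get_vcpu_by_id(kvm, id);

        if (!vcpu)
            return -EINVAL;
        kvm_vcpu_kick(vcpu);    /* wake/interrupt the matching vcpu */
        return 0;
    }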
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 69c9057e1ab8..3db5552b17d5 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
| @@ -58,7 +58,6 @@ enum { | |||
| 58 | struct nvm_id_group { | 58 | struct nvm_id_group { |
| 59 | u8 mtype; | 59 | u8 mtype; |
| 60 | u8 fmtype; | 60 | u8 fmtype; |
| 61 | u16 res16; | ||
| 62 | u8 num_ch; | 61 | u8 num_ch; |
| 63 | u8 num_lun; | 62 | u8 num_lun; |
| 64 | u8 num_pln; | 63 | u8 num_pln; |
| @@ -74,9 +73,9 @@ struct nvm_id_group { | |||
| 74 | u32 tbet; | 73 | u32 tbet; |
| 75 | u32 tbem; | 74 | u32 tbem; |
| 76 | u32 mpos; | 75 | u32 mpos; |
| 76 | u32 mccap; | ||
| 77 | u16 cpar; | 77 | u16 cpar; |
| 78 | u8 res[913]; | 78 | }; |
| 79 | } __packed; | ||
| 80 | 79 | ||
| 81 | struct nvm_addr_format { | 80 | struct nvm_addr_format { |
| 82 | u8 ch_offset; | 81 | u8 ch_offset; |
| @@ -91,19 +90,15 @@ struct nvm_addr_format { | |||
| 91 | u8 pg_len; | 90 | u8 pg_len; |
| 92 | u8 sect_offset; | 91 | u8 sect_offset; |
| 93 | u8 sect_len; | 92 | u8 sect_len; |
| 94 | u8 res[4]; | ||
| 95 | }; | 93 | }; |
| 96 | 94 | ||
| 97 | struct nvm_id { | 95 | struct nvm_id { |
| 98 | u8 ver_id; | 96 | u8 ver_id; |
| 99 | u8 vmnt; | 97 | u8 vmnt; |
| 100 | u8 cgrps; | 98 | u8 cgrps; |
| 101 | u8 res[5]; | ||
| 102 | u32 cap; | 99 | u32 cap; |
| 103 | u32 dom; | 100 | u32 dom; |
| 104 | struct nvm_addr_format ppaf; | 101 | struct nvm_addr_format ppaf; |
| 105 | u8 ppat; | ||
| 106 | u8 resv[224]; | ||
| 107 | struct nvm_id_group groups[4]; | 102 | struct nvm_id_group groups[4]; |
| 108 | } __packed; | 103 | } __packed; |
| 109 | 104 | ||
| @@ -123,39 +118,28 @@ struct nvm_tgt_instance { | |||
| 123 | #define NVM_VERSION_MINOR 0 | 118 | #define NVM_VERSION_MINOR 0 |
| 124 | #define NVM_VERSION_PATCH 0 | 119 | #define NVM_VERSION_PATCH 0 |
| 125 | 120 | ||
| 126 | #define NVM_SEC_BITS (8) | ||
| 127 | #define NVM_PL_BITS (6) | ||
| 128 | #define NVM_PG_BITS (16) | ||
| 129 | #define NVM_BLK_BITS (16) | 121 | #define NVM_BLK_BITS (16) |
| 130 | #define NVM_LUN_BITS (10) | 122 | #define NVM_PG_BITS (16) |
| 123 | #define NVM_SEC_BITS (8) | ||
| 124 | #define NVM_PL_BITS (8) | ||
| 125 | #define NVM_LUN_BITS (8) | ||
| 131 | #define NVM_CH_BITS (8) | 126 | #define NVM_CH_BITS (8) |
| 132 | 127 | ||
| 133 | struct ppa_addr { | 128 | struct ppa_addr { |
| 129 | /* Generic structure for all addresses */ | ||
| 134 | union { | 130 | union { |
| 135 | /* Channel-based PPA format in nand 4x2x2x2x8x10 */ | ||
| 136 | struct { | ||
| 137 | u64 ch : 4; | ||
| 138 | u64 sec : 2; /* 4 sectors per page */ | ||
| 139 | u64 pl : 2; /* 4 planes per LUN */ | ||
| 140 | u64 lun : 2; /* 4 LUNs per channel */ | ||
| 141 | u64 pg : 8; /* 256 pages per block */ | ||
| 142 | u64 blk : 10;/* 1024 blocks per plane */ | ||
| 143 | u64 resved : 36; | ||
| 144 | } chnl; | ||
| 145 | |||
| 146 | /* Generic structure for all addresses */ | ||
| 147 | struct { | 131 | struct { |
| 132 | u64 blk : NVM_BLK_BITS; | ||
| 133 | u64 pg : NVM_PG_BITS; | ||
| 148 | u64 sec : NVM_SEC_BITS; | 134 | u64 sec : NVM_SEC_BITS; |
| 149 | u64 pl : NVM_PL_BITS; | 135 | u64 pl : NVM_PL_BITS; |
| 150 | u64 pg : NVM_PG_BITS; | ||
| 151 | u64 blk : NVM_BLK_BITS; | ||
| 152 | u64 lun : NVM_LUN_BITS; | 136 | u64 lun : NVM_LUN_BITS; |
| 153 | u64 ch : NVM_CH_BITS; | 137 | u64 ch : NVM_CH_BITS; |
| 154 | } g; | 138 | } g; |
| 155 | 139 | ||
| 156 | u64 ppa; | 140 | u64 ppa; |
| 157 | }; | 141 | }; |
| 158 | } __packed; | 142 | }; |
| 159 | 143 | ||
| 160 | struct nvm_rq { | 144 | struct nvm_rq { |
| 161 | struct nvm_tgt_instance *ins; | 145 | struct nvm_tgt_instance *ins; |
| @@ -191,11 +175,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) | |||
| 191 | struct nvm_block; | 175 | struct nvm_block; |
| 192 | 176 | ||
| 193 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | 177 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); |
| 194 | typedef int (nvm_bb_update_fn)(u32, void *, unsigned int, void *); | 178 | typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); |
| 195 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); | 179 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); |
| 196 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, | 180 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, |
| 197 | nvm_l2p_update_fn *, void *); | 181 | nvm_l2p_update_fn *, void *); |
| 198 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, int, unsigned int, | 182 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, |
| 199 | nvm_bb_update_fn *, void *); | 183 | nvm_bb_update_fn *, void *); |
| 200 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); | 184 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); |
| 201 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); | 185 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); |
| @@ -210,7 +194,7 @@ struct nvm_dev_ops { | |||
| 210 | nvm_id_fn *identity; | 194 | nvm_id_fn *identity; |
| 211 | nvm_get_l2p_tbl_fn *get_l2p_tbl; | 195 | nvm_get_l2p_tbl_fn *get_l2p_tbl; |
| 212 | nvm_op_bb_tbl_fn *get_bb_tbl; | 196 | nvm_op_bb_tbl_fn *get_bb_tbl; |
| 213 | nvm_op_set_bb_fn *set_bb; | 197 | nvm_op_set_bb_fn *set_bb_tbl; |
| 214 | 198 | ||
| 215 | nvm_submit_io_fn *submit_io; | 199 | nvm_submit_io_fn *submit_io; |
| 216 | nvm_erase_blk_fn *erase_block; | 200 | nvm_erase_blk_fn *erase_block; |
| @@ -220,7 +204,7 @@ struct nvm_dev_ops { | |||
| 220 | nvm_dev_dma_alloc_fn *dev_dma_alloc; | 204 | nvm_dev_dma_alloc_fn *dev_dma_alloc; |
| 221 | nvm_dev_dma_free_fn *dev_dma_free; | 205 | nvm_dev_dma_free_fn *dev_dma_free; |
| 222 | 206 | ||
| 223 | uint8_t max_phys_sect; | 207 | unsigned int max_phys_sect; |
| 224 | }; | 208 | }; |
| 225 | 209 | ||
| 226 | struct nvm_lun { | 210 | struct nvm_lun { |
| @@ -229,7 +213,9 @@ struct nvm_lun { | |||
| 229 | int lun_id; | 213 | int lun_id; |
| 230 | int chnl_id; | 214 | int chnl_id; |
| 231 | 215 | ||
| 216 | unsigned int nr_inuse_blocks; /* Number of used blocks */ | ||
| 232 | unsigned int nr_free_blocks; /* Number of unused blocks */ | 217 | unsigned int nr_free_blocks; /* Number of unused blocks */ |
| 218 | unsigned int nr_bad_blocks; /* Number of bad blocks */ | ||
| 233 | struct nvm_block *blocks; | 219 | struct nvm_block *blocks; |
| 234 | 220 | ||
| 235 | spinlock_t lock; | 221 | spinlock_t lock; |
| @@ -263,8 +249,7 @@ struct nvm_dev { | |||
| 263 | int blks_per_lun; | 249 | int blks_per_lun; |
| 264 | int sec_size; | 250 | int sec_size; |
| 265 | int oob_size; | 251 | int oob_size; |
| 266 | int addr_mode; | 252 | struct nvm_addr_format ppaf; |
| 267 | struct nvm_addr_format addr_format; | ||
| 268 | 253 | ||
| 269 | /* Calculated/Cached values. These do not reflect the actual usable | 254 | /* Calculated/Cached values. These do not reflect the actual usable |
| 270 | * blocks at run-time. | 255 | * blocks at run-time. |
| @@ -290,118 +275,45 @@ struct nvm_dev { | |||
| 290 | char name[DISK_NAME_LEN]; | 275 | char name[DISK_NAME_LEN]; |
| 291 | }; | 276 | }; |
| 292 | 277 | ||
| 293 | /* fallback conversion */ | 278 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, |
| 294 | static struct ppa_addr __generic_to_linear_addr(struct nvm_dev *dev, | 279 | struct ppa_addr r) |
| 295 | struct ppa_addr r) | ||
| 296 | { | ||
| 297 | struct ppa_addr l; | ||
| 298 | |||
| 299 | l.ppa = r.g.sec + | ||
| 300 | r.g.pg * dev->sec_per_pg + | ||
| 301 | r.g.blk * (dev->pgs_per_blk * | ||
| 302 | dev->sec_per_pg) + | ||
| 303 | r.g.lun * (dev->blks_per_lun * | ||
| 304 | dev->pgs_per_blk * | ||
| 305 | dev->sec_per_pg) + | ||
| 306 | r.g.ch * (dev->blks_per_lun * | ||
| 307 | dev->pgs_per_blk * | ||
| 308 | dev->luns_per_chnl * | ||
| 309 | dev->sec_per_pg); | ||
| 310 | |||
| 311 | return l; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* fallback conversion */ | ||
| 315 | static struct ppa_addr __linear_to_generic_addr(struct nvm_dev *dev, | ||
| 316 | struct ppa_addr r) | ||
| 317 | { | 280 | { |
| 318 | struct ppa_addr l; | 281 | struct ppa_addr l; |
| 319 | int secs, pgs, blks, luns; | ||
| 320 | sector_t ppa = r.ppa; | ||
| 321 | 282 | ||
| 322 | l.ppa = 0; | 283 | l.ppa = ((u64)r.g.blk) << dev->ppaf.blk_offset; |
| 323 | 284 | l.ppa |= ((u64)r.g.pg) << dev->ppaf.pg_offset; | |
| 324 | div_u64_rem(ppa, dev->sec_per_pg, &secs); | 285 | l.ppa |= ((u64)r.g.sec) << dev->ppaf.sect_offset; |
| 325 | l.g.sec = secs; | 286 | l.ppa |= ((u64)r.g.pl) << dev->ppaf.pln_offset; |
| 326 | 287 | l.ppa |= ((u64)r.g.lun) << dev->ppaf.lun_offset; | |
| 327 | sector_div(ppa, dev->sec_per_pg); | 288 | l.ppa |= ((u64)r.g.ch) << dev->ppaf.ch_offset; |
| 328 | div_u64_rem(ppa, dev->sec_per_blk, &pgs); | ||
| 329 | l.g.pg = pgs; | ||
| 330 | |||
| 331 | sector_div(ppa, dev->pgs_per_blk); | ||
| 332 | div_u64_rem(ppa, dev->blks_per_lun, &blks); | ||
| 333 | l.g.blk = blks; | ||
| 334 | |||
| 335 | sector_div(ppa, dev->blks_per_lun); | ||
| 336 | div_u64_rem(ppa, dev->luns_per_chnl, &luns); | ||
| 337 | l.g.lun = luns; | ||
| 338 | |||
| 339 | sector_div(ppa, dev->luns_per_chnl); | ||
| 340 | l.g.ch = ppa; | ||
| 341 | 289 | ||
| 342 | return l; | 290 | return l; |
| 343 | } | 291 | } |
| 344 | 292 | ||
| 345 | static struct ppa_addr __generic_to_chnl_addr(struct ppa_addr r) | 293 | static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, |
| 294 | struct ppa_addr r) | ||
| 346 | { | 295 | { |
| 347 | struct ppa_addr l; | 296 | struct ppa_addr l; |
| 348 | 297 | ||
| 349 | l.ppa = 0; | 298 | /* |
| 350 | 299 | * (r.ppa >> X offset) & X len bitmask. X = blk, pg, etc. | |
| 351 | l.chnl.sec = r.g.sec; | 300 | */ |
| 352 | l.chnl.pl = r.g.pl; | 301 | l.g.blk = (r.ppa >> dev->ppaf.blk_offset) & |
| 353 | l.chnl.pg = r.g.pg; | 302 | (((1 << dev->ppaf.blk_len) - 1)); |
| 354 | l.chnl.blk = r.g.blk; | 303 | l.g.pg = (r.ppa >> dev->ppaf.pg_offset) & |
| 355 | l.chnl.lun = r.g.lun; | 304 | (((1 << dev->ppaf.pg_len) - 1)); |
| 356 | l.chnl.ch = r.g.ch; | 305 | l.g.sec = (r.ppa >> dev->ppaf.sect_offset) & |
| 357 | 306 | (((1 << dev->ppaf.sect_len) - 1)); | |
| 358 | return l; | 307 | l.g.pl = (r.ppa >> dev->ppaf.pln_offset) & |
| 359 | } | 308 | (((1 << dev->ppaf.pln_len) - 1)); |
| 360 | 309 | l.g.lun = (r.ppa >> dev->ppaf.lun_offset) & |
| 361 | static struct ppa_addr __chnl_to_generic_addr(struct ppa_addr r) | 310 | (((1 << dev->ppaf.lun_len) - 1)); |
| 362 | { | 311 | l.g.ch = (r.ppa >> dev->ppaf.ch_offset) & |
| 363 | struct ppa_addr l; | 312 | (((1 << dev->ppaf.ch_len) - 1)); |
| 364 | |||
| 365 | l.ppa = 0; | ||
| 366 | |||
| 367 | l.g.sec = r.chnl.sec; | ||
| 368 | l.g.pl = r.chnl.pl; | ||
| 369 | l.g.pg = r.chnl.pg; | ||
| 370 | l.g.blk = r.chnl.blk; | ||
| 371 | l.g.lun = r.chnl.lun; | ||
| 372 | l.g.ch = r.chnl.ch; | ||
| 373 | 313 | ||
| 374 | return l; | 314 | return l; |
| 375 | } | 315 | } |
| 376 | 316 | ||
| 377 | static inline struct ppa_addr addr_to_generic_mode(struct nvm_dev *dev, | ||
| 378 | struct ppa_addr gppa) | ||
| 379 | { | ||
| 380 | switch (dev->addr_mode) { | ||
| 381 | case NVM_ADDRMODE_LINEAR: | ||
| 382 | return __linear_to_generic_addr(dev, gppa); | ||
| 383 | case NVM_ADDRMODE_CHANNEL: | ||
| 384 | return __chnl_to_generic_addr(gppa); | ||
| 385 | default: | ||
| 386 | BUG(); | ||
| 387 | } | ||
| 388 | return gppa; | ||
| 389 | } | ||
| 390 | |||
| 391 | static inline struct ppa_addr generic_to_addr_mode(struct nvm_dev *dev, | ||
| 392 | struct ppa_addr gppa) | ||
| 393 | { | ||
| 394 | switch (dev->addr_mode) { | ||
| 395 | case NVM_ADDRMODE_LINEAR: | ||
| 396 | return __generic_to_linear_addr(dev, gppa); | ||
| 397 | case NVM_ADDRMODE_CHANNEL: | ||
| 398 | return __generic_to_chnl_addr(gppa); | ||
| 399 | default: | ||
| 400 | BUG(); | ||
| 401 | } | ||
| 402 | return gppa; | ||
| 403 | } | ||
| 404 | |||
| 405 | static inline int ppa_empty(struct ppa_addr ppa_addr) | 317 | static inline int ppa_empty(struct ppa_addr ppa_addr) |
| 406 | { | 318 | { |
| 407 | return (ppa_addr.ppa == ADDR_EMPTY); | 319 | return (ppa_addr.ppa == ADDR_EMPTY); |
| @@ -468,7 +380,7 @@ typedef int (nvmm_end_io_fn)(struct nvm_rq *, int); | |||
| 468 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, | 380 | typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *, |
| 469 | unsigned long); | 381 | unsigned long); |
| 470 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); | 382 | typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int); |
| 471 | typedef void (nvmm_free_blocks_print_fn)(struct nvm_dev *); | 383 | typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *); |
| 472 | 384 | ||
| 473 | struct nvmm_type { | 385 | struct nvmm_type { |
| 474 | const char *name; | 386 | const char *name; |
| @@ -492,7 +404,7 @@ struct nvmm_type { | |||
| 492 | nvmm_get_lun_fn *get_lun; | 404 | nvmm_get_lun_fn *get_lun; |
| 493 | 405 | ||
| 494 | /* Statistics */ | 406 | /* Statistics */ |
| 495 | nvmm_free_blocks_print_fn *free_blocks_print; | 407 | nvmm_lun_info_print_fn *lun_info_print; |
| 496 | struct list_head list; | 408 | struct list_head list; |
| 497 | }; | 409 | }; |
| 498 | 410 | ||
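The rewritten ppa_addr drops the hard-coded channel layout: the generic struct always uses the same field widths, and generic_to_dev_addr()/dev_to_generic_addr() relocate each field using the per-device nvm_addr_format offsets. A standalone C model of the packing direction, with made-up offsets (no real device format is described here):

    #include <stdint.h>
    #include <stdio.h>

    struct fmt { int blk_off, pg_off, sec_off, pln_off, lun_off, ch_off; };

    int main(void)
    {
        struct fmt f = { 0, 16, 32, 40, 48, 56 };      /* hypothetical */
        uint64_t blk = 5, pg = 3, sec = 1, pln = 0, lun = 2, ch = 1;
        uint64_t ppa = 0;

        /* same shape as generic_to_dev_addr(): shift each generic
         * field to the bit position the device format advertises */
        ppa |= blk << f.blk_off;
        ppa |= pg  << f.pg_off;
        ppa |= sec << f.sec_off;
        ppa |= pln << f.pln_off;
        ppa |= lun << f.lun_off;
        ppa |= ch  << f.ch_off;

        printf("ppa = 0x%016llx\n", (unsigned long long)ppa);
        return 0;
    }

With these values the program prints ppa = 0x0102000100030005; dev_to_generic_addr() is the inverse, shifting right and masking each field with (1 << len) - 1.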
diff --git a/include/linux/module.h b/include/linux/module.h index 3a19c79918e0..4560d8f1545d 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -302,6 +302,28 @@ struct mod_tree_node { | |||
| 302 | struct latch_tree_node node; | 302 | struct latch_tree_node node; |
| 303 | }; | 303 | }; |
| 304 | 304 | ||
| 305 | struct module_layout { | ||
| 306 | /* The actual code + data. */ | ||
| 307 | void *base; | ||
| 308 | /* Total size. */ | ||
| 309 | unsigned int size; | ||
| 310 | /* The size of the executable code. */ | ||
| 311 | unsigned int text_size; | ||
| 312 | /* Size of RO section of the module (text+rodata) */ | ||
| 313 | unsigned int ro_size; | ||
| 314 | |||
| 315 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 316 | struct mod_tree_node mtn; | ||
| 317 | #endif | ||
| 318 | }; | ||
| 319 | |||
| 320 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 321 | /* Only touch one cacheline for common rbtree-for-core-layout case. */ | ||
| 322 | #define __module_layout_align ____cacheline_aligned | ||
| 323 | #else | ||
| 324 | #define __module_layout_align | ||
| 325 | #endif | ||
| 326 | |||
| 305 | struct module { | 327 | struct module { |
| 306 | enum module_state state; | 328 | enum module_state state; |
| 307 | 329 | ||
| @@ -366,37 +388,9 @@ struct module { | |||
| 366 | /* Startup function. */ | 388 | /* Startup function. */ |
| 367 | int (*init)(void); | 389 | int (*init)(void); |
| 368 | 390 | ||
| 369 | /* | 391 | /* Core layout: rbtree is accessed frequently, so keep together. */ |
| 370 | * If this is non-NULL, vfree() after init() returns. | 392 | struct module_layout core_layout __module_layout_align; |
| 371 | * | 393 | struct module_layout init_layout; |
| 372 | * Cacheline align here, such that: | ||
| 373 | * module_init, module_core, init_size, core_size, | ||
| 374 | * init_text_size, core_text_size and mtn_core::{mod,node[0]} | ||
| 375 | * are on the same cacheline. | ||
| 376 | */ | ||
| 377 | void *module_init ____cacheline_aligned; | ||
| 378 | |||
| 379 | /* Here is the actual code + data, vfree'd on unload. */ | ||
| 380 | void *module_core; | ||
| 381 | |||
| 382 | /* Here are the sizes of the init and core sections */ | ||
| 383 | unsigned int init_size, core_size; | ||
| 384 | |||
| 385 | /* The size of the executable code in each section. */ | ||
| 386 | unsigned int init_text_size, core_text_size; | ||
| 387 | |||
| 388 | #ifdef CONFIG_MODULES_TREE_LOOKUP | ||
| 389 | /* | ||
| 390 | * We want mtn_core::{mod,node[0]} to be in the same cacheline as the | ||
| 391 | * above entries such that a regular lookup will only touch one | ||
| 392 | * cacheline. | ||
| 393 | */ | ||
| 394 | struct mod_tree_node mtn_core; | ||
| 395 | struct mod_tree_node mtn_init; | ||
| 396 | #endif | ||
| 397 | |||
| 398 | /* Size of RO sections of the module (text+rodata) */ | ||
| 399 | unsigned int init_ro_size, core_ro_size; | ||
| 400 | 394 | ||
| 401 | /* Arch-specific module values */ | 395 | /* Arch-specific module values */ |
| 402 | struct mod_arch_specific arch; | 396 | struct mod_arch_specific arch; |
| @@ -505,15 +499,15 @@ bool is_module_text_address(unsigned long addr); | |||
| 505 | static inline bool within_module_core(unsigned long addr, | 499 | static inline bool within_module_core(unsigned long addr, |
| 506 | const struct module *mod) | 500 | const struct module *mod) |
| 507 | { | 501 | { |
| 508 | return (unsigned long)mod->module_core <= addr && | 502 | return (unsigned long)mod->core_layout.base <= addr && |
| 509 | addr < (unsigned long)mod->module_core + mod->core_size; | 503 | addr < (unsigned long)mod->core_layout.base + mod->core_layout.size; |
| 510 | } | 504 | } |
| 511 | 505 | ||
| 512 | static inline bool within_module_init(unsigned long addr, | 506 | static inline bool within_module_init(unsigned long addr, |
| 513 | const struct module *mod) | 507 | const struct module *mod) |
| 514 | { | 508 | { |
| 515 | return (unsigned long)mod->module_init <= addr && | 509 | return (unsigned long)mod->init_layout.base <= addr && |
| 516 | addr < (unsigned long)mod->module_init + mod->init_size; | 510 | addr < (unsigned long)mod->init_layout.base + mod->init_layout.size; |
| 517 | } | 511 | } |
| 518 | 512 | ||
| 519 | static inline bool within_module(unsigned long addr, const struct module *mod) | 513 | static inline bool within_module(unsigned long addr, const struct module *mod) |
| @@ -768,9 +762,13 @@ extern int module_sysfs_initialized; | |||
| 768 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX | 762 | #ifdef CONFIG_DEBUG_SET_MODULE_RONX |
| 769 | extern void set_all_modules_text_rw(void); | 763 | extern void set_all_modules_text_rw(void); |
| 770 | extern void set_all_modules_text_ro(void); | 764 | extern void set_all_modules_text_ro(void); |
| 765 | extern void module_enable_ro(const struct module *mod); | ||
| 766 | extern void module_disable_ro(const struct module *mod); | ||
| 771 | #else | 767 | #else |
| 772 | static inline void set_all_modules_text_rw(void) { } | 768 | static inline void set_all_modules_text_rw(void) { } |
| 773 | static inline void set_all_modules_text_ro(void) { } | 769 | static inline void set_all_modules_text_ro(void) { } |
| 770 | static inline void module_enable_ro(const struct module *mod) { } | ||
| 771 | static inline void module_disable_ro(const struct module *mod) { } | ||
| 774 | #endif | 772 | #endif |
| 775 | 773 | ||
| 776 | #ifdef CONFIG_GENERIC_BUG | 774 | #ifdef CONFIG_GENERIC_BUG |
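With module_layout, every "which region does this address belong to" check reduces to one base/size pair, as the rewritten within_module_core()/within_module_init() above show. A tiny sketch built on exactly those helpers (example_classify is an illustrative name):

    #include <linux/module.h>

    static const char *example_classify(unsigned long addr,
                                        const struct module *mod)
    {
        if (within_module_core(addr, mod))
            return "core";
        if (within_module_init(addr, mod))
            return "init";
        return "outside";
    }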
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index 36112cdd665a..b90d8ec57c1f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
| @@ -80,7 +80,7 @@ static inline int of_dma_router_register(struct device_node *np, | |||
| 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 80 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
| 81 | const char *name) | 81 | const char *name) |
| 82 | { | 82 | { |
| 83 | return NULL; | 83 | return ERR_PTR(-ENODEV); |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 86 | static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
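Returning ERR_PTR(-ENODEV) instead of NULL from the !CONFIG_OF stub means callers need only one error path, whether or not OF support is built in. A sketch of that pattern (example_get_chan is an illustrative name):

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/of_dma.h>

    static int example_get_chan(struct device_node *np)
    {
        struct dma_chan *chan = of_dma_request_slave_channel(np, "rx");

        if (IS_ERR(chan))
            return PTR_ERR(chan);   /* -ENODEV from the stub above */
        /* ... use chan, release with dma_release_channel() ... */
        return 0;
    }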
diff --git a/include/linux/signal.h b/include/linux/signal.h index ab1e0392b5ac..92557bbce7e7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -239,7 +239,6 @@ extern int sigprocmask(int, sigset_t *, sigset_t *); | |||
| 239 | extern void set_current_blocked(sigset_t *); | 239 | extern void set_current_blocked(sigset_t *); |
| 240 | extern void __set_current_blocked(const sigset_t *); | 240 | extern void __set_current_blocked(const sigset_t *); |
| 241 | extern int show_unhandled_signals; | 241 | extern int show_unhandled_signals; |
| 242 | extern int sigsuspend(sigset_t *); | ||
| 243 | 242 | ||
| 244 | struct sigaction { | 243 | struct sigaction { |
| 245 | #ifndef __ARCH_HAS_IRIX_SIGACTION | 244 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 7c82e3b307a3..2037a861e367 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -158,6 +158,24 @@ size_t ksize(const void *); | |||
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | /* | 160 | /* |
| 161 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 162 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 163 | * aligned buffers. | ||
| 164 | */ | ||
| 165 | #ifndef ARCH_SLAB_MINALIGN | ||
| 166 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 167 | #endif | ||
| 168 | |||
| 169 | /* | ||
| 170 | * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned | ||
| 171 | * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN | ||
| 172 | * aligned pointers. | ||
| 173 | */ | ||
| 174 | #define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN) | ||
| 175 | #define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN) | ||
| 176 | #define __assume_page_alignment __assume_aligned(PAGE_SIZE) | ||
| 177 | |||
| 178 | /* | ||
| 161 | * Kmalloc array related definitions | 179 | * Kmalloc array related definitions |
| 162 | */ | 180 | */ |
| 163 | 181 | ||
| @@ -286,8 +304,8 @@ static __always_inline int kmalloc_index(size_t size) | |||
| 286 | } | 304 | } |
| 287 | #endif /* !CONFIG_SLOB */ | 305 | #endif /* !CONFIG_SLOB */ |
| 288 | 306 | ||
| 289 | void *__kmalloc(size_t size, gfp_t flags); | 307 | void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment; |
| 290 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); | 308 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment; |
| 291 | void kmem_cache_free(struct kmem_cache *, void *); | 309 | void kmem_cache_free(struct kmem_cache *, void *); |
| 292 | 310 | ||
| 293 | /* | 311 | /* |
| @@ -298,11 +316,11 @@ void kmem_cache_free(struct kmem_cache *, void *); | |||
| 298 | * Note that interrupts must be enabled when calling these functions. | 316 | * Note that interrupts must be enabled when calling these functions. |
| 299 | */ | 317 | */ |
| 300 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 318 | void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
| 301 | bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 319 | int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
| 302 | 320 | ||
| 303 | #ifdef CONFIG_NUMA | 321 | #ifdef CONFIG_NUMA |
| 304 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 322 | void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment; |
| 305 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 323 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment; |
| 306 | #else | 324 | #else |
| 307 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | 325 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) |
| 308 | { | 326 | { |
| @@ -316,12 +334,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f | |||
| 316 | #endif | 334 | #endif |
| 317 | 335 | ||
| 318 | #ifdef CONFIG_TRACING | 336 | #ifdef CONFIG_TRACING |
| 319 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | 337 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment; |
| 320 | 338 | ||
| 321 | #ifdef CONFIG_NUMA | 339 | #ifdef CONFIG_NUMA |
| 322 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | 340 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| 323 | gfp_t gfpflags, | 341 | gfp_t gfpflags, |
| 324 | int node, size_t size); | 342 | int node, size_t size) __assume_slab_alignment; |
| 325 | #else | 343 | #else |
| 326 | static __always_inline void * | 344 | static __always_inline void * |
| 327 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | 345 | kmem_cache_alloc_node_trace(struct kmem_cache *s, |
| @@ -354,10 +372,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, | |||
| 354 | } | 372 | } |
| 355 | #endif /* CONFIG_TRACING */ | 373 | #endif /* CONFIG_TRACING */ |
| 356 | 374 | ||
| 357 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); | 375 | extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 358 | 376 | ||
| 359 | #ifdef CONFIG_TRACING | 377 | #ifdef CONFIG_TRACING |
| 360 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | 378 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment; |
| 361 | #else | 379 | #else |
| 362 | static __always_inline void * | 380 | static __always_inline void * |
| 363 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | 381 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) |
| @@ -482,15 +500,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
| 482 | return __kmalloc_node(size, flags, node); | 500 | return __kmalloc_node(size, flags, node); |
| 483 | } | 501 | } |
| 484 | 502 | ||
| 485 | /* | ||
| 486 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | ||
| 487 | * Intended for arches that get misalignment faults even for 64 bit integer | ||
| 488 | * aligned buffers. | ||
| 489 | */ | ||
| 490 | #ifndef ARCH_SLAB_MINALIGN | ||
| 491 | #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long) | ||
| 492 | #endif | ||
| 493 | |||
| 494 | struct memcg_cache_array { | 503 | struct memcg_cache_array { |
| 495 | struct rcu_head rcu; | 504 | struct rcu_head rcu; |
| 496 | struct kmem_cache *entries[0]; | 505 | struct kmem_cache *entries[0]; |
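Two things change for slab callers here: the __assume_*_alignment attributes let the compiler assume kmalloc and kmem_cache_alloc results meet the stated minimum alignment, and kmem_cache_alloc_bulk() now returns an int. A hedged sketch of the bulk API, reading the return value as "number of objects allocated, zero on failure", which is how the bool-to-int conversion is understood here:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static int example_bulk(struct kmem_cache *cache)
    {
        void *objs[16];
        int n;

        n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
        if (!n)
            return -ENOMEM;
        /* ... objs[0..n-1] are usable here ... */
        kmem_cache_free_bulk(cache, n, objs);
        return 0;
    }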
diff --git a/include/linux/tty.h b/include/linux/tty.h index 5b04b0a5375b..5e31f1b99037 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -607,7 +607,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); | |||
| 607 | 607 | ||
| 608 | /* tty_audit.c */ | 608 | /* tty_audit.c */ |
| 609 | #ifdef CONFIG_AUDIT | 609 | #ifdef CONFIG_AUDIT |
| 610 | extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, | 610 | extern void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 611 | size_t size, unsigned icanon); | 611 | size_t size, unsigned icanon); |
| 612 | extern void tty_audit_exit(void); | 612 | extern void tty_audit_exit(void); |
| 613 | extern void tty_audit_fork(struct signal_struct *sig); | 613 | extern void tty_audit_fork(struct signal_struct *sig); |
| @@ -615,8 +615,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch); | |||
| 615 | extern void tty_audit_push(struct tty_struct *tty); | 615 | extern void tty_audit_push(struct tty_struct *tty); |
| 616 | extern int tty_audit_push_current(void); | 616 | extern int tty_audit_push_current(void); |
| 617 | #else | 617 | #else |
| 618 | static inline void tty_audit_add_data(struct tty_struct *tty, | 618 | static inline void tty_audit_add_data(struct tty_struct *tty, const void *data, |
| 619 | unsigned char *data, size_t size, unsigned icanon) | 619 | size_t size, unsigned icanon) |
| 620 | { | 620 | { |
| 621 | } | 621 | } |
| 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) | 622 | static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch) |
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4121345498e0..2a20c0dfdafc 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c | |||
| @@ -2021,7 +2021,7 @@ static int kdb_lsmod(int argc, const char **argv) | |||
| 2021 | continue; | 2021 | continue; |
| 2022 | 2022 | ||
| 2023 | kdb_printf("%-20s%8u 0x%p ", mod->name, | 2023 | kdb_printf("%-20s%8u 0x%p ", mod->name, |
| 2024 | mod->core_size, (void *)mod); | 2024 | mod->core_layout.size, (void *)mod); |
| 2025 | #ifdef CONFIG_MODULE_UNLOAD | 2025 | #ifdef CONFIG_MODULE_UNLOAD |
| 2026 | kdb_printf("%4d ", module_refcount(mod)); | 2026 | kdb_printf("%4d ", module_refcount(mod)); |
| 2027 | #endif | 2027 | #endif |
| @@ -2031,7 +2031,7 @@ static int kdb_lsmod(int argc, const char **argv) | |||
| 2031 | kdb_printf(" (Loading)"); | 2031 | kdb_printf(" (Loading)"); |
| 2032 | else | 2032 | else |
| 2033 | kdb_printf(" (Live)"); | 2033 | kdb_printf(" (Live)"); |
| 2034 | kdb_printf(" 0x%p", mod->module_core); | 2034 | kdb_printf(" 0x%p", mod->core_layout.base); |
| 2035 | 2035 | ||
| 2036 | #ifdef CONFIG_MODULE_UNLOAD | 2036 | #ifdef CONFIG_MODULE_UNLOAD |
| 2037 | { | 2037 | { |
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c index 7080ae1eb6c1..2f9df37940a0 100644 --- a/kernel/gcov/base.c +++ b/kernel/gcov/base.c | |||
| @@ -123,11 +123,6 @@ void gcov_enable_events(void) | |||
| 123 | } | 123 | } |
| 124 | 124 | ||
| 125 | #ifdef CONFIG_MODULES | 125 | #ifdef CONFIG_MODULES |
| 126 | static inline int within(void *addr, void *start, unsigned long size) | ||
| 127 | { | ||
| 128 | return ((addr >= start) && (addr < start + size)); | ||
| 129 | } | ||
| 130 | |||
| 131 | /* Update list and generate events when modules are unloaded. */ | 126 | /* Update list and generate events when modules are unloaded. */ |
| 132 | static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, | 127 | static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, |
| 133 | void *data) | 128 | void *data) |
| @@ -142,7 +137,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, | |||
| 142 | 137 | ||
| 143 | /* Remove entries located in module from linked list. */ | 138 | /* Remove entries located in module from linked list. */ |
| 144 | while ((info = gcov_info_next(info))) { | 139 | while ((info = gcov_info_next(info))) { |
| 145 | if (within(info, mod->module_core, mod->core_size)) { | 140 | if (within_module((unsigned long)info, mod)) { |
| 146 | gcov_info_unlink(prev, info); | 141 | gcov_info_unlink(prev, info); |
| 147 | if (gcov_events_enabled) | 142 | if (gcov_events_enabled) |
| 148 | gcov_event(GCOV_REMOVE, info); | 143 | gcov_event(GCOV_REMOVE, info); |
diff --git a/kernel/module.c b/kernel/module.c index 8f051a106676..912e891e0e2f 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
| @@ -80,15 +80,6 @@ | |||
| 80 | # define debug_align(X) (X) | 80 | # define debug_align(X) (X) |
| 81 | #endif | 81 | #endif |
| 82 | 82 | ||
| 83 | /* | ||
| 84 | * Given BASE and SIZE this macro calculates the number of pages the | ||
| 85 | * memory regions occupies | ||
| 86 | */ | ||
| 87 | #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ | ||
| 88 | (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ | ||
| 89 | PFN_DOWN((unsigned long)BASE) + 1) \ | ||
| 90 | : (0UL)) | ||
| 91 | |||
| 92 | /* If this is set, the section belongs in the init part of the module */ | 83 | /* If this is set, the section belongs in the init part of the module */ |
| 93 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 84 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
| 94 | 85 | ||
| @@ -108,13 +99,6 @@ static LIST_HEAD(modules); | |||
| 108 | * Use a latched RB-tree for __module_address(); this allows us to use | 99 | * Use a latched RB-tree for __module_address(); this allows us to use |
| 109 | * RCU-sched lookups of the address from any context. | 100 | * RCU-sched lookups of the address from any context. |
| 110 | * | 101 | * |
| 111 | * Because modules have two address ranges: init and core, we need two | ||
| 112 | * latch_tree_nodes entries. Therefore we need the back-pointer from | ||
| 113 | * mod_tree_node. | ||
| 114 | * | ||
| 115 | * Because init ranges are short lived we mark them unlikely and have placed | ||
| 116 | * them outside the critical cacheline in struct module. | ||
| 117 | * | ||
| 118 | * This is conditional on PERF_EVENTS || TRACING because those can really hit | 102 | * This is conditional on PERF_EVENTS || TRACING because those can really hit |
| 119 | * __module_address() hard by doing a lot of stack unwinding; potentially from | 103 | * __module_address() hard by doing a lot of stack unwinding; potentially from |
| 120 | * NMI context. | 104 | * NMI context. |
| @@ -122,24 +106,16 @@ static LIST_HEAD(modules); | |||
| 122 | 106 | ||
| 123 | static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) | 107 | static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n) |
| 124 | { | 108 | { |
| 125 | struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); | 109 | struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
| 126 | struct module *mod = mtn->mod; | ||
| 127 | 110 | ||
| 128 | if (unlikely(mtn == &mod->mtn_init)) | 111 | return (unsigned long)layout->base; |
| 129 | return (unsigned long)mod->module_init; | ||
| 130 | |||
| 131 | return (unsigned long)mod->module_core; | ||
| 132 | } | 112 | } |
| 133 | 113 | ||
| 134 | static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) | 114 | static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n) |
| 135 | { | 115 | { |
| 136 | struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node); | 116 | struct module_layout *layout = container_of(n, struct module_layout, mtn.node); |
| 137 | struct module *mod = mtn->mod; | ||
| 138 | |||
| 139 | if (unlikely(mtn == &mod->mtn_init)) | ||
| 140 | return (unsigned long)mod->init_size; | ||
| 141 | 117 | ||
| 142 | return (unsigned long)mod->core_size; | 118 | return (unsigned long)layout->size; |
| 143 | } | 119 | } |
| 144 | 120 | ||
| 145 | static __always_inline bool | 121 | static __always_inline bool |
| @@ -197,23 +173,23 @@ static void __mod_tree_remove(struct mod_tree_node *node) | |||
| 197 | */ | 173 | */ |
| 198 | static void mod_tree_insert(struct module *mod) | 174 | static void mod_tree_insert(struct module *mod) |
| 199 | { | 175 | { |
| 200 | mod->mtn_core.mod = mod; | 176 | mod->core_layout.mtn.mod = mod; |
| 201 | mod->mtn_init.mod = mod; | 177 | mod->init_layout.mtn.mod = mod; |
| 202 | 178 | ||
| 203 | __mod_tree_insert(&mod->mtn_core); | 179 | __mod_tree_insert(&mod->core_layout.mtn); |
| 204 | if (mod->init_size) | 180 | if (mod->init_layout.size) |
| 205 | __mod_tree_insert(&mod->mtn_init); | 181 | __mod_tree_insert(&mod->init_layout.mtn); |
| 206 | } | 182 | } |
| 207 | 183 | ||
| 208 | static void mod_tree_remove_init(struct module *mod) | 184 | static void mod_tree_remove_init(struct module *mod) |
| 209 | { | 185 | { |
| 210 | if (mod->init_size) | 186 | if (mod->init_layout.size) |
| 211 | __mod_tree_remove(&mod->mtn_init); | 187 | __mod_tree_remove(&mod->init_layout.mtn); |
| 212 | } | 188 | } |
| 213 | 189 | ||
| 214 | static void mod_tree_remove(struct module *mod) | 190 | static void mod_tree_remove(struct module *mod) |
| 215 | { | 191 | { |
| 216 | __mod_tree_remove(&mod->mtn_core); | 192 | __mod_tree_remove(&mod->core_layout.mtn); |
| 217 | mod_tree_remove_init(mod); | 193 | mod_tree_remove_init(mod); |
| 218 | } | 194 | } |
| 219 | 195 | ||
| @@ -267,9 +243,9 @@ static void __mod_update_bounds(void *base, unsigned int size) | |||
| 267 | 243 | ||
| 268 | static void mod_update_bounds(struct module *mod) | 244 | static void mod_update_bounds(struct module *mod) |
| 269 | { | 245 | { |
| 270 | __mod_update_bounds(mod->module_core, mod->core_size); | 246 | __mod_update_bounds(mod->core_layout.base, mod->core_layout.size); |
| 271 | if (mod->init_size) | 247 | if (mod->init_layout.size) |
| 272 | __mod_update_bounds(mod->module_init, mod->init_size); | 248 | __mod_update_bounds(mod->init_layout.base, mod->init_layout.size); |
| 273 | } | 249 | } |
| 274 | 250 | ||
| 275 | #ifdef CONFIG_KGDB_KDB | 251 | #ifdef CONFIG_KGDB_KDB |
| @@ -1214,7 +1190,7 @@ struct module_attribute module_uevent = | |||
| 1214 | static ssize_t show_coresize(struct module_attribute *mattr, | 1190 | static ssize_t show_coresize(struct module_attribute *mattr, |
| 1215 | struct module_kobject *mk, char *buffer) | 1191 | struct module_kobject *mk, char *buffer) |
| 1216 | { | 1192 | { |
| 1217 | return sprintf(buffer, "%u\n", mk->mod->core_size); | 1193 | return sprintf(buffer, "%u\n", mk->mod->core_layout.size); |
| 1218 | } | 1194 | } |
| 1219 | 1195 | ||
| 1220 | static struct module_attribute modinfo_coresize = | 1196 | static struct module_attribute modinfo_coresize = |
| @@ -1223,7 +1199,7 @@ static struct module_attribute modinfo_coresize = | |||
| 1223 | static ssize_t show_initsize(struct module_attribute *mattr, | 1199 | static ssize_t show_initsize(struct module_attribute *mattr, |
| 1224 | struct module_kobject *mk, char *buffer) | 1200 | struct module_kobject *mk, char *buffer) |
| 1225 | { | 1201 | { |
| 1226 | return sprintf(buffer, "%u\n", mk->mod->init_size); | 1202 | return sprintf(buffer, "%u\n", mk->mod->init_layout.size); |
| 1227 | } | 1203 | } |
| 1228 | 1204 | ||
| 1229 | static struct module_attribute modinfo_initsize = | 1205 | static struct module_attribute modinfo_initsize = |
| @@ -1873,64 +1849,75 @@ static void mod_sysfs_teardown(struct module *mod) | |||
| 1873 | /* | 1849 | /* |
| 1874 | * LKM RO/NX protection: protect module's text/ro-data | 1850 | * LKM RO/NX protection: protect module's text/ro-data |
| 1875 | * from modification and any data from execution. | 1851 | * from modification and any data from execution. |
| 1852 | * | ||
| 1853 | * General layout of module is: | ||
| 1854 | * [text] [read-only-data] [writable data] | ||
| 1855 | * text_size -----^ ^ ^ | ||
| 1856 | * ro_size ------------------------| | | ||
| 1857 | * size -------------------------------------------| | ||
| 1858 | * | ||
| 1859 | * These values are always page-aligned (as is base) | ||
| 1876 | */ | 1860 | */ |
| 1877 | void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) | 1861 | static void frob_text(const struct module_layout *layout, |
| 1862 | int (*set_memory)(unsigned long start, int num_pages)) | ||
| 1878 | { | 1863 | { |
| 1879 | unsigned long begin_pfn = PFN_DOWN((unsigned long)start); | 1864 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); |
| 1880 | unsigned long end_pfn = PFN_DOWN((unsigned long)end); | 1865 | BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); |
| 1866 | set_memory((unsigned long)layout->base, | ||
| 1867 | layout->text_size >> PAGE_SHIFT); | ||
| 1868 | } | ||
| 1881 | 1869 | ||
| 1882 | if (end_pfn > begin_pfn) | 1870 | static void frob_rodata(const struct module_layout *layout, |
| 1883 | set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); | 1871 | int (*set_memory)(unsigned long start, int num_pages)) |
| 1872 | { | ||
| 1873 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); | ||
| 1874 | BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1)); | ||
| 1875 | BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); | ||
| 1876 | set_memory((unsigned long)layout->base + layout->text_size, | ||
| 1877 | (layout->ro_size - layout->text_size) >> PAGE_SHIFT); | ||
| 1884 | } | 1878 | } |
| 1885 | 1879 | ||
| 1886 | static void set_section_ro_nx(void *base, | 1880 | static void frob_writable_data(const struct module_layout *layout, |
| 1887 | unsigned long text_size, | 1881 | int (*set_memory)(unsigned long start, int num_pages)) |
| 1888 | unsigned long ro_size, | ||
| 1889 | unsigned long total_size) | ||
| 1890 | { | 1882 | { |
| 1891 | /* begin and end PFNs of the current subsection */ | 1883 | BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1)); |
| 1892 | unsigned long begin_pfn; | 1884 | BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1)); |
| 1893 | unsigned long end_pfn; | 1885 | BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1)); |
| 1886 | set_memory((unsigned long)layout->base + layout->ro_size, | ||
| 1887 | (layout->size - layout->ro_size) >> PAGE_SHIFT); | ||
| 1888 | } | ||
| 1894 | 1889 | ||
| 1895 | /* | 1890 | /* livepatching wants to disable read-only so it can frob the module. */ |
| 1896 | * Set RO for module text and RO-data: | 1891 | void module_disable_ro(const struct module *mod) |
| 1897 | * - Always protect first page. | 1892 | { |
| 1898 | * - Do not protect last partial page. | 1893 | frob_text(&mod->core_layout, set_memory_rw); |
| 1899 | */ | 1894 | frob_rodata(&mod->core_layout, set_memory_rw); |
| 1900 | if (ro_size > 0) | 1895 | frob_text(&mod->init_layout, set_memory_rw); |
| 1901 | set_page_attributes(base, base + ro_size, set_memory_ro); | 1896 | frob_rodata(&mod->init_layout, set_memory_rw); |
| 1897 | } | ||
| 1902 | 1898 | ||
| 1903 | /* | 1899 | void module_enable_ro(const struct module *mod) |
| 1904 | * Set NX permissions for module data: | 1900 | { |
| 1905 | * - Do not protect first partial page. | 1901 | frob_text(&mod->core_layout, set_memory_ro); |
| 1906 | * - Always protect last page. | 1902 | frob_rodata(&mod->core_layout, set_memory_ro); |
| 1907 | */ | 1903 | frob_text(&mod->init_layout, set_memory_ro); |
| 1908 | if (total_size > text_size) { | 1904 | frob_rodata(&mod->init_layout, set_memory_ro); |
| 1909 | begin_pfn = PFN_UP((unsigned long)base + text_size); | ||
| 1910 | end_pfn = PFN_UP((unsigned long)base + total_size); | ||
| 1911 | if (end_pfn > begin_pfn) | ||
| 1912 | set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); | ||
| 1913 | } | ||
| 1914 | } | 1905 | } |
| 1915 | 1906 | ||
| 1916 | static void unset_module_core_ro_nx(struct module *mod) | 1907 | static void module_enable_nx(const struct module *mod) |
| 1917 | { | 1908 | { |
| 1918 | set_page_attributes(mod->module_core + mod->core_text_size, | 1909 | frob_rodata(&mod->core_layout, set_memory_nx); |
| 1919 | mod->module_core + mod->core_size, | 1910 | frob_writable_data(&mod->core_layout, set_memory_nx); |
| 1920 | set_memory_x); | 1911 | frob_rodata(&mod->init_layout, set_memory_nx); |
| 1921 | set_page_attributes(mod->module_core, | 1912 | frob_writable_data(&mod->init_layout, set_memory_nx); |
| 1922 | mod->module_core + mod->core_ro_size, | ||
| 1923 | set_memory_rw); | ||
| 1924 | } | 1913 | } |
| 1925 | 1914 | ||
| 1926 | static void unset_module_init_ro_nx(struct module *mod) | 1915 | static void module_disable_nx(const struct module *mod) |
| 1927 | { | 1916 | { |
| 1928 | set_page_attributes(mod->module_init + mod->init_text_size, | 1917 | frob_rodata(&mod->core_layout, set_memory_x); |
| 1929 | mod->module_init + mod->init_size, | 1918 | frob_writable_data(&mod->core_layout, set_memory_x); |
| 1930 | set_memory_x); | 1919 | frob_rodata(&mod->init_layout, set_memory_x); |
| 1931 | set_page_attributes(mod->module_init, | 1920 | frob_writable_data(&mod->init_layout, set_memory_x); |
| 1932 | mod->module_init + mod->init_ro_size, | ||
| 1933 | set_memory_rw); | ||
| 1934 | } | 1921 | } |
| 1935 | 1922 | ||
| 1936 | /* Iterate through all modules and set each module's text as RW */ | 1923 | /* Iterate through all modules and set each module's text as RW */ |
| @@ -1942,16 +1929,9 @@ void set_all_modules_text_rw(void) | |||
| 1942 | list_for_each_entry_rcu(mod, &modules, list) { | 1929 | list_for_each_entry_rcu(mod, &modules, list) { |
| 1943 | if (mod->state == MODULE_STATE_UNFORMED) | 1930 | if (mod->state == MODULE_STATE_UNFORMED) |
| 1944 | continue; | 1931 | continue; |
| 1945 | if ((mod->module_core) && (mod->core_text_size)) { | 1932 | |
| 1946 | set_page_attributes(mod->module_core, | 1933 | frob_text(&mod->core_layout, set_memory_rw); |
| 1947 | mod->module_core + mod->core_text_size, | 1934 | frob_text(&mod->init_layout, set_memory_rw); |
| 1948 | set_memory_rw); | ||
| 1949 | } | ||
| 1950 | if ((mod->module_init) && (mod->init_text_size)) { | ||
| 1951 | set_page_attributes(mod->module_init, | ||
| 1952 | mod->module_init + mod->init_text_size, | ||
| 1953 | set_memory_rw); | ||
| 1954 | } | ||
| 1955 | } | 1935 | } |
| 1956 | mutex_unlock(&module_mutex); | 1936 | mutex_unlock(&module_mutex); |
| 1957 | } | 1937 | } |
| @@ -1965,23 +1945,25 @@ void set_all_modules_text_ro(void) | |||
| 1965 | list_for_each_entry_rcu(mod, &modules, list) { | 1945 | list_for_each_entry_rcu(mod, &modules, list) { |
| 1966 | if (mod->state == MODULE_STATE_UNFORMED) | 1946 | if (mod->state == MODULE_STATE_UNFORMED) |
| 1967 | continue; | 1947 | continue; |
| 1968 | if ((mod->module_core) && (mod->core_text_size)) { | 1948 | |
| 1969 | set_page_attributes(mod->module_core, | 1949 | frob_text(&mod->core_layout, set_memory_ro); |
| 1970 | mod->module_core + mod->core_text_size, | 1950 | frob_text(&mod->init_layout, set_memory_ro); |
| 1971 | set_memory_ro); | ||
| 1972 | } | ||
| 1973 | if ((mod->module_init) && (mod->init_text_size)) { | ||
| 1974 | set_page_attributes(mod->module_init, | ||
| 1975 | mod->module_init + mod->init_text_size, | ||
| 1976 | set_memory_ro); | ||
| 1977 | } | ||
| 1978 | } | 1951 | } |
| 1979 | mutex_unlock(&module_mutex); | 1952 | mutex_unlock(&module_mutex); |
| 1980 | } | 1953 | } |
| 1954 | |||
| 1955 | static void disable_ro_nx(const struct module_layout *layout) | ||
| 1956 | { | ||
| 1957 | frob_text(layout, set_memory_rw); | ||
| 1958 | frob_rodata(layout, set_memory_rw); | ||
| 1959 | frob_rodata(layout, set_memory_x); | ||
| 1960 | frob_writable_data(layout, set_memory_x); | ||
| 1961 | } | ||
| 1962 | |||
| 1981 | #else | 1963 | #else |
| 1982 | static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } | 1964 | static void disable_ro_nx(const struct module_layout *layout) { } |
| 1983 | static void unset_module_core_ro_nx(struct module *mod) { } | 1965 | static void module_enable_nx(const struct module *mod) { } |
| 1984 | static void unset_module_init_ro_nx(struct module *mod) { } | 1966 | static void module_disable_nx(const struct module *mod) { } |
| 1985 | #endif | 1967 | #endif |
| 1986 | 1968 | ||
| 1987 | void __weak module_memfree(void *module_region) | 1969 | void __weak module_memfree(void *module_region) |
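The frob_* helpers above carve each module_layout into three page-aligned regions, so every RO/NX operation becomes "pick a region, pick a set_memory_*() callback". A worked example of the documented invariants, with hypothetical sizes (text = 3 pages, rodata = 1 page, writable data = 2 pages):

    /* hypothetical module_layout, all values page-aligned:
     *   text_size = 3 * PAGE_SIZE
     *   ro_size   = text_size + 1 * PAGE_SIZE   (text + rodata)
     *   size      = ro_size + 2 * PAGE_SIZE     (+ writable data)
     *
     * frob_text()          covers pages 0..2: [base, base + text_size)
     * frob_rodata()        covers page  3:    [base + text_size, base + ro_size)
     * frob_writable_data() covers pages 4..5: [base + ro_size, base + size)
     */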
| @@ -2033,19 +2015,19 @@ static void free_module(struct module *mod) | |||
| 2033 | synchronize_sched(); | 2015 | synchronize_sched(); |
| 2034 | mutex_unlock(&module_mutex); | 2016 | mutex_unlock(&module_mutex); |
| 2035 | 2017 | ||
| 2036 | /* This may be NULL, but that's OK */ | 2018 | /* This may be empty, but that's OK */ |
| 2037 | unset_module_init_ro_nx(mod); | 2019 | disable_ro_nx(&mod->init_layout); |
| 2038 | module_arch_freeing_init(mod); | 2020 | module_arch_freeing_init(mod); |
| 2039 | module_memfree(mod->module_init); | 2021 | module_memfree(mod->init_layout.base); |
| 2040 | kfree(mod->args); | 2022 | kfree(mod->args); |
| 2041 | percpu_modfree(mod); | 2023 | percpu_modfree(mod); |
| 2042 | 2024 | ||
| 2043 | /* Free lock-classes; relies on the preceding sync_rcu(). */ | 2025 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
| 2044 | lockdep_free_key_range(mod->module_core, mod->core_size); | 2026 | lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); |
| 2045 | 2027 | ||
| 2046 | /* Finally, free the core (containing the module structure) */ | 2028 | /* Finally, free the core (containing the module structure) */ |
| 2047 | unset_module_core_ro_nx(mod); | 2029 | disable_ro_nx(&mod->core_layout); |
| 2048 | module_memfree(mod->module_core); | 2030 | module_memfree(mod->core_layout.base); |
| 2049 | 2031 | ||
| 2050 | #ifdef CONFIG_MPU | 2032 | #ifdef CONFIG_MPU |
| 2051 | update_protections(current->mm); | 2033 | update_protections(current->mm); |
| @@ -2248,20 +2230,20 @@ static void layout_sections(struct module *mod, struct load_info *info) | |||
| 2248 | || s->sh_entsize != ~0UL | 2230 | || s->sh_entsize != ~0UL |
| 2249 | || strstarts(sname, ".init")) | 2231 | || strstarts(sname, ".init")) |
| 2250 | continue; | 2232 | continue; |
| 2251 | s->sh_entsize = get_offset(mod, &mod->core_size, s, i); | 2233 | s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); |
| 2252 | pr_debug("\t%s\n", sname); | 2234 | pr_debug("\t%s\n", sname); |
| 2253 | } | 2235 | } |
| 2254 | switch (m) { | 2236 | switch (m) { |
| 2255 | case 0: /* executable */ | 2237 | case 0: /* executable */ |
| 2256 | mod->core_size = debug_align(mod->core_size); | 2238 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2257 | mod->core_text_size = mod->core_size; | 2239 | mod->core_layout.text_size = mod->core_layout.size; |
| 2258 | break; | 2240 | break; |
| 2259 | case 1: /* RO: text and ro-data */ | 2241 | case 1: /* RO: text and ro-data */ |
| 2260 | mod->core_size = debug_align(mod->core_size); | 2242 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2261 | mod->core_ro_size = mod->core_size; | 2243 | mod->core_layout.ro_size = mod->core_layout.size; |
| 2262 | break; | 2244 | break; |
| 2263 | case 3: /* whole core */ | 2245 | case 3: /* whole core */ |
| 2264 | mod->core_size = debug_align(mod->core_size); | 2246 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2265 | break; | 2247 | break; |
| 2266 | } | 2248 | } |
| 2267 | } | 2249 | } |
| @@ -2277,21 +2259,21 @@ static void layout_sections(struct module *mod, struct load_info *info) | |||
| 2277 | || s->sh_entsize != ~0UL | 2259 | || s->sh_entsize != ~0UL |
| 2278 | || !strstarts(sname, ".init")) | 2260 | || !strstarts(sname, ".init")) |
| 2279 | continue; | 2261 | continue; |
| 2280 | s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) | 2262 | s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) |
| 2281 | | INIT_OFFSET_MASK); | 2263 | | INIT_OFFSET_MASK); |
| 2282 | pr_debug("\t%s\n", sname); | 2264 | pr_debug("\t%s\n", sname); |
| 2283 | } | 2265 | } |
| 2284 | switch (m) { | 2266 | switch (m) { |
| 2285 | case 0: /* executable */ | 2267 | case 0: /* executable */ |
| 2286 | mod->init_size = debug_align(mod->init_size); | 2268 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2287 | mod->init_text_size = mod->init_size; | 2269 | mod->init_layout.text_size = mod->init_layout.size; |
| 2288 | break; | 2270 | break; |
| 2289 | case 1: /* RO: text and ro-data */ | 2271 | case 1: /* RO: text and ro-data */ |
| 2290 | mod->init_size = debug_align(mod->init_size); | 2272 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2291 | mod->init_ro_size = mod->init_size; | 2273 | mod->init_layout.ro_size = mod->init_layout.size; |
| 2292 | break; | 2274 | break; |
| 2293 | case 3: /* whole init */ | 2275 | case 3: /* whole init */ |
| 2294 | mod->init_size = debug_align(mod->init_size); | 2276 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2295 | break; | 2277 | break; |
| 2296 | } | 2278 | } |
| 2297 | } | 2279 | } |
| @@ -2401,7 +2383,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
| 2401 | } | 2383 | } |
| 2402 | if (sym->st_shndx == SHN_UNDEF) | 2384 | if (sym->st_shndx == SHN_UNDEF) |
| 2403 | return 'U'; | 2385 | return 'U'; |
| 2404 | if (sym->st_shndx == SHN_ABS) | 2386 | if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu) |
| 2405 | return 'a'; | 2387 | return 'a'; |
| 2406 | if (sym->st_shndx >= SHN_LORESERVE) | 2388 | if (sym->st_shndx >= SHN_LORESERVE) |
| 2407 | return '?'; | 2389 | return '?'; |
| @@ -2430,7 +2412,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info) | |||
| 2430 | } | 2412 | } |
| 2431 | 2413 | ||
| 2432 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | 2414 | static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, |
| 2433 | unsigned int shnum) | 2415 | unsigned int shnum, unsigned int pcpundx) |
| 2434 | { | 2416 | { |
| 2435 | const Elf_Shdr *sec; | 2417 | const Elf_Shdr *sec; |
| 2436 | 2418 | ||
| @@ -2439,6 +2421,11 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs, | |||
| 2439 | || !src->st_name) | 2421 | || !src->st_name) |
| 2440 | return false; | 2422 | return false; |
| 2441 | 2423 | ||
| 2424 | #ifdef CONFIG_KALLSYMS_ALL | ||
| 2425 | if (src->st_shndx == pcpundx) | ||
| 2426 | return true; | ||
| 2427 | #endif | ||
| 2428 | |||
| 2442 | sec = sechdrs + src->st_shndx; | 2429 | sec = sechdrs + src->st_shndx; |
| 2443 | if (!(sec->sh_flags & SHF_ALLOC) | 2430 | if (!(sec->sh_flags & SHF_ALLOC) |
| 2444 | #ifndef CONFIG_KALLSYMS_ALL | 2431 | #ifndef CONFIG_KALLSYMS_ALL |
| @@ -2466,7 +2453,7 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
| 2466 | 2453 | ||
| 2467 | /* Put symbol section at end of init part of module. */ | 2454 | /* Put symbol section at end of init part of module. */ |
| 2468 | symsect->sh_flags |= SHF_ALLOC; | 2455 | symsect->sh_flags |= SHF_ALLOC; |
| 2469 | symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, | 2456 | symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect, |
| 2470 | info->index.sym) | INIT_OFFSET_MASK; | 2457 | info->index.sym) | INIT_OFFSET_MASK; |
| 2471 | pr_debug("\t%s\n", info->secstrings + symsect->sh_name); | 2458 | pr_debug("\t%s\n", info->secstrings + symsect->sh_name); |
| 2472 | 2459 | ||
| @@ -2476,23 +2463,24 @@ static void layout_symtab(struct module *mod, struct load_info *info) | |||
| 2476 | /* Compute total space required for the core symbols' strtab. */ | 2463 | /* Compute total space required for the core symbols' strtab. */ |
| 2477 | for (ndst = i = 0; i < nsrc; i++) { | 2464 | for (ndst = i = 0; i < nsrc; i++) { |
| 2478 | if (i == 0 || | 2465 | if (i == 0 || |
| 2479 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2466 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
| 2467 | info->index.pcpu)) { | ||
| 2480 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; | 2468 | strtab_size += strlen(&info->strtab[src[i].st_name])+1; |
| 2481 | ndst++; | 2469 | ndst++; |
| 2482 | } | 2470 | } |
| 2483 | } | 2471 | } |
| 2484 | 2472 | ||
| 2485 | /* Append room for core symbols at end of core part. */ | 2473 | /* Append room for core symbols at end of core part. */ |
| 2486 | info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); | 2474 | info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); |
| 2487 | info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym); | 2475 | info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); |
| 2488 | mod->core_size += strtab_size; | 2476 | mod->core_layout.size += strtab_size; |
| 2489 | mod->core_size = debug_align(mod->core_size); | 2477 | mod->core_layout.size = debug_align(mod->core_layout.size); |
| 2490 | 2478 | ||
| 2491 | /* Put string table section at end of init part of module. */ | 2479 | /* Put string table section at end of init part of module. */ |
| 2492 | strsect->sh_flags |= SHF_ALLOC; | 2480 | strsect->sh_flags |= SHF_ALLOC; |
| 2493 | strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, | 2481 | strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, |
| 2494 | info->index.str) | INIT_OFFSET_MASK; | 2482 | info->index.str) | INIT_OFFSET_MASK; |
| 2495 | mod->init_size = debug_align(mod->init_size); | 2483 | mod->init_layout.size = debug_align(mod->init_layout.size); |
| 2496 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); | 2484 | pr_debug("\t%s\n", info->secstrings + strsect->sh_name); |
| 2497 | } | 2485 | } |
| 2498 | 2486 | ||
| @@ -2513,12 +2501,13 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) | |||
| 2513 | for (i = 0; i < mod->num_symtab; i++) | 2501 | for (i = 0; i < mod->num_symtab; i++) |
| 2514 | mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); | 2502 | mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); |
| 2515 | 2503 | ||
| 2516 | mod->core_symtab = dst = mod->module_core + info->symoffs; | 2504 | mod->core_symtab = dst = mod->core_layout.base + info->symoffs; |
| 2517 | mod->core_strtab = s = mod->module_core + info->stroffs; | 2505 | mod->core_strtab = s = mod->core_layout.base + info->stroffs; |
| 2518 | src = mod->symtab; | 2506 | src = mod->symtab; |
| 2519 | for (ndst = i = 0; i < mod->num_symtab; i++) { | 2507 | for (ndst = i = 0; i < mod->num_symtab; i++) { |
| 2520 | if (i == 0 || | 2508 | if (i == 0 || |
| 2521 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { | 2509 | is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, |
| 2510 | info->index.pcpu)) { | ||
| 2522 | dst[ndst] = src[i]; | 2511 | dst[ndst] = src[i]; |
| 2523 | dst[ndst++].st_name = s - mod->core_strtab; | 2512 | dst[ndst++].st_name = s - mod->core_strtab; |
| 2524 | s += strlcpy(s, &mod->strtab[src[i].st_name], | 2513 | s += strlcpy(s, &mod->strtab[src[i].st_name], |
| @@ -2964,7 +2953,7 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2964 | void *ptr; | 2953 | void *ptr; |
| 2965 | 2954 | ||
| 2966 | /* Do the allocs. */ | 2955 | /* Do the allocs. */ |
| 2967 | ptr = module_alloc(mod->core_size); | 2956 | ptr = module_alloc(mod->core_layout.size); |
| 2968 | /* | 2957 | /* |
| 2969 | * The pointer to this block is stored in the module structure | 2958 | * The pointer to this block is stored in the module structure |
| 2970 | * which is inside the block. Just mark it as not being a | 2959 | * which is inside the block. Just mark it as not being a |
| @@ -2974,11 +2963,11 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2974 | if (!ptr) | 2963 | if (!ptr) |
| 2975 | return -ENOMEM; | 2964 | return -ENOMEM; |
| 2976 | 2965 | ||
| 2977 | memset(ptr, 0, mod->core_size); | 2966 | memset(ptr, 0, mod->core_layout.size); |
| 2978 | mod->module_core = ptr; | 2967 | mod->core_layout.base = ptr; |
| 2979 | 2968 | ||
| 2980 | if (mod->init_size) { | 2969 | if (mod->init_layout.size) { |
| 2981 | ptr = module_alloc(mod->init_size); | 2970 | ptr = module_alloc(mod->init_layout.size); |
| 2982 | /* | 2971 | /* |
| 2983 | * The pointer to this block is stored in the module structure | 2972 | * The pointer to this block is stored in the module structure |
| 2984 | * which is inside the block. This block doesn't need to be | 2973 | * which is inside the block. This block doesn't need to be |
| @@ -2987,13 +2976,13 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 2987 | */ | 2976 | */ |
| 2988 | kmemleak_ignore(ptr); | 2977 | kmemleak_ignore(ptr); |
| 2989 | if (!ptr) { | 2978 | if (!ptr) { |
| 2990 | module_memfree(mod->module_core); | 2979 | module_memfree(mod->core_layout.base); |
| 2991 | return -ENOMEM; | 2980 | return -ENOMEM; |
| 2992 | } | 2981 | } |
| 2993 | memset(ptr, 0, mod->init_size); | 2982 | memset(ptr, 0, mod->init_layout.size); |
| 2994 | mod->module_init = ptr; | 2983 | mod->init_layout.base = ptr; |
| 2995 | } else | 2984 | } else |
| 2996 | mod->module_init = NULL; | 2985 | mod->init_layout.base = NULL; |
| 2997 | 2986 | ||
| 2998 | /* Transfer each section which specifies SHF_ALLOC */ | 2987 | /* Transfer each section which specifies SHF_ALLOC */ |
| 2999 | pr_debug("final section addresses:\n"); | 2988 | pr_debug("final section addresses:\n"); |
| @@ -3005,10 +2994,10 @@ static int move_module(struct module *mod, struct load_info *info) | |||
| 3005 | continue; | 2994 | continue; |
| 3006 | 2995 | ||
| 3007 | if (shdr->sh_entsize & INIT_OFFSET_MASK) | 2996 | if (shdr->sh_entsize & INIT_OFFSET_MASK) |
| 3008 | dest = mod->module_init | 2997 | dest = mod->init_layout.base |
| 3009 | + (shdr->sh_entsize & ~INIT_OFFSET_MASK); | 2998 | + (shdr->sh_entsize & ~INIT_OFFSET_MASK); |
| 3010 | else | 2999 | else |
| 3011 | dest = mod->module_core + shdr->sh_entsize; | 3000 | dest = mod->core_layout.base + shdr->sh_entsize; |
| 3012 | 3001 | ||
| 3013 | if (shdr->sh_type != SHT_NOBITS) | 3002 | if (shdr->sh_type != SHT_NOBITS) |
| 3014 | memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); | 3003 | memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size); |
| @@ -3070,12 +3059,12 @@ static void flush_module_icache(const struct module *mod) | |||
| 3070 | * Do it before processing of module parameters, so the module | 3059 | * Do it before processing of module parameters, so the module |
| 3071 | * can provide parameter accessor functions of its own. | 3060 | * can provide parameter accessor functions of its own. |
| 3072 | */ | 3061 | */ |
| 3073 | if (mod->module_init) | 3062 | if (mod->init_layout.base) |
| 3074 | flush_icache_range((unsigned long)mod->module_init, | 3063 | flush_icache_range((unsigned long)mod->init_layout.base, |
| 3075 | (unsigned long)mod->module_init | 3064 | (unsigned long)mod->init_layout.base |
| 3076 | + mod->init_size); | 3065 | + mod->init_layout.size); |
| 3077 | flush_icache_range((unsigned long)mod->module_core, | 3066 | flush_icache_range((unsigned long)mod->core_layout.base, |
| 3078 | (unsigned long)mod->module_core + mod->core_size); | 3067 | (unsigned long)mod->core_layout.base + mod->core_layout.size); |
| 3079 | 3068 | ||
| 3080 | set_fs(old_fs); | 3069 | set_fs(old_fs); |
| 3081 | } | 3070 | } |
| @@ -3133,8 +3122,8 @@ static void module_deallocate(struct module *mod, struct load_info *info) | |||
| 3133 | { | 3122 | { |
| 3134 | percpu_modfree(mod); | 3123 | percpu_modfree(mod); |
| 3135 | module_arch_freeing_init(mod); | 3124 | module_arch_freeing_init(mod); |
| 3136 | module_memfree(mod->module_init); | 3125 | module_memfree(mod->init_layout.base); |
| 3137 | module_memfree(mod->module_core); | 3126 | module_memfree(mod->core_layout.base); |
| 3138 | } | 3127 | } |
| 3139 | 3128 | ||
| 3140 | int __weak module_finalize(const Elf_Ehdr *hdr, | 3129 | int __weak module_finalize(const Elf_Ehdr *hdr, |
| @@ -3221,7 +3210,7 @@ static noinline int do_init_module(struct module *mod) | |||
| 3221 | ret = -ENOMEM; | 3210 | ret = -ENOMEM; |
| 3222 | goto fail; | 3211 | goto fail; |
| 3223 | } | 3212 | } |
| 3224 | freeinit->module_init = mod->module_init; | 3213 | freeinit->module_init = mod->init_layout.base; |
| 3225 | 3214 | ||
| 3226 | /* | 3215 | /* |
| 3227 | * We want to find out whether @mod uses async during init. Clear | 3216 | * We want to find out whether @mod uses async during init. Clear |
| @@ -3279,12 +3268,12 @@ static noinline int do_init_module(struct module *mod) | |||
| 3279 | mod->strtab = mod->core_strtab; | 3268 | mod->strtab = mod->core_strtab; |
| 3280 | #endif | 3269 | #endif |
| 3281 | mod_tree_remove_init(mod); | 3270 | mod_tree_remove_init(mod); |
| 3282 | unset_module_init_ro_nx(mod); | 3271 | disable_ro_nx(&mod->init_layout); |
| 3283 | module_arch_freeing_init(mod); | 3272 | module_arch_freeing_init(mod); |
| 3284 | mod->module_init = NULL; | 3273 | mod->init_layout.base = NULL; |
| 3285 | mod->init_size = 0; | 3274 | mod->init_layout.size = 0; |
| 3286 | mod->init_ro_size = 0; | 3275 | mod->init_layout.ro_size = 0; |
| 3287 | mod->init_text_size = 0; | 3276 | mod->init_layout.text_size = 0; |
| 3288 | /* | 3277 | /* |
| 3289 | * We want to free module_init, but be aware that kallsyms may be | 3278 | * We want to free module_init, but be aware that kallsyms may be |
| 3290 | * walking this with preempt disabled. In all the failure paths, we | 3279 | * walking this with preempt disabled. In all the failure paths, we |
| @@ -3373,17 +3362,9 @@ static int complete_formation(struct module *mod, struct load_info *info) | |||
| 3373 | /* This relies on module_mutex for list integrity. */ | 3362 | /* This relies on module_mutex for list integrity. */ |
| 3374 | module_bug_finalize(info->hdr, info->sechdrs, mod); | 3363 | module_bug_finalize(info->hdr, info->sechdrs, mod); |
| 3375 | 3364 | ||
| 3376 | /* Set RO and NX regions for core */ | 3365 | /* Set RO and NX regions */ |
| 3377 | set_section_ro_nx(mod->module_core, | 3366 | module_enable_ro(mod); |
| 3378 | mod->core_text_size, | 3367 | module_enable_nx(mod); |
| 3379 | mod->core_ro_size, | ||
| 3380 | mod->core_size); | ||
| 3381 | |||
| 3382 | /* Set RO and NX regions for init */ | ||
| 3383 | set_section_ro_nx(mod->module_init, | ||
| 3384 | mod->init_text_size, | ||
| 3385 | mod->init_ro_size, | ||
| 3386 | mod->init_size); | ||
| 3387 | 3368 | ||
| 3388 | /* Mark state as coming so strong_try_module_get() ignores us, | 3369 | /* Mark state as coming so strong_try_module_get() ignores us, |
| 3389 | * but kallsyms etc. can see us. */ | 3370 | * but kallsyms etc. can see us. */ |
| @@ -3548,8 +3529,8 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3548 | MODULE_STATE_GOING, mod); | 3529 | MODULE_STATE_GOING, mod); |
| 3549 | 3530 | ||
| 3550 | /* we can't deallocate the module until we clear memory protection */ | 3531 | /* we can't deallocate the module until we clear memory protection */ |
| 3551 | unset_module_init_ro_nx(mod); | 3532 | module_disable_ro(mod); |
| 3552 | unset_module_core_ro_nx(mod); | 3533 | module_disable_nx(mod); |
| 3553 | 3534 | ||
| 3554 | ddebug_cleanup: | 3535 | ddebug_cleanup: |
| 3555 | dynamic_debug_remove(info->debug); | 3536 | dynamic_debug_remove(info->debug); |
| @@ -3572,7 +3553,7 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
| 3572 | mutex_unlock(&module_mutex); | 3553 | mutex_unlock(&module_mutex); |
| 3573 | free_module: | 3554 | free_module: |
| 3574 | /* Free lock-classes; relies on the preceding sync_rcu() */ | 3555 | /* Free lock-classes; relies on the preceding sync_rcu() */ |
| 3575 | lockdep_free_key_range(mod->module_core, mod->core_size); | 3556 | lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); |
| 3576 | 3557 | ||
| 3577 | module_deallocate(mod, info); | 3558 | module_deallocate(mod, info); |
| 3578 | free_copy: | 3559 | free_copy: |
| @@ -3650,9 +3631,9 @@ static const char *get_ksymbol(struct module *mod, | |||
| 3650 | 3631 | ||
| 3651 | /* At worst, next value is at end of module */ | 3632 | /* At worst, next value is at end of module */ |
| 3652 | if (within_module_init(addr, mod)) | 3633 | if (within_module_init(addr, mod)) |
| 3653 | nextval = (unsigned long)mod->module_init+mod->init_text_size; | 3634 | nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size; |
| 3654 | else | 3635 | else |
| 3655 | nextval = (unsigned long)mod->module_core+mod->core_text_size; | 3636 | nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; |
| 3656 | 3637 | ||
| 3657 | /* Scan for closest preceding symbol, and next symbol. (ELF | 3638 | /* Scan for closest preceding symbol, and next symbol. (ELF |
| 3658 | starts real symbols at 1). */ | 3639 | starts real symbols at 1). */ |
| @@ -3899,7 +3880,7 @@ static int m_show(struct seq_file *m, void *p) | |||
| 3899 | return 0; | 3880 | return 0; |
| 3900 | 3881 | ||
| 3901 | seq_printf(m, "%s %u", | 3882 | seq_printf(m, "%s %u", |
| 3902 | mod->name, mod->init_size + mod->core_size); | 3883 | mod->name, mod->init_layout.size + mod->core_layout.size); |
| 3903 | print_unload_info(m, mod); | 3884 | print_unload_info(m, mod); |
| 3904 | 3885 | ||
| 3905 | /* Informative for users. */ | 3886 | /* Informative for users. */ |
| @@ -3908,7 +3889,7 @@ static int m_show(struct seq_file *m, void *p) | |||
| 3908 | mod->state == MODULE_STATE_COMING ? "Loading" : | 3889 | mod->state == MODULE_STATE_COMING ? "Loading" : |
| 3909 | "Live"); | 3890 | "Live"); |
| 3910 | /* Used by oprofile and other similar tools. */ | 3891 | /* Used by oprofile and other similar tools. */ |
| 3911 | seq_printf(m, " 0x%pK", mod->module_core); | 3892 | seq_printf(m, " 0x%pK", mod->core_layout.base); |
| 3912 | 3893 | ||
| 3913 | /* Taints info */ | 3894 | /* Taints info */ |
| 3914 | if (mod->taints) | 3895 | if (mod->taints) |
| @@ -4051,8 +4032,8 @@ struct module *__module_text_address(unsigned long addr) | |||
| 4051 | struct module *mod = __module_address(addr); | 4032 | struct module *mod = __module_address(addr); |
| 4052 | if (mod) { | 4033 | if (mod) { |
| 4053 | /* Make sure it's within the text section. */ | 4034 | /* Make sure it's within the text section. */ |
| 4054 | if (!within(addr, mod->module_init, mod->init_text_size) | 4035 | if (!within(addr, mod->init_layout.base, mod->init_layout.text_size) |
| 4055 | && !within(addr, mod->module_core, mod->core_text_size)) | 4036 | && !within(addr, mod->core_layout.base, mod->core_layout.text_size)) |
| 4056 | mod = NULL; | 4037 | mod = NULL; |
| 4057 | } | 4038 | } |
| 4058 | return mod; | 4039 | return mod; |
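
Taken together, the kernel/module.c hunks above are one mechanical refactor: the six loose fields (module_core, module_init, and the core/init size, text_size and ro_size counters) collapse into two instances of a single layout type, one for the core region and one for the init region. A minimal sketch of that type, inferred from the accessors used in the hunks (the authoritative definition lives in include/linux/module.h):

    struct module_layout {
            void *base;             /* memory returned by module_alloc() */
            unsigned int size;      /* total size of the region */
            unsigned int text_size; /* [base, base+text_size) stays executable */
            unsigned int ro_size;   /* [base, base+ro_size) becomes read-only */
    };

With both regions described uniformly, the RO/NX helpers can take a whole module as a single argument, which is why the two open-coded set_section_ro_nx() calls in complete_formation() shrink to module_enable_ro(mod) and module_enable_nx(mod).
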
diff --git a/kernel/panic.c b/kernel/panic.c index 4579dbb7ed87..4b150bc0c6c1 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
| @@ -152,8 +152,11 @@ void panic(const char *fmt, ...) | |||
| 152 | * We may have ended up stopping the CPU holding the lock (in | 152 | * We may have ended up stopping the CPU holding the lock (in |
| 153 | * smp_send_stop()) while still having some valuable data in the console | 153 | * smp_send_stop()) while still having some valuable data in the console |
| 154 | * buffer. Try to acquire the lock then release it regardless of the | 154 | * buffer. Try to acquire the lock then release it regardless of the |
| 155 | * result. The release will also print the buffers out. | 155 | * result. The release will also print the buffers out. Lock debugging |
| 156 | * should be disabled to avoid reporting bad unlock balance when | ||
| 157 | * panic() is not being called from OOPS. | ||
| 156 | */ | 158 | */ |
| 159 | debug_locks_off(); | ||
| 157 | console_trylock(); | 160 | console_trylock(); |
| 158 | console_unlock(); | 161 | console_unlock(); |
| 159 | 162 | ||
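
Isolated from the hunk, the panic-time flush idiom now reads as below; this is only a sketch of the three calls in sequence. console_unlock() does the actual printing whether or not the trylock succeeded, and debug_locks_off() keeps lockdep from complaining about the deliberately unbalanced unlock:

    debug_locks_off();      /* silence "bad unlock balance" reports */
    console_trylock();      /* may fail: a stopped CPU can still hold the lock */
    console_unlock();       /* release path flushes any buffered messages */
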
diff --git a/kernel/pid.c b/kernel/pid.c index ca368793808e..78b3d9f80d44 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
| @@ -467,7 +467,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type) | |||
| 467 | rcu_read_lock(); | 467 | rcu_read_lock(); |
| 468 | if (type != PIDTYPE_PID) | 468 | if (type != PIDTYPE_PID) |
| 469 | task = task->group_leader; | 469 | task = task->group_leader; |
| 470 | pid = get_pid(task->pids[type].pid); | 470 | pid = get_pid(rcu_dereference(task->pids[type].pid)); |
| 471 | rcu_read_unlock(); | 471 | rcu_read_unlock(); |
| 472 | return pid; | 472 | return pid; |
| 473 | } | 473 | } |
| @@ -528,7 +528,7 @@ pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | |||
| 528 | if (likely(pid_alive(task))) { | 528 | if (likely(pid_alive(task))) { |
| 529 | if (type != PIDTYPE_PID) | 529 | if (type != PIDTYPE_PID) |
| 530 | task = task->group_leader; | 530 | task = task->group_leader; |
| 531 | nr = pid_nr_ns(task->pids[type].pid, ns); | 531 | nr = pid_nr_ns(rcu_dereference(task->pids[type].pid), ns); |
| 532 | } | 532 | } |
| 533 | rcu_read_unlock(); | 533 | rcu_read_unlock(); |
| 534 | 534 | ||
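
Both kernel/pid.c changes enforce the standard RCU read-side pattern: a pointer published under RCU must be fetched with rcu_dereference() inside the read-side critical section, not with a plain load that the compiler may tear or reorder. The shape of the corrected access, as used in get_task_pid() above:

    rcu_read_lock();
    pid = get_pid(rcu_dereference(task->pids[type].pid)); /* take a reference before unlock */
    rcu_read_unlock();
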
diff --git a/kernel/signal.c b/kernel/signal.c index c0b01fe24bbd..f3f1f7a972fd 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -3503,7 +3503,7 @@ SYSCALL_DEFINE0(pause) | |||
| 3503 | 3503 | ||
| 3504 | #endif | 3504 | #endif |
| 3505 | 3505 | ||
| 3506 | int sigsuspend(sigset_t *set) | 3506 | static int sigsuspend(sigset_t *set) |
| 3507 | { | 3507 | { |
| 3508 | current->saved_sigmask = current->blocked; | 3508 | current->saved_sigmask = current->blocked; |
| 3509 | set_current_blocked(set); | 3509 | set_current_blocked(set); |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c29ddebc8705..62fe06bb7d04 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -2009,7 +2009,7 @@ int hugepage_madvise(struct vm_area_struct *vma, | |||
| 2009 | /* | 2009 | /* |
| 2010 | * Be somewhat over-protective like KSM for now! | 2010 | * Be somewhat over-protective like KSM for now! |
| 2011 | */ | 2011 | */ |
| 2012 | if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP)) | 2012 | if (*vm_flags & VM_NO_THP) |
| 2013 | return -EINVAL; | 2013 | return -EINVAL; |
| 2014 | *vm_flags &= ~VM_NOHUGEPAGE; | 2014 | *vm_flags &= ~VM_NOHUGEPAGE; |
| 2015 | *vm_flags |= VM_HUGEPAGE; | 2015 | *vm_flags |= VM_HUGEPAGE; |
| @@ -2025,7 +2025,7 @@ int hugepage_madvise(struct vm_area_struct *vma, | |||
| 2025 | /* | 2025 | /* |
| 2026 | * Be somewhat over-protective like KSM for now! | 2026 | * Be somewhat over-protective like KSM for now! |
| 2027 | */ | 2027 | */ |
| 2028 | if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP)) | 2028 | if (*vm_flags & VM_NO_THP) |
| 2029 | return -EINVAL; | 2029 | return -EINVAL; |
| 2030 | *vm_flags &= ~VM_HUGEPAGE; | 2030 | *vm_flags &= ~VM_HUGEPAGE; |
| 2031 | *vm_flags |= VM_NOHUGEPAGE; | 2031 | *vm_flags |= VM_NOHUGEPAGE; |
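
Dropping VM_HUGEPAGE and VM_NOHUGEPAGE from the rejection masks makes the madvise hints idempotent; only vmas with VM_NO_THP still fail. A hypothetical userspace caller illustrating the behavior change:

    #include <sys/mman.h>

    /* Hypothetical caller: before this patch the second call returned
     * -EINVAL because VM_HUGEPAGE was already set; now both succeed. */
    int hint_thp_twice(void *addr, size_t len)
    {
            if (madvise(addr, len, MADV_HUGEPAGE))
                    return -1;
            return madvise(addr, len, MADV_HUGEPAGE);  /* no longer fails */
    }
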
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index d41b21bce6a0..bc0a8d8b8f42 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/export.h> | 19 | #include <linux/export.h> |
| 20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
| 21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
| 22 | #include <linux/kmemleak.h> | ||
| 22 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
| 23 | #include <linux/memory.h> | 24 | #include <linux/memory.h> |
| 24 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
| @@ -444,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size) | |||
| 444 | 445 | ||
| 445 | if (ret) { | 446 | if (ret) { |
| 446 | find_vm_area(addr)->flags |= VM_KASAN; | 447 | find_vm_area(addr)->flags |= VM_KASAN; |
| 448 | kmemleak_ignore(ret); | ||
| 447 | return 0; | 449 | return 0; |
| 448 | } | 450 | } |
| 449 | 451 | ||
diff --git a/mm/memory.c b/mm/memory.c index deb679c31f2a..c387430f06c3 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3015,9 +3015,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3015 | } else { | 3015 | } else { |
| 3016 | /* | 3016 | /* |
| 3017 | * The fault handler has no page to lock, so it holds | 3017 | * The fault handler has no page to lock, so it holds |
| 3018 | * i_mmap_lock for write to protect against truncate. | 3018 | * i_mmap_lock for read to protect against truncate. |
| 3019 | */ | 3019 | */ |
| 3020 | i_mmap_unlock_write(vma->vm_file->f_mapping); | 3020 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
| 3021 | } | 3021 | } |
| 3022 | goto uncharge_out; | 3022 | goto uncharge_out; |
| 3023 | } | 3023 | } |
| @@ -3031,9 +3031,9 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3031 | } else { | 3031 | } else { |
| 3032 | /* | 3032 | /* |
| 3033 | * The fault handler has no page to lock, so it holds | 3033 | * The fault handler has no page to lock, so it holds |
| 3034 | * i_mmap_lock for write to protect against truncate. | 3034 | * i_mmap_lock for read to protect against truncate. |
| 3035 | */ | 3035 | */ |
| 3036 | i_mmap_unlock_write(vma->vm_file->f_mapping); | 3036 | i_mmap_unlock_read(vma->vm_file->f_mapping); |
| 3037 | } | 3037 | } |
| 3038 | return ret; | 3038 | return ret; |
| 3039 | uncharge_out: | 3039 | uncharge_out: |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 2c90357c34ea..3e4d65445fa7 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -1542,7 +1542,9 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
| 1542 | for (;;) { | 1542 | for (;;) { |
| 1543 | unsigned long now = jiffies; | 1543 | unsigned long now = jiffies; |
| 1544 | unsigned long dirty, thresh, bg_thresh; | 1544 | unsigned long dirty, thresh, bg_thresh; |
| 1545 | unsigned long m_dirty, m_thresh, m_bg_thresh; | 1545 | unsigned long m_dirty = 0; /* stop bogus uninit warnings */ |
| 1546 | unsigned long m_thresh = 0; | ||
| 1547 | unsigned long m_bg_thresh = 0; | ||
| 1546 | 1548 | ||
| 1547 | /* | 1549 | /* |
| 1548 | * Unstable writes are a feature of certain networked | 1550 | * Unstable writes are a feature of certain networked |
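
The initializers here only placate -Wmaybe-uninitialized: the assignments and the uses of m_dirty and friends in balance_dirty_pages() are guarded by the same condition, but the compiler cannot always prove the guards match. A toy reproduction of the pattern (hypothetical, for illustration only):

    int demo(int have_memcg, int value)
    {
            int m_dirty;    /* warns on some GCC versions without "= 0" */

            if (have_memcg)
                    m_dirty = value;

            if (have_memcg && m_dirty > 10) /* same guard, still flagged */
                    return 1;
            return 0;
    }
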
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
| @@ -3419,7 +3419,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | |||
| 3419 | } | 3419 | } |
| 3420 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 3420 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 3421 | 3421 | ||
| 3422 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 3422 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 3423 | void **p) | 3423 | void **p) |
| 3424 | { | 3424 | { |
| 3425 | return __kmem_cache_alloc_bulk(s, flags, size, p); | 3425 | return __kmem_cache_alloc_bulk(s, flags, size, p); |
diff --git a/mm/slab.h b/mm/slab.h --- a/mm/slab.h +++ b/mm/slab.h | |||
| @@ -170,7 +170,7 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, | |||
| 170 | * may be allocated or freed using these operations. | 170 | * may be allocated or freed using these operations. |
| 171 | */ | 171 | */ |
| 172 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); | 172 | void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); |
| 173 | bool __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); | 173 | int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); |
| 174 | 174 | ||
| 175 | #ifdef CONFIG_MEMCG_KMEM | 175 | #ifdef CONFIG_MEMCG_KMEM |
| 176 | /* | 176 | /* |
diff --git a/mm/slab_common.c b/mm/slab_common.c index d88e97c10a2e..3c6a86b4ec25 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -112,7 +112,7 @@ void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) | |||
| 112 | kmem_cache_free(s, p[i]); | 112 | kmem_cache_free(s, p[i]); |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | 115 | int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, |
| 116 | void **p) | 116 | void **p) |
| 117 | { | 117 | { |
| 118 | size_t i; | 118 | size_t i; |
| @@ -121,10 +121,10 @@ bool __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, | |||
| 121 | void *x = p[i] = kmem_cache_alloc(s, flags); | 121 | void *x = p[i] = kmem_cache_alloc(s, flags); |
| 122 | if (!x) { | 122 | if (!x) { |
| 123 | __kmem_cache_free_bulk(s, i, p); | 123 | __kmem_cache_free_bulk(s, i, p); |
| 124 | return false; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | } | 126 | } |
| 127 | return true; | 127 | return i; |
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | #ifdef CONFIG_MEMCG_KMEM | 130 | #ifdef CONFIG_MEMCG_KMEM |
diff --git a/mm/slob.c b/mm/slob.c --- a/mm/slob.c +++ b/mm/slob.c | |||
| @@ -617,7 +617,7 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | |||
| 617 | } | 617 | } |
| 618 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 618 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 619 | 619 | ||
| 620 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 620 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 621 | void **p) | 621 | void **p) |
| 622 | { | 622 | { |
| 623 | return __kmem_cache_alloc_bulk(s, flags, size, p); | 623 | return __kmem_cache_alloc_bulk(s, flags, size, p); |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
| @@ -1065,11 +1065,15 @@ bad: | |||
| 1065 | return 0; | 1065 | return 0; |
| 1066 | } | 1066 | } |
| 1067 | 1067 | ||
| 1068 | /* Supports checking bulk free of a constructed freelist */ | ||
| 1068 | static noinline struct kmem_cache_node *free_debug_processing( | 1069 | static noinline struct kmem_cache_node *free_debug_processing( |
| 1069 | struct kmem_cache *s, struct page *page, void *object, | 1070 | struct kmem_cache *s, struct page *page, |
| 1071 | void *head, void *tail, int bulk_cnt, | ||
| 1070 | unsigned long addr, unsigned long *flags) | 1072 | unsigned long addr, unsigned long *flags) |
| 1071 | { | 1073 | { |
| 1072 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1074 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
| 1075 | void *object = head; | ||
| 1076 | int cnt = 0; | ||
| 1073 | 1077 | ||
| 1074 | spin_lock_irqsave(&n->list_lock, *flags); | 1078 | spin_lock_irqsave(&n->list_lock, *flags); |
| 1075 | slab_lock(page); | 1079 | slab_lock(page); |
| @@ -1077,6 +1081,9 @@ static noinline struct kmem_cache_node *free_debug_processing( | |||
| 1077 | if (!check_slab(s, page)) | 1081 | if (!check_slab(s, page)) |
| 1078 | goto fail; | 1082 | goto fail; |
| 1079 | 1083 | ||
| 1084 | next_object: | ||
| 1085 | cnt++; | ||
| 1086 | |||
| 1080 | if (!check_valid_pointer(s, page, object)) { | 1087 | if (!check_valid_pointer(s, page, object)) { |
| 1081 | slab_err(s, page, "Invalid object pointer 0x%p", object); | 1088 | slab_err(s, page, "Invalid object pointer 0x%p", object); |
| 1082 | goto fail; | 1089 | goto fail; |
| @@ -1107,8 +1114,19 @@ static noinline struct kmem_cache_node *free_debug_processing( | |||
| 1107 | if (s->flags & SLAB_STORE_USER) | 1114 | if (s->flags & SLAB_STORE_USER) |
| 1108 | set_track(s, object, TRACK_FREE, addr); | 1115 | set_track(s, object, TRACK_FREE, addr); |
| 1109 | trace(s, page, object, 0); | 1116 | trace(s, page, object, 0); |
| 1117 | /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ | ||
| 1110 | init_object(s, object, SLUB_RED_INACTIVE); | 1118 | init_object(s, object, SLUB_RED_INACTIVE); |
| 1119 | |||
| 1120 | /* Reached end of constructed freelist yet? */ | ||
| 1121 | if (object != tail) { | ||
| 1122 | object = get_freepointer(s, object); | ||
| 1123 | goto next_object; | ||
| 1124 | } | ||
| 1111 | out: | 1125 | out: |
| 1126 | if (cnt != bulk_cnt) | ||
| 1127 | slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n", | ||
| 1128 | bulk_cnt, cnt); | ||
| 1129 | |||
| 1112 | slab_unlock(page); | 1130 | slab_unlock(page); |
| 1113 | /* | 1131 | /* |
| 1114 | * Keep node_lock to preserve integrity | 1132 | * Keep node_lock to preserve integrity |
| @@ -1204,7 +1222,7 @@ unsigned long kmem_cache_flags(unsigned long object_size, | |||
| 1204 | 1222 | ||
| 1205 | return flags; | 1223 | return flags; |
| 1206 | } | 1224 | } |
| 1207 | #else | 1225 | #else /* !CONFIG_SLUB_DEBUG */ |
| 1208 | static inline void setup_object_debug(struct kmem_cache *s, | 1226 | static inline void setup_object_debug(struct kmem_cache *s, |
| 1209 | struct page *page, void *object) {} | 1227 | struct page *page, void *object) {} |
| 1210 | 1228 | ||
| @@ -1212,7 +1230,8 @@ static inline int alloc_debug_processing(struct kmem_cache *s, | |||
| 1212 | struct page *page, void *object, unsigned long addr) { return 0; } | 1230 | struct page *page, void *object, unsigned long addr) { return 0; } |
| 1213 | 1231 | ||
| 1214 | static inline struct kmem_cache_node *free_debug_processing( | 1232 | static inline struct kmem_cache_node *free_debug_processing( |
| 1215 | struct kmem_cache *s, struct page *page, void *object, | 1233 | struct kmem_cache *s, struct page *page, |
| 1234 | void *head, void *tail, int bulk_cnt, | ||
| 1216 | unsigned long addr, unsigned long *flags) { return NULL; } | 1235 | unsigned long addr, unsigned long *flags) { return NULL; } |
| 1217 | 1236 | ||
| 1218 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) | 1237 | static inline int slab_pad_check(struct kmem_cache *s, struct page *page) |
| @@ -1273,14 +1292,21 @@ static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, | |||
| 1273 | return memcg_kmem_get_cache(s, flags); | 1292 | return memcg_kmem_get_cache(s, flags); |
| 1274 | } | 1293 | } |
| 1275 | 1294 | ||
| 1276 | static inline void slab_post_alloc_hook(struct kmem_cache *s, | 1295 | static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, |
| 1277 | gfp_t flags, void *object) | 1296 | size_t size, void **p) |
| 1278 | { | 1297 | { |
| 1298 | size_t i; | ||
| 1299 | |||
| 1279 | flags &= gfp_allowed_mask; | 1300 | flags &= gfp_allowed_mask; |
| 1280 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); | 1301 | for (i = 0; i < size; i++) { |
| 1281 | kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags); | 1302 | void *object = p[i]; |
| 1303 | |||
| 1304 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); | ||
| 1305 | kmemleak_alloc_recursive(object, s->object_size, 1, | ||
| 1306 | s->flags, flags); | ||
| 1307 | kasan_slab_alloc(s, object); | ||
| 1308 | } | ||
| 1282 | memcg_kmem_put_cache(s); | 1309 | memcg_kmem_put_cache(s); |
| 1283 | kasan_slab_alloc(s, object); | ||
| 1284 | } | 1310 | } |
| 1285 | 1311 | ||
| 1286 | static inline void slab_free_hook(struct kmem_cache *s, void *x) | 1312 | static inline void slab_free_hook(struct kmem_cache *s, void *x) |
| @@ -1308,6 +1334,29 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x) | |||
| 1308 | kasan_slab_free(s, x); | 1334 | kasan_slab_free(s, x); |
| 1309 | } | 1335 | } |
| 1310 | 1336 | ||
| 1337 | static inline void slab_free_freelist_hook(struct kmem_cache *s, | ||
| 1338 | void *head, void *tail) | ||
| 1339 | { | ||
| 1340 | /* | ||
| 1341 | * The compiler cannot detect that this function can be removed if slab_free_hook() | ||
| 1342 | * evaluates to nothing. Thus, catch all relevant config debug options here. | ||
| 1343 | */ | ||
| 1344 | #if defined(CONFIG_KMEMCHECK) || \ | ||
| 1345 | defined(CONFIG_LOCKDEP) || \ | ||
| 1346 | defined(CONFIG_DEBUG_KMEMLEAK) || \ | ||
| 1347 | defined(CONFIG_DEBUG_OBJECTS_FREE) || \ | ||
| 1348 | defined(CONFIG_KASAN) | ||
| 1349 | |||
| 1350 | void *object = head; | ||
| 1351 | void *tail_obj = tail ? : head; | ||
| 1352 | |||
| 1353 | do { | ||
| 1354 | slab_free_hook(s, object); | ||
| 1355 | } while ((object != tail_obj) && | ||
| 1356 | (object = get_freepointer(s, object))); | ||
| 1357 | #endif | ||
| 1358 | } | ||
| 1359 | |||
| 1311 | static void setup_object(struct kmem_cache *s, struct page *page, | 1360 | static void setup_object(struct kmem_cache *s, struct page *page, |
| 1312 | void *object) | 1361 | void *object) |
| 1313 | { | 1362 | { |
| @@ -2295,23 +2344,15 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) | |||
| 2295 | * And if we were unable to get a new slab from the partial slab lists then | 2344 | * And if we were unable to get a new slab from the partial slab lists then |
| 2296 | * we need to allocate a new slab. This is the slowest path since it involves | 2345 | * we need to allocate a new slab. This is the slowest path since it involves |
| 2297 | * a call to the page allocator and the setup of a new slab. | 2346 | * a call to the page allocator and the setup of a new slab. |
| 2347 | * | ||
| 2348 | * Version of __slab_alloc to use when we know that interrupts are | ||
| 2349 | * already disabled (which is the case for bulk allocation). | ||
| 2298 | */ | 2350 | */ |
| 2299 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | 2351 | static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, |
| 2300 | unsigned long addr, struct kmem_cache_cpu *c) | 2352 | unsigned long addr, struct kmem_cache_cpu *c) |
| 2301 | { | 2353 | { |
| 2302 | void *freelist; | 2354 | void *freelist; |
| 2303 | struct page *page; | 2355 | struct page *page; |
| 2304 | unsigned long flags; | ||
| 2305 | |||
| 2306 | local_irq_save(flags); | ||
| 2307 | #ifdef CONFIG_PREEMPT | ||
| 2308 | /* | ||
| 2309 | * We may have been preempted and rescheduled on a different | ||
| 2310 | * cpu before disabling interrupts. Need to reload cpu area | ||
| 2311 | * pointer. | ||
| 2312 | */ | ||
| 2313 | c = this_cpu_ptr(s->cpu_slab); | ||
| 2314 | #endif | ||
| 2315 | 2356 | ||
| 2316 | page = c->page; | 2357 | page = c->page; |
| 2317 | if (!page) | 2358 | if (!page) |
| @@ -2369,7 +2410,6 @@ load_freelist: | |||
| 2369 | VM_BUG_ON(!c->page->frozen); | 2410 | VM_BUG_ON(!c->page->frozen); |
| 2370 | c->freelist = get_freepointer(s, freelist); | 2411 | c->freelist = get_freepointer(s, freelist); |
| 2371 | c->tid = next_tid(c->tid); | 2412 | c->tid = next_tid(c->tid); |
| 2372 | local_irq_restore(flags); | ||
| 2373 | return freelist; | 2413 | return freelist; |
| 2374 | 2414 | ||
| 2375 | new_slab: | 2415 | new_slab: |
| @@ -2386,7 +2426,6 @@ new_slab: | |||
| 2386 | 2426 | ||
| 2387 | if (unlikely(!freelist)) { | 2427 | if (unlikely(!freelist)) { |
| 2388 | slab_out_of_memory(s, gfpflags, node); | 2428 | slab_out_of_memory(s, gfpflags, node); |
| 2389 | local_irq_restore(flags); | ||
| 2390 | return NULL; | 2429 | return NULL; |
| 2391 | } | 2430 | } |
| 2392 | 2431 | ||
| @@ -2402,11 +2441,35 @@ new_slab: | |||
| 2402 | deactivate_slab(s, page, get_freepointer(s, freelist)); | 2441 | deactivate_slab(s, page, get_freepointer(s, freelist)); |
| 2403 | c->page = NULL; | 2442 | c->page = NULL; |
| 2404 | c->freelist = NULL; | 2443 | c->freelist = NULL; |
| 2405 | local_irq_restore(flags); | ||
| 2406 | return freelist; | 2444 | return freelist; |
| 2407 | } | 2445 | } |
| 2408 | 2446 | ||
| 2409 | /* | 2447 | /* |
| 2448 | * Another one that disables interrupts and compensates for possible | ||
| 2449 | * cpu changes by refetching the per cpu area pointer. | ||
| 2450 | */ | ||
| 2451 | static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, | ||
| 2452 | unsigned long addr, struct kmem_cache_cpu *c) | ||
| 2453 | { | ||
| 2454 | void *p; | ||
| 2455 | unsigned long flags; | ||
| 2456 | |||
| 2457 | local_irq_save(flags); | ||
| 2458 | #ifdef CONFIG_PREEMPT | ||
| 2459 | /* | ||
| 2460 | * We may have been preempted and rescheduled on a different | ||
| 2461 | * cpu before disabling interrupts. Need to reload cpu area | ||
| 2462 | * pointer. | ||
| 2463 | */ | ||
| 2464 | c = this_cpu_ptr(s->cpu_slab); | ||
| 2465 | #endif | ||
| 2466 | |||
| 2467 | p = ___slab_alloc(s, gfpflags, node, addr, c); | ||
| 2468 | local_irq_restore(flags); | ||
| 2469 | return p; | ||
| 2470 | } | ||
| 2471 | |||
| 2472 | /* | ||
| 2410 | * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) | 2473 | * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc) |
| 2411 | * have the fastpath folded into their functions. So no function call | 2474 | * have the fastpath folded into their functions. So no function call |
| 2412 | * overhead for requests that can be satisfied on the fastpath. | 2475 | * overhead for requests that can be satisfied on the fastpath. |
| @@ -2419,7 +2482,7 @@ new_slab: | |||
| 2419 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, | 2482 | static __always_inline void *slab_alloc_node(struct kmem_cache *s, |
| 2420 | gfp_t gfpflags, int node, unsigned long addr) | 2483 | gfp_t gfpflags, int node, unsigned long addr) |
| 2421 | { | 2484 | { |
| 2422 | void **object; | 2485 | void *object; |
| 2423 | struct kmem_cache_cpu *c; | 2486 | struct kmem_cache_cpu *c; |
| 2424 | struct page *page; | 2487 | struct page *page; |
| 2425 | unsigned long tid; | 2488 | unsigned long tid; |
| @@ -2498,7 +2561,7 @@ redo: | |||
| 2498 | if (unlikely(gfpflags & __GFP_ZERO) && object) | 2561 | if (unlikely(gfpflags & __GFP_ZERO) && object) |
| 2499 | memset(object, 0, s->object_size); | 2562 | memset(object, 0, s->object_size); |
| 2500 | 2563 | ||
| 2501 | slab_post_alloc_hook(s, gfpflags, object); | 2564 | slab_post_alloc_hook(s, gfpflags, 1, &object); |
| 2502 | 2565 | ||
| 2503 | return object; | 2566 | return object; |
| 2504 | } | 2567 | } |
| @@ -2569,10 +2632,11 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace); | |||
| 2569 | * handling required then we can return immediately. | 2632 | * handling required then we can return immediately. |
| 2570 | */ | 2633 | */ |
| 2571 | static void __slab_free(struct kmem_cache *s, struct page *page, | 2634 | static void __slab_free(struct kmem_cache *s, struct page *page, |
| 2572 | void *x, unsigned long addr) | 2635 | void *head, void *tail, int cnt, |
| 2636 | unsigned long addr) | ||
| 2637 | |||
| 2573 | { | 2638 | { |
| 2574 | void *prior; | 2639 | void *prior; |
| 2575 | void **object = (void *)x; | ||
| 2576 | int was_frozen; | 2640 | int was_frozen; |
| 2577 | struct page new; | 2641 | struct page new; |
| 2578 | unsigned long counters; | 2642 | unsigned long counters; |
| @@ -2582,7 +2646,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2582 | stat(s, FREE_SLOWPATH); | 2646 | stat(s, FREE_SLOWPATH); |
| 2583 | 2647 | ||
| 2584 | if (kmem_cache_debug(s) && | 2648 | if (kmem_cache_debug(s) && |
| 2585 | !(n = free_debug_processing(s, page, x, addr, &flags))) | 2649 | !(n = free_debug_processing(s, page, head, tail, cnt, |
| 2650 | addr, &flags))) | ||
| 2586 | return; | 2651 | return; |
| 2587 | 2652 | ||
| 2588 | do { | 2653 | do { |
| @@ -2592,10 +2657,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2592 | } | 2657 | } |
| 2593 | prior = page->freelist; | 2658 | prior = page->freelist; |
| 2594 | counters = page->counters; | 2659 | counters = page->counters; |
| 2595 | set_freepointer(s, object, prior); | 2660 | set_freepointer(s, tail, prior); |
| 2596 | new.counters = counters; | 2661 | new.counters = counters; |
| 2597 | was_frozen = new.frozen; | 2662 | was_frozen = new.frozen; |
| 2598 | new.inuse--; | 2663 | new.inuse -= cnt; |
| 2599 | if ((!new.inuse || !prior) && !was_frozen) { | 2664 | if ((!new.inuse || !prior) && !was_frozen) { |
| 2600 | 2665 | ||
| 2601 | if (kmem_cache_has_cpu_partial(s) && !prior) { | 2666 | if (kmem_cache_has_cpu_partial(s) && !prior) { |
| @@ -2626,7 +2691,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
| 2626 | 2691 | ||
| 2627 | } while (!cmpxchg_double_slab(s, page, | 2692 | } while (!cmpxchg_double_slab(s, page, |
| 2628 | prior, counters, | 2693 | prior, counters, |
| 2629 | object, new.counters, | 2694 | head, new.counters, |
| 2630 | "__slab_free")); | 2695 | "__slab_free")); |
| 2631 | 2696 | ||
| 2632 | if (likely(!n)) { | 2697 | if (likely(!n)) { |
| @@ -2691,15 +2756,20 @@ slab_empty: | |||
| 2691 | * | 2756 | * |
| 2692 | * If fastpath is not possible then fall back to __slab_free where we deal | 2757 | * If fastpath is not possible then fall back to __slab_free where we deal |
| 2693 | * with all sorts of special processing. | 2758 | * with all sorts of special processing. |
| 2759 | * | ||
| 2760 | * Bulk free of a freelist with several objects (all pointing to the | ||
| 2761 | * same page) is possible by specifying head and tail pointers, plus an | ||
| 2762 | * object count (cnt). Bulk free is indicated by the tail pointer being set. | ||
| 2694 | */ | 2763 | */ |
| 2695 | static __always_inline void slab_free(struct kmem_cache *s, | 2764 | static __always_inline void slab_free(struct kmem_cache *s, struct page *page, |
| 2696 | struct page *page, void *x, unsigned long addr) | 2765 | void *head, void *tail, int cnt, |
| 2766 | unsigned long addr) | ||
| 2697 | { | 2767 | { |
| 2698 | void **object = (void *)x; | 2768 | void *tail_obj = tail ? : head; |
| 2699 | struct kmem_cache_cpu *c; | 2769 | struct kmem_cache_cpu *c; |
| 2700 | unsigned long tid; | 2770 | unsigned long tid; |
| 2701 | 2771 | ||
| 2702 | slab_free_hook(s, x); | 2772 | slab_free_freelist_hook(s, head, tail); |
| 2703 | 2773 | ||
| 2704 | redo: | 2774 | redo: |
| 2705 | /* | 2775 | /* |
| @@ -2718,19 +2788,19 @@ redo: | |||
| 2718 | barrier(); | 2788 | barrier(); |
| 2719 | 2789 | ||
| 2720 | if (likely(page == c->page)) { | 2790 | if (likely(page == c->page)) { |
| 2721 | set_freepointer(s, object, c->freelist); | 2791 | set_freepointer(s, tail_obj, c->freelist); |
| 2722 | 2792 | ||
| 2723 | if (unlikely(!this_cpu_cmpxchg_double( | 2793 | if (unlikely(!this_cpu_cmpxchg_double( |
| 2724 | s->cpu_slab->freelist, s->cpu_slab->tid, | 2794 | s->cpu_slab->freelist, s->cpu_slab->tid, |
| 2725 | c->freelist, tid, | 2795 | c->freelist, tid, |
| 2726 | object, next_tid(tid)))) { | 2796 | head, next_tid(tid)))) { |
| 2727 | 2797 | ||
| 2728 | note_cmpxchg_failure("slab_free", s, tid); | 2798 | note_cmpxchg_failure("slab_free", s, tid); |
| 2729 | goto redo; | 2799 | goto redo; |
| 2730 | } | 2800 | } |
| 2731 | stat(s, FREE_FASTPATH); | 2801 | stat(s, FREE_FASTPATH); |
| 2732 | } else | 2802 | } else |
| 2733 | __slab_free(s, page, x, addr); | 2803 | __slab_free(s, page, head, tail_obj, cnt, addr); |
| 2734 | 2804 | ||
| 2735 | } | 2805 | } |
| 2736 | 2806 | ||
| @@ -2739,59 +2809,116 @@ void kmem_cache_free(struct kmem_cache *s, void *x) | |||
| 2739 | s = cache_from_obj(s, x); | 2809 | s = cache_from_obj(s, x); |
| 2740 | if (!s) | 2810 | if (!s) |
| 2741 | return; | 2811 | return; |
| 2742 | slab_free(s, virt_to_head_page(x), x, _RET_IP_); | 2812 | slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_); |
| 2743 | trace_kmem_cache_free(_RET_IP_, x); | 2813 | trace_kmem_cache_free(_RET_IP_, x); |
| 2744 | } | 2814 | } |
| 2745 | EXPORT_SYMBOL(kmem_cache_free); | 2815 | EXPORT_SYMBOL(kmem_cache_free); |
| 2746 | 2816 | ||
| 2747 | /* Note that interrupts must be enabled when calling this function. */ | 2817 | struct detached_freelist { |
| 2748 | void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) | ||
| 2749 | { | ||
| 2750 | struct kmem_cache_cpu *c; | ||
| 2751 | struct page *page; | 2818 | struct page *page; |
| 2752 | int i; | 2819 | void *tail; |
| 2820 | void *freelist; | ||
| 2821 | int cnt; | ||
| 2822 | }; | ||
| 2753 | 2823 | ||
| 2754 | local_irq_disable(); | 2824 | /* |
| 2755 | c = this_cpu_ptr(s->cpu_slab); | 2825 | * This function progressively scans the array with free objects (with |
| 2826 | * a limited look ahead) and extracts objects belonging to the same | ||
| 2827 | * page. It builds a detached freelist directly within the given | ||
| 2828 | * page/objects. This can happen without any need for | ||
| 2829 | * synchronization, because the objects are owned by the running process. | ||
| 2830 | * The freelist is built up as a single linked list in the objects. | ||
| 2831 | * The idea is that this detached freelist can then be bulk | ||
| 2832 | * transferred to the real freelist(s), requiring only a single | ||
| 2833 | * synchronization primitive. Look ahead in the array is limited for | ||
| 2834 | * performance reasons. | ||
| 2835 | */ | ||
| 2836 | static int build_detached_freelist(struct kmem_cache *s, size_t size, | ||
| 2837 | void **p, struct detached_freelist *df) | ||
| 2838 | { | ||
| 2839 | size_t first_skipped_index = 0; | ||
| 2840 | int lookahead = 3; | ||
| 2841 | void *object; | ||
| 2756 | 2842 | ||
| 2757 | for (i = 0; i < size; i++) { | 2843 | /* Always re-init detached_freelist */ |
| 2758 | void *object = p[i]; | 2844 | df->page = NULL; |
| 2759 | 2845 | ||
| 2760 | BUG_ON(!object); | 2846 | do { |
| 2761 | /* kmem cache debug support */ | 2847 | object = p[--size]; |
| 2762 | s = cache_from_obj(s, object); | 2848 | } while (!object && size); |
| 2763 | if (unlikely(!s)) | ||
| 2764 | goto exit; | ||
| 2765 | slab_free_hook(s, object); | ||
| 2766 | 2849 | ||
| 2767 | page = virt_to_head_page(object); | 2850 | if (!object) |
| 2851 | return 0; | ||
| 2768 | 2852 | ||
| 2769 | if (c->page == page) { | 2853 | /* Start new detached freelist */ |
| 2770 | /* Fastpath: local CPU free */ | 2854 | set_freepointer(s, object, NULL); |
| 2771 | set_freepointer(s, object, c->freelist); | 2855 | df->page = virt_to_head_page(object); |
| 2772 | c->freelist = object; | 2856 | df->tail = object; |
| 2773 | } else { | 2857 | df->freelist = object; |
| 2774 | c->tid = next_tid(c->tid); | 2858 | p[size] = NULL; /* mark object processed */ |
| 2775 | local_irq_enable(); | 2859 | df->cnt = 1; |
| 2776 | /* Slowpath: overhead locked cmpxchg_double_slab */ | 2860 | |
| 2777 | __slab_free(s, page, object, _RET_IP_); | 2861 | while (size) { |
| 2778 | local_irq_disable(); | 2862 | object = p[--size]; |
| 2779 | c = this_cpu_ptr(s->cpu_slab); | 2863 | if (!object) |
| 2864 | continue; /* Skip processed objects */ | ||
| 2865 | |||
| 2866 | /* df->page is always set at this point */ | ||
| 2867 | if (df->page == virt_to_head_page(object)) { | ||
| 2868 | /* Opportunity build freelist */ | ||
| 2869 | set_freepointer(s, object, df->freelist); | ||
| 2870 | df->freelist = object; | ||
| 2871 | df->cnt++; | ||
| 2872 | p[size] = NULL; /* mark object processed */ | ||
| 2873 | |||
| 2874 | continue; | ||
| 2780 | } | 2875 | } |
| 2876 | |||
| 2877 | /* Limit look ahead search */ | ||
| 2878 | if (!--lookahead) | ||
| 2879 | break; | ||
| 2880 | |||
| 2881 | if (!first_skipped_index) | ||
| 2882 | first_skipped_index = size + 1; | ||
| 2781 | } | 2883 | } |
| 2782 | exit: | 2884 | |
| 2783 | c->tid = next_tid(c->tid); | 2885 | return first_skipped_index; |
| 2784 | local_irq_enable(); | 2886 | } |
| 2887 | |||
| 2888 | |||
| 2889 | /* Note that interrupts must be enabled when calling this function. */ | ||
| 2890 | void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p) | ||
| 2891 | { | ||
| 2892 | if (WARN_ON(!size)) | ||
| 2893 | return; | ||
| 2894 | |||
| 2895 | do { | ||
| 2896 | struct detached_freelist df; | ||
| 2897 | struct kmem_cache *s; | ||
| 2898 | |||
| 2899 | /* Support for memcg */ | ||
| 2900 | s = cache_from_obj(orig_s, p[size - 1]); | ||
| 2901 | |||
| 2902 | size = build_detached_freelist(s, size, p, &df); | ||
| 2903 | if (unlikely(!df.page)) | ||
| 2904 | continue; | ||
| 2905 | |||
| 2906 | slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_); | ||
| 2907 | } while (likely(size)); | ||
| 2785 | } | 2908 | } |
| 2786 | EXPORT_SYMBOL(kmem_cache_free_bulk); | 2909 | EXPORT_SYMBOL(kmem_cache_free_bulk); |
| 2787 | 2910 | ||
| 2788 | /* Note that interrupts must be enabled when calling this function. */ | 2911 | /* Note that interrupts must be enabled when calling this function. */ |
| 2789 | bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | 2912 | int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, |
| 2790 | void **p) | 2913 | void **p) |
| 2791 | { | 2914 | { |
| 2792 | struct kmem_cache_cpu *c; | 2915 | struct kmem_cache_cpu *c; |
| 2793 | int i; | 2916 | int i; |
| 2794 | 2917 | ||
| 2918 | /* memcg and kmem_cache debug support */ | ||
| 2919 | s = slab_pre_alloc_hook(s, flags); | ||
| 2920 | if (unlikely(!s)) | ||
| 2921 | return false; | ||
| 2795 | /* | 2922 | /* |
| 2796 | * Drain objects in the per cpu slab, while disabling local | 2923 | * Drain objects in the per cpu slab, while disabling local |
| 2797 | * IRQs, which protects against PREEMPT and interrupts | 2924 | * IRQs, which protects against PREEMPT and interrupts |
| @@ -2804,36 +2931,20 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | |||
| 2804 | void *object = c->freelist; | 2931 | void *object = c->freelist; |
| 2805 | 2932 | ||
| 2806 | if (unlikely(!object)) { | 2933 | if (unlikely(!object)) { |
| 2807 | local_irq_enable(); | ||
| 2808 | /* | 2934 | /* |
| 2809 | * Invoking the slow path likely has the side-effect | 2935 | * Invoking the slow path likely has the side-effect |
| 2810 | * of re-populating the per-CPU c->freelist | 2936 | * of re-populating the per-CPU c->freelist |
| 2811 | */ | 2937 | */ |
| 2812 | p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, | 2938 | p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, |
| 2813 | _RET_IP_, c); | 2939 | _RET_IP_, c); |
| 2814 | if (unlikely(!p[i])) { | 2940 | if (unlikely(!p[i])) |
| 2815 | __kmem_cache_free_bulk(s, i, p); | 2941 | goto error; |
| 2816 | return false; | 2942 | |
| 2817 | } | ||
| 2818 | local_irq_disable(); | ||
| 2819 | c = this_cpu_ptr(s->cpu_slab); | 2943 | c = this_cpu_ptr(s->cpu_slab); |
| 2820 | continue; /* goto for-loop */ | 2944 | continue; /* goto for-loop */ |
| 2821 | } | 2945 | } |
| 2822 | |||
| 2823 | /* kmem_cache debug support */ | ||
| 2824 | s = slab_pre_alloc_hook(s, flags); | ||
| 2825 | if (unlikely(!s)) { | ||
| 2826 | __kmem_cache_free_bulk(s, i, p); | ||
| 2827 | c->tid = next_tid(c->tid); | ||
| 2828 | local_irq_enable(); | ||
| 2829 | return false; | ||
| 2830 | } | ||
| 2831 | |||
| 2832 | c->freelist = get_freepointer(s, object); | 2946 | c->freelist = get_freepointer(s, object); |
| 2833 | p[i] = object; | 2947 | p[i] = object; |
| 2834 | |||
| 2835 | /* kmem_cache debug support */ | ||
| 2836 | slab_post_alloc_hook(s, flags, object); | ||
| 2837 | } | 2948 | } |
| 2838 | c->tid = next_tid(c->tid); | 2949 | c->tid = next_tid(c->tid); |
| 2839 | local_irq_enable(); | 2950 | local_irq_enable(); |
| @@ -2846,7 +2957,14 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, | |||
| 2846 | memset(p[j], 0, s->object_size); | 2957 | memset(p[j], 0, s->object_size); |
| 2847 | } | 2958 | } |
| 2848 | 2959 | ||
| 2849 | return true; | 2960 | /* memcg and kmem_cache debug support */ |
| 2961 | slab_post_alloc_hook(s, flags, size, p); | ||
| 2962 | return i; | ||
| 2963 | error: | ||
| 2964 | local_irq_enable(); | ||
| 2965 | slab_post_alloc_hook(s, flags, i, p); | ||
| 2966 | __kmem_cache_free_bulk(s, i, p); | ||
| 2967 | return 0; | ||
| 2850 | } | 2968 | } |
| 2851 | EXPORT_SYMBOL(kmem_cache_alloc_bulk); | 2969 | EXPORT_SYMBOL(kmem_cache_alloc_bulk); |
| 2852 | 2970 | ||
| @@ -3511,7 +3629,7 @@ void kfree(const void *x) | |||
| 3511 | __free_kmem_pages(page, compound_order(page)); | 3629 | __free_kmem_pages(page, compound_order(page)); |
| 3512 | return; | 3630 | return; |
| 3513 | } | 3631 | } |
| 3514 | slab_free(page->slab_cache, page, object, _RET_IP_); | 3632 | slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_); |
| 3515 | } | 3633 | } |
| 3516 | EXPORT_SYMBOL(kfree); | 3634 | EXPORT_SYMBOL(kfree); |
| 3517 | 3635 | ||
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d04563480c94..8e3c9c5a3042 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1443,7 +1443,6 @@ struct vm_struct *remove_vm_area(const void *addr) | |||
| 1443 | vmap_debug_free_range(va->va_start, va->va_end); | 1443 | vmap_debug_free_range(va->va_start, va->va_end); |
| 1444 | kasan_free_shadow(vm); | 1444 | kasan_free_shadow(vm); |
| 1445 | free_unmap_vmap_area(va); | 1445 | free_unmap_vmap_area(va); |
| 1446 | vm->size -= PAGE_SIZE; | ||
| 1447 | 1446 | ||
| 1448 | return vm; | 1447 | return vm; |
| 1449 | } | 1448 | } |
| @@ -1468,8 +1467,8 @@ static void __vunmap(const void *addr, int deallocate_pages) | |||
| 1468 | return; | 1467 | return; |
| 1469 | } | 1468 | } |
| 1470 | 1469 | ||
| 1471 | debug_check_no_locks_freed(addr, area->size); | 1470 | debug_check_no_locks_freed(addr, get_vm_area_size(area)); |
| 1472 | debug_check_no_obj_freed(addr, area->size); | 1471 | debug_check_no_obj_freed(addr, get_vm_area_size(area)); |
| 1473 | 1472 | ||
| 1474 | if (deallocate_pages) { | 1473 | if (deallocate_pages) { |
| 1475 | int i; | 1474 | int i; |
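
Both mm/vmalloc.c hunks concern the guard page: instead of remove_vm_area() shrinking vm->size behind callers' backs, code that wants the usable size now asks for it explicitly. For reference, the existing helper the debug checks switch to (as defined in include/linux/vmalloc.h at the time):

    static inline size_t get_vm_area_size(const struct vm_struct *area)
    {
            /* return actual size without guard page */
            return area->size - PAGE_SIZE;
    }
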
diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 125b906cd1d4..638a38e1b419 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc | |||
| @@ -2711,7 +2711,7 @@ $kernelversion = get_kernel_version(); | |||
| 2711 | 2711 | ||
| 2712 | # generate a sequence of code that will splice in highlighting information | 2712 | # generate a sequence of code that will splice in highlighting information |
| 2713 | # using the s// operator. | 2713 | # using the s// operator. |
| 2714 | foreach my $k (keys @highlights) { | 2714 | for (my $k = 0; $k < @highlights; $k++) { |
| 2715 | my $pattern = $highlights[$k][0]; | 2715 | my $pattern = $highlights[$k][0]; |
| 2716 | my $result = $highlights[$k][1]; | 2716 | my $result = $highlights[$k][1]; |
| 2717 | # print STDERR "scanning pattern:$pattern, highlight:($result)\n"; | 2717 | # print STDERR "scanning pattern:$pattern, highlight:($result)\n"; |
diff --git a/tools/Makefile b/tools/Makefile index d6f307dfb1a3..7dc820a8c1f1 100644 --- a/tools/Makefile +++ b/tools/Makefile | |||
| @@ -32,6 +32,10 @@ help: | |||
| 32 | @echo ' from the kernel command line to build and install one of' | 32 | @echo ' from the kernel command line to build and install one of' |
| 33 | @echo ' the tools above' | 33 | @echo ' the tools above' |
| 34 | @echo '' | 34 | @echo '' |
| 35 | @echo ' $$ make tools/all' | ||
| 36 | @echo '' | ||
| 37 | @echo ' builds all tools.' | ||
| 38 | @echo '' | ||
| 35 | @echo ' $$ make tools/install' | 39 | @echo ' $$ make tools/install' |
| 36 | @echo '' | 40 | @echo '' |
| 37 | @echo ' installs all tools.' | 41 | @echo ' installs all tools.' |
| @@ -77,6 +81,11 @@ tmon: FORCE | |||
| 77 | freefall: FORCE | 81 | freefall: FORCE |
| 78 | $(call descend,laptop/$@) | 82 | $(call descend,laptop/$@) |
| 79 | 83 | ||
| 84 | all: acpi cgroup cpupower hv firewire lguest \ | ||
| 85 | perf selftests turbostat usb \ | ||
| 86 | virtio vm net x86_energy_perf_policy \ | ||
| 87 | tmon freefall | ||
| 88 | |||
| 80 | acpi_install: | 89 | acpi_install: |
| 81 | $(call descend,power/$(@:_install=),install) | 90 | $(call descend,power/$(@:_install=),install) |
| 82 | 91 | ||
| @@ -101,7 +110,7 @@ freefall_install: | |||
| 101 | install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ | 110 | install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \ |
| 102 | perf_install selftests_install turbostat_install usb_install \ | 111 | perf_install selftests_install turbostat_install usb_install \ |
| 103 | virtio_install vm_install net_install x86_energy_perf_policy_install \ | 112 | virtio_install vm_install net_install x86_energy_perf_policy_install \ |
| 104 | tmon freefall_install | 113 | tmon_install freefall_install |
| 105 | 114 | ||
| 106 | acpi_clean: | 115 | acpi_clean: |
| 107 | $(call descend,power/acpi,clean) | 116 | $(call descend,power/acpi,clean) |
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index 0a945d2e8ca5..99d127fe9c35 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c | |||
| @@ -675,6 +675,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 675 | .fork = perf_event__repipe, | 675 | .fork = perf_event__repipe, |
| 676 | .exit = perf_event__repipe, | 676 | .exit = perf_event__repipe, |
| 677 | .lost = perf_event__repipe, | 677 | .lost = perf_event__repipe, |
| 678 | .lost_samples = perf_event__repipe, | ||
| 678 | .aux = perf_event__repipe, | 679 | .aux = perf_event__repipe, |
| 679 | .itrace_start = perf_event__repipe, | 680 | .itrace_start = perf_event__repipe, |
| 680 | .context_switch = perf_event__repipe, | 681 | .context_switch = perf_event__repipe, |
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 2853ad2bd435..f256fac1e722 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | struct report { | 44 | struct report { |
| 45 | struct perf_tool tool; | 45 | struct perf_tool tool; |
| 46 | struct perf_session *session; | 46 | struct perf_session *session; |
| 47 | bool force, use_tui, use_gtk, use_stdio; | 47 | bool use_tui, use_gtk, use_stdio; |
| 48 | bool hide_unresolved; | 48 | bool hide_unresolved; |
| 49 | bool dont_use_callchains; | 49 | bool dont_use_callchains; |
| 50 | bool show_full_info; | 50 | bool show_full_info; |
| @@ -678,7 +678,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 678 | "file", "vmlinux pathname"), | 678 | "file", "vmlinux pathname"), |
| 679 | OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, | 679 | OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, |
| 680 | "file", "kallsyms pathname"), | 680 | "file", "kallsyms pathname"), |
| 681 | OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"), | 681 | OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"), |
| 682 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, | 682 | OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, |
| 683 | "load module symbols - WARNING: use only with -k and LIVE kernel"), | 683 | "load module symbols - WARNING: use only with -k and LIVE kernel"), |
| 684 | OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, | 684 | OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples, |
| @@ -832,7 +832,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) | |||
| 832 | } | 832 | } |
| 833 | 833 | ||
| 834 | file.path = input_name; | 834 | file.path = input_name; |
| 835 | file.force = report.force; | 835 | file.force = symbol_conf.force; |
| 836 | 836 | ||
| 837 | repeat: | 837 | repeat: |
| 838 | session = perf_session__new(&file, false, &report.tool); | 838 | session = perf_session__new(&file, false, &report.tool); |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index e5afb8936040..fa9eb92c9e24 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
| @@ -1430,7 +1430,6 @@ close_file_and_continue: | |||
| 1430 | 1430 | ||
| 1431 | struct popup_action { | 1431 | struct popup_action { |
| 1432 | struct thread *thread; | 1432 | struct thread *thread; |
| 1433 | struct dso *dso; | ||
| 1434 | struct map_symbol ms; | 1433 | struct map_symbol ms; |
| 1435 | int socket; | 1434 | int socket; |
| 1436 | 1435 | ||
| @@ -1565,7 +1564,6 @@ add_dso_opt(struct hist_browser *browser, struct popup_action *act, | |||
| 1565 | return 0; | 1564 | return 0; |
| 1566 | 1565 | ||
| 1567 | act->ms.map = map; | 1566 | act->ms.map = map; |
| 1568 | act->dso = map->dso; | ||
| 1569 | act->fn = do_zoom_dso; | 1567 | act->fn = do_zoom_dso; |
| 1570 | return 1; | 1568 | return 1; |
| 1571 | } | 1569 | } |
| @@ -1827,7 +1825,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1827 | 1825 | ||
| 1828 | while (1) { | 1826 | while (1) { |
| 1829 | struct thread *thread = NULL; | 1827 | struct thread *thread = NULL; |
| 1830 | struct dso *dso = NULL; | ||
| 1831 | struct map *map = NULL; | 1828 | struct map *map = NULL; |
| 1832 | int choice = 0; | 1829 | int choice = 0; |
| 1833 | int socked_id = -1; | 1830 | int socked_id = -1; |
| @@ -1839,8 +1836,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1839 | if (browser->he_selection != NULL) { | 1836 | if (browser->he_selection != NULL) { |
| 1840 | thread = hist_browser__selected_thread(browser); | 1837 | thread = hist_browser__selected_thread(browser); |
| 1841 | map = browser->selection->map; | 1838 | map = browser->selection->map; |
| 1842 | if (map) | ||
| 1843 | dso = map->dso; | ||
| 1844 | socked_id = browser->he_selection->socket; | 1839 | socked_id = browser->he_selection->socket; |
| 1845 | } | 1840 | } |
| 1846 | switch (key) { | 1841 | switch (key) { |
| @@ -1874,7 +1869,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, | |||
| 1874 | hist_browser__dump(browser); | 1869 | hist_browser__dump(browser); |
| 1875 | continue; | 1870 | continue; |
| 1876 | case 'd': | 1871 | case 'd': |
| 1877 | actions->dso = dso; | 1872 | actions->ms.map = map; |
| 1878 | do_zoom_dso(browser, actions); | 1873 | do_zoom_dso(browser, actions); |
| 1879 | continue; | 1874 | continue; |
| 1880 | case 'V': | 1875 | case 'V': |
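
The hists.c hunks above drop the cached `dso` pointer from `struct popup_action` and from the browse loop: the selected map already owns the dso, so it can be derived at the point of use instead of being kept as a second pointer that can go stale. A minimal sketch of the idea, using a hypothetical helper name rather than the perf code itself:

/* Hypothetical helper illustrating the change: derive the dso from
 * the map on demand instead of caching it alongside the map.
 */
static struct dso *action_dso(const struct popup_action *act)
{
	return act->ms.map ? act->ms.map->dso : NULL;
}
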
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index d909459fb54c..217b5a60e2ab 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c | |||
| @@ -76,6 +76,7 @@ struct perf_tool build_id__mark_dso_hit_ops = { | |||
| 76 | .exit = perf_event__exit_del_thread, | 76 | .exit = perf_event__exit_del_thread, |
| 77 | .attr = perf_event__process_attr, | 77 | .attr = perf_event__process_attr, |
| 78 | .build_id = perf_event__process_build_id, | 78 | .build_id = perf_event__process_build_id, |
| 79 | .ordered_events = true, | ||
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 81 | int build_id__sprintf(const u8 *build_id, int len, char *bf) | 82 | int build_id__sprintf(const u8 *build_id, int len, char *bf) |
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 7c0c08386a1d..425df5c86c9c 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c | |||
| @@ -933,6 +933,7 @@ static struct dso *__dso__findlink_by_longname(struct rb_root *root, | |||
| 933 | /* Add new node and rebalance tree */ | 933 | /* Add new node and rebalance tree */ |
| 934 | rb_link_node(&dso->rb_node, parent, p); | 934 | rb_link_node(&dso->rb_node, parent, p); |
| 935 | rb_insert_color(&dso->rb_node, root); | 935 | rb_insert_color(&dso->rb_node, root); |
| 936 | dso->root = root; | ||
| 936 | } | 937 | } |
| 937 | return NULL; | 938 | return NULL; |
| 938 | } | 939 | } |
| @@ -945,15 +946,30 @@ static inline struct dso *__dso__find_by_longname(struct rb_root *root, | |||
| 945 | 946 | ||
| 946 | void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) | 947 | void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) |
| 947 | { | 948 | { |
| 949 | struct rb_root *root = dso->root; | ||
| 950 | |||
| 948 | if (name == NULL) | 951 | if (name == NULL) |
| 949 | return; | 952 | return; |
| 950 | 953 | ||
| 951 | if (dso->long_name_allocated) | 954 | if (dso->long_name_allocated) |
| 952 | free((char *)dso->long_name); | 955 | free((char *)dso->long_name); |
| 953 | 956 | ||
| 957 | if (root) { | ||
| 958 | rb_erase(&dso->rb_node, root); | ||
| 959 | /* | ||
| 960 | * __dso__findlink_by_longname() isn't guaranteed to add it | ||
| 961 | * back, so a clean removal is required here. | ||
| 962 | */ | ||
| 963 | RB_CLEAR_NODE(&dso->rb_node); | ||
| 964 | dso->root = NULL; | ||
| 965 | } | ||
| 966 | |||
| 954 | dso->long_name = name; | 967 | dso->long_name = name; |
| 955 | dso->long_name_len = strlen(name); | 968 | dso->long_name_len = strlen(name); |
| 956 | dso->long_name_allocated = name_allocated; | 969 | dso->long_name_allocated = name_allocated; |
| 970 | |||
| 971 | if (root) | ||
| 972 | __dso__findlink_by_longname(root, dso, NULL); | ||
| 957 | } | 973 | } |
| 958 | 974 | ||
| 959 | void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) | 975 | void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) |
| @@ -1046,6 +1062,7 @@ struct dso *dso__new(const char *name) | |||
| 1046 | dso->kernel = DSO_TYPE_USER; | 1062 | dso->kernel = DSO_TYPE_USER; |
| 1047 | dso->needs_swap = DSO_SWAP__UNSET; | 1063 | dso->needs_swap = DSO_SWAP__UNSET; |
| 1048 | RB_CLEAR_NODE(&dso->rb_node); | 1064 | RB_CLEAR_NODE(&dso->rb_node); |
| 1065 | dso->root = NULL; | ||
| 1049 | INIT_LIST_HEAD(&dso->node); | 1066 | INIT_LIST_HEAD(&dso->node); |
| 1050 | INIT_LIST_HEAD(&dso->data.open_entry); | 1067 | INIT_LIST_HEAD(&dso->data.open_entry); |
| 1051 | pthread_mutex_init(&dso->lock, NULL); | 1068 | pthread_mutex_init(&dso->lock, NULL); |
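
The dso.c change fixes a subtle rbtree corruption: dsos are kept in a tree sorted by long name, so renaming a dso in place silently breaks the tree's ordering. The fix records the owning root in the dso and makes dso__set_long_name() unlink the node before the rename and relink it after. A self-contained sketch of that re-key pattern with the kernel rbtree API (generic names, not the perf code):

#include <linux/rbtree.h>
#include <string.h>

struct item {
	struct rb_node rb;
	const char *key;
};

static void item_insert(struct rb_root *root, struct item *it)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rb);

		parent = *p;
		p = strcmp(it->key, cur->key) < 0 ? &(*p)->rb_left
						  : &(*p)->rb_right;
	}
	rb_link_node(&it->rb, parent, p);
	rb_insert_color(&it->rb, root);
}

static void item_set_key(struct rb_root *root, struct item *it,
			 const char *new_key)
{
	if (!RB_EMPTY_NODE(&it->rb))
		rb_erase(&it->rb, root);	/* unlink under the old key */
	RB_CLEAR_NODE(&it->rb);
	it->key = new_key;
	item_insert(root, it);			/* relink under the new key */
}
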
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index fc8db9c764ac..45ec4d0a50ed 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h | |||
| @@ -135,6 +135,7 @@ struct dso { | |||
| 135 | pthread_mutex_t lock; | 135 | pthread_mutex_t lock; |
| 136 | struct list_head node; | 136 | struct list_head node; |
| 137 | struct rb_node rb_node; /* rbtree node sorted by long name */ | 137 | struct rb_node rb_node; /* rbtree node sorted by long name */ |
| 138 | struct rb_root *root; /* root of rbtree that rb_node is in */ | ||
| 138 | struct rb_root symbols[MAP__NR_TYPES]; | 139 | struct rb_root symbols[MAP__NR_TYPES]; |
| 139 | struct rb_root symbol_names[MAP__NR_TYPES]; | 140 | struct rb_root symbol_names[MAP__NR_TYPES]; |
| 140 | struct { | 141 | struct { |
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 5ef90be2a249..8b303ff20289 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
| @@ -91,6 +91,7 @@ static void dsos__purge(struct dsos *dsos) | |||
| 91 | 91 | ||
| 92 | list_for_each_entry_safe(pos, n, &dsos->head, node) { | 92 | list_for_each_entry_safe(pos, n, &dsos->head, node) { |
| 93 | RB_CLEAR_NODE(&pos->rb_node); | 93 | RB_CLEAR_NODE(&pos->rb_node); |
| 94 | pos->root = NULL; | ||
| 94 | list_del_init(&pos->node); | 95 | list_del_init(&pos->node); |
| 95 | dso__put(pos); | 96 | dso__put(pos); |
| 96 | } | 97 | } |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index bd8f03de5e40..05012bb178d7 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
| @@ -1183,7 +1183,7 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1183 | container_of(pf, struct trace_event_finder, pf); | 1183 | container_of(pf, struct trace_event_finder, pf); |
| 1184 | struct perf_probe_point *pp = &pf->pev->point; | 1184 | struct perf_probe_point *pp = &pf->pev->point; |
| 1185 | struct probe_trace_event *tev; | 1185 | struct probe_trace_event *tev; |
| 1186 | struct perf_probe_arg *args; | 1186 | struct perf_probe_arg *args = NULL; |
| 1187 | int ret, i; | 1187 | int ret, i; |
| 1188 | 1188 | ||
| 1189 | /* Check number of tevs */ | 1189 | /* Check number of tevs */ |
| @@ -1198,19 +1198,23 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1198 | ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, | 1198 | ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, |
| 1199 | pp->retprobe, pp->function, &tev->point); | 1199 | pp->retprobe, pp->function, &tev->point); |
| 1200 | if (ret < 0) | 1200 | if (ret < 0) |
| 1201 | return ret; | 1201 | goto end; |
| 1202 | 1202 | ||
| 1203 | tev->point.realname = strdup(dwarf_diename(sc_die)); | 1203 | tev->point.realname = strdup(dwarf_diename(sc_die)); |
| 1204 | if (!tev->point.realname) | 1204 | if (!tev->point.realname) { |
| 1205 | return -ENOMEM; | 1205 | ret = -ENOMEM; |
| 1206 | goto end; | ||
| 1207 | } | ||
| 1206 | 1208 | ||
| 1207 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, | 1209 | pr_debug("Probe point found: %s+%lu\n", tev->point.symbol, |
| 1208 | tev->point.offset); | 1210 | tev->point.offset); |
| 1209 | 1211 | ||
| 1210 | /* Expand special probe argument if exist */ | 1212 | /* Expand special probe argument if exist */ |
| 1211 | args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); | 1213 | args = zalloc(sizeof(struct perf_probe_arg) * MAX_PROBE_ARGS); |
| 1212 | if (args == NULL) | 1214 | if (args == NULL) { |
| 1213 | return -ENOMEM; | 1215 | ret = -ENOMEM; |
| 1216 | goto end; | ||
| 1217 | } | ||
| 1214 | 1218 | ||
| 1215 | ret = expand_probe_args(sc_die, pf, args); | 1219 | ret = expand_probe_args(sc_die, pf, args); |
| 1216 | if (ret < 0) | 1220 | if (ret < 0) |
| @@ -1234,6 +1238,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
| 1234 | } | 1238 | } |
| 1235 | 1239 | ||
| 1236 | end: | 1240 | end: |
| 1241 | if (ret) { | ||
| 1242 | clear_probe_trace_event(tev); | ||
| 1243 | tf->ntevs--; | ||
| 1244 | } | ||
| 1237 | free(args); | 1245 | free(args); |
| 1238 | return ret; | 1246 | return ret; |
| 1239 | } | 1247 | } |
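
The probe-finder.c hunks above convert the early returns in add_probe_trace_event() into jumps to a single end: label, so a half-initialized trace event is always cleared (and the event count rolled back) on any failure path. A minimal, self-contained sketch of that single-exit cleanup pattern, with illustrative names only:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct event {
	char *name;
	char *args;
};

static int fill_event(struct event *ev)
{
	int ret = 0;

	memset(ev, 0, sizeof(*ev));	/* so cleanup sees NULL pointers */

	ev->name = strdup("probe");	/* step 1: can fail */
	if (ev->name == NULL) {
		ret = -ENOMEM;
		goto end;
	}

	ev->args = strdup("arg1 arg2");	/* step 2: can fail */
	if (ev->args == NULL) {
		ret = -ENOMEM;
		goto end;
	}
end:
	if (ret) {			/* undo any partial work */
		free(ev->name);
		free(ev->args);
	}
	return ret;
}
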
| @@ -1246,7 +1254,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, | |||
| 1246 | struct trace_event_finder tf = { | 1254 | struct trace_event_finder tf = { |
| 1247 | .pf = {.pev = pev, .callback = add_probe_trace_event}, | 1255 | .pf = {.pev = pev, .callback = add_probe_trace_event}, |
| 1248 | .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; | 1256 | .max_tevs = probe_conf.max_probes, .mod = dbg->mod}; |
| 1249 | int ret; | 1257 | int ret, i; |
| 1250 | 1258 | ||
| 1251 | /* Allocate result tevs array */ | 1259 | /* Allocate result tevs array */ |
| 1252 | *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); | 1260 | *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs); |
| @@ -1258,6 +1266,8 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, | |||
| 1258 | 1266 | ||
| 1259 | ret = debuginfo__find_probes(dbg, &tf.pf); | 1267 | ret = debuginfo__find_probes(dbg, &tf.pf); |
| 1260 | if (ret < 0) { | 1268 | if (ret < 0) { |
| 1269 | for (i = 0; i < tf.ntevs; i++) | ||
| 1270 | clear_probe_trace_event(&tf.tevs[i]); | ||
| 1261 | zfree(tevs); | 1271 | zfree(tevs); |
| 1262 | return ret; | 1272 | return ret; |
| 1263 | } | 1273 | } |
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index b4cc7662677e..cd08027a6d2c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -654,19 +654,24 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, | |||
| 654 | struct map_groups *kmaps = map__kmaps(map); | 654 | struct map_groups *kmaps = map__kmaps(map); |
| 655 | struct map *curr_map; | 655 | struct map *curr_map; |
| 656 | struct symbol *pos; | 656 | struct symbol *pos; |
| 657 | int count = 0, moved = 0; | 657 | int count = 0; |
| 658 | struct rb_root old_root = dso->symbols[map->type]; | ||
| 658 | struct rb_root *root = &dso->symbols[map->type]; | 659 | struct rb_root *root = &dso->symbols[map->type]; |
| 659 | struct rb_node *next = rb_first(root); | 660 | struct rb_node *next = rb_first(root); |
| 660 | 661 | ||
| 661 | if (!kmaps) | 662 | if (!kmaps) |
| 662 | return -1; | 663 | return -1; |
| 663 | 664 | ||
| 665 | *root = RB_ROOT; | ||
| 666 | |||
| 664 | while (next) { | 667 | while (next) { |
| 665 | char *module; | 668 | char *module; |
| 666 | 669 | ||
| 667 | pos = rb_entry(next, struct symbol, rb_node); | 670 | pos = rb_entry(next, struct symbol, rb_node); |
| 668 | next = rb_next(&pos->rb_node); | 671 | next = rb_next(&pos->rb_node); |
| 669 | 672 | ||
| 673 | rb_erase_init(&pos->rb_node, &old_root); | ||
| 674 | |||
| 670 | module = strchr(pos->name, '\t'); | 675 | module = strchr(pos->name, '\t'); |
| 671 | if (module) | 676 | if (module) |
| 672 | *module = '\0'; | 677 | *module = '\0'; |
| @@ -674,28 +679,21 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map, | |||
| 674 | curr_map = map_groups__find(kmaps, map->type, pos->start); | 679 | curr_map = map_groups__find(kmaps, map->type, pos->start); |
| 675 | 680 | ||
| 676 | if (!curr_map || (filter && filter(curr_map, pos))) { | 681 | if (!curr_map || (filter && filter(curr_map, pos))) { |
| 677 | rb_erase_init(&pos->rb_node, root); | ||
| 678 | symbol__delete(pos); | 682 | symbol__delete(pos); |
| 679 | } else { | 683 | continue; |
| 680 | pos->start -= curr_map->start - curr_map->pgoff; | ||
| 681 | if (pos->end) | ||
| 682 | pos->end -= curr_map->start - curr_map->pgoff; | ||
| 683 | if (curr_map->dso != map->dso) { | ||
| 684 | rb_erase_init(&pos->rb_node, root); | ||
| 685 | symbols__insert( | ||
| 686 | &curr_map->dso->symbols[curr_map->type], | ||
| 687 | pos); | ||
| 688 | ++moved; | ||
| 689 | } else { | ||
| 690 | ++count; | ||
| 691 | } | ||
| 692 | } | 684 | } |
| 685 | |||
| 686 | pos->start -= curr_map->start - curr_map->pgoff; | ||
| 687 | if (pos->end) | ||
| 688 | pos->end -= curr_map->start - curr_map->pgoff; | ||
| 689 | symbols__insert(&curr_map->dso->symbols[curr_map->type], pos); | ||
| 690 | ++count; | ||
| 693 | } | 691 | } |
| 694 | 692 | ||
| 695 | /* Symbols have been adjusted */ | 693 | /* Symbols have been adjusted */ |
| 696 | dso->adjust_symbols = 1; | 694 | dso->adjust_symbols = 1; |
| 697 | 695 | ||
| 698 | return count + moved; | 696 | return count; |
| 699 | } | 697 | } |
| 700 | 698 | ||
| 701 | /* | 699 | /* |
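
The dso__split_kallsyms_for_kcore() rework above moves every symbol out of the dso's tree and into the tree of whichever map it falls in, even when that destination turns out to be the same dso. Detaching the whole tree into old_root first makes each erase/insert pair safe regardless of the destination. A generic sketch of that drain-and-redistribute pattern:

#include <linux/rbtree.h>

/* Walk a tree destructively: snapshot the root, reset the source,
 * then unlink each node before handing it to a placement callback,
 * which may insert it back into the (now empty) source tree.
 */
static void drain_tree(struct rb_root *root,
		       void (*place)(struct rb_node *node))
{
	struct rb_root old_root = *root;	/* snapshot the tree */
	struct rb_node *next = rb_first(&old_root);

	*root = RB_ROOT;			/* source starts over empty */

	while (next) {
		struct rb_node *node = next;

		next = rb_next(node);		/* advance before unlink */
		rb_erase(node, &old_root);
		RB_CLEAR_NODE(node);
		place(node);
	}
}
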
| @@ -1438,9 +1436,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) | |||
| 1438 | if (lstat(dso->name, &st) < 0) | 1436 | if (lstat(dso->name, &st) < 0) |
| 1439 | goto out; | 1437 | goto out; |
| 1440 | 1438 | ||
| 1441 | if (st.st_uid && (st.st_uid != geteuid())) { | 1439 | if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { |
| 1442 | pr_warning("File %s not owned by current user or root, " | 1440 | pr_warning("File %s not owned by current user or root, " |
| 1443 | "ignoring it.\n", dso->name); | 1441 | "ignoring it (use -f to override).\n", dso->name); |
| 1444 | goto out; | 1442 | goto out; |
| 1445 | } | 1443 | } |
| 1446 | 1444 | ||
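
This symbol.c hunk closes the loop on the `-f` change in builtin-report.c earlier in this series: with the flag stored in the shared symbol_conf, the ownership check in dso__load() can be overridden. A self-contained illustration of that check (hypothetical helper, not the perf code):

#include <stdbool.h>
#include <sys/stat.h>
#include <unistd.h>

/* Trust a symbol file if it is root-owned, owned by the current
 * effective user, or if the user explicitly forced loading.
 */
static bool may_read_symbols(const struct stat *st, bool force)
{
	return force || st->st_uid == 0 || st->st_uid == geteuid();
}
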
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index 40073c60b83d..dcd786e364f2 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h | |||
| @@ -84,6 +84,7 @@ struct symbol_conf { | |||
| 84 | unsigned short priv_size; | 84 | unsigned short priv_size; |
| 85 | unsigned short nr_events; | 85 | unsigned short nr_events; |
| 86 | bool try_vmlinux_path, | 86 | bool try_vmlinux_path, |
| 87 | force, | ||
| 87 | ignore_vmlinux, | 88 | ignore_vmlinux, |
| 88 | ignore_vmlinux_buildid, | 89 | ignore_vmlinux_buildid, |
| 89 | show_kernel_path, | 90 | show_kernel_path, |
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index d8e4b20b6d54..0dac7e05a6ac 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
| @@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void) | |||
| 1173 | unsigned long long msr; | 1173 | unsigned long long msr; |
| 1174 | unsigned int ratio; | 1174 | unsigned int ratio; |
| 1175 | 1175 | ||
| 1176 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | 1176 | get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); |
| 1177 | 1177 | ||
| 1178 | fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); | 1178 | fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr); |
| 1179 | 1179 | ||
| 1180 | ratio = (msr >> 40) & 0xFF; | 1180 | ratio = (msr >> 40) & 0xFF; |
| 1181 | fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", | 1181 | fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n", |
| @@ -1807,7 +1807,7 @@ void check_permissions() | |||
| 1807 | * | 1807 | * |
| 1808 | * MSR_SMI_COUNT 0x00000034 | 1808 | * MSR_SMI_COUNT 0x00000034 |
| 1809 | * | 1809 | * |
| 1810 | * MSR_NHM_PLATFORM_INFO 0x000000ce | 1810 | * MSR_PLATFORM_INFO 0x000000ce |
| 1811 | * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | 1811 | * MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 |
| 1812 | * | 1812 | * |
| 1813 | * MSR_PKG_C3_RESIDENCY 0x000003f8 | 1813 | * MSR_PKG_C3_RESIDENCY 0x000003f8 |
| @@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) | |||
| 1876 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); | 1876 | get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr); |
| 1877 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; | 1877 | pkg_cstate_limit = pkg_cstate_limits[msr & 0xF]; |
| 1878 | 1878 | ||
| 1879 | get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); | 1879 | get_msr(base_cpu, MSR_PLATFORM_INFO, &msr); |
| 1880 | base_ratio = (msr >> 8) & 0xFF; | 1880 | base_ratio = (msr >> 8) & 0xFF; |
| 1881 | 1881 | ||
| 1882 | base_hz = base_ratio * bclk * 1000000; | 1882 | base_hz = base_ratio * bclk * 1000000; |
diff --git a/tools/testing/selftests/futex/README b/tools/testing/selftests/futex/README index 3224a049b196..0558bb9ce0a6 100644 --- a/tools/testing/selftests/futex/README +++ b/tools/testing/selftests/futex/README | |||
| @@ -27,7 +27,7 @@ o The build system shall remain as simple as possible, avoiding any archive or | |||
| 27 | o Where possible, any helper functions or other package-wide code shall be | 27 | o Where possible, any helper functions or other package-wide code shall be |
| 28 | implemented in header files, avoiding the need to compile intermediate object | 28 | implemented in header files, avoiding the need to compile intermediate object |
| 29 | files. | 29 | files. |
| 30 | o External dependendencies shall remain as minimal as possible. Currently gcc | 30 | o External dependencies shall remain as minimal as possible. Currently gcc |
| 31 | and glibc are the only dependencies. | 31 | and glibc are the only dependencies. |
| 32 | o Tests return 0 for success and < 0 for failure. | 32 | o Tests return 0 for success and < 0 for failure. |
| 33 | 33 | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index e38cc54942db..882fe83a3554 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
| @@ -492,6 +492,9 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) | |||
| 492 | pid_t parent = getppid(); | 492 | pid_t parent = getppid(); |
| 493 | int fd; | 493 | int fd; |
| 494 | void *map1, *map2; | 494 | void *map1, *map2; |
| 495 | int page_size = sysconf(_SC_PAGESIZE); | ||
| 496 | |||
| 497 | ASSERT_LT(0, page_size); | ||
| 495 | 498 | ||
| 496 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | 499 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); |
| 497 | ASSERT_EQ(0, ret); | 500 | ASSERT_EQ(0, ret); |
| @@ -504,16 +507,16 @@ TEST_SIGNAL(KILL_one_arg_six, SIGSYS) | |||
| 504 | 507 | ||
| 505 | EXPECT_EQ(parent, syscall(__NR_getppid)); | 508 | EXPECT_EQ(parent, syscall(__NR_getppid)); |
| 506 | map1 = (void *)syscall(sysno, | 509 | map1 = (void *)syscall(sysno, |
| 507 | NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, PAGE_SIZE); | 510 | NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); |
| 508 | EXPECT_NE(MAP_FAILED, map1); | 511 | EXPECT_NE(MAP_FAILED, map1); |
| 509 | /* mmap2() should never return. */ | 512 | /* mmap2() should never return. */ |
| 510 | map2 = (void *)syscall(sysno, | 513 | map2 = (void *)syscall(sysno, |
| 511 | NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); | 514 | NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); |
| 512 | EXPECT_EQ(MAP_FAILED, map2); | 515 | EXPECT_EQ(MAP_FAILED, map2); |
| 513 | 516 | ||
| 514 | /* The test failed, so clean up the resources. */ | 517 | /* The test failed, so clean up the resources. */ |
| 515 | munmap(map1, PAGE_SIZE); | 518 | munmap(map1, page_size); |
| 516 | munmap(map2, PAGE_SIZE); | 519 | munmap(map2, page_size); |
| 517 | close(fd); | 520 | close(fd); |
| 518 | } | 521 | } |
| 519 | 522 | ||
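
The seccomp selftest fix above replaces the kernel-internal PAGE_SIZE constant with a runtime query: PAGE_SIZE is not guaranteed to be exposed by userspace headers, and on architectures with configurable page sizes a hardcoded value can simply be wrong. The portable idiom, as a standalone example:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);

	if (page_size < 0)	/* the query itself can fail */
		return 1;

	printf("page size: %ld bytes\n", page_size);
	return 0;
}
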
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index bcf5ec760eb9..5a6016224bb9 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c | |||
| @@ -128,6 +128,7 @@ static const char * const page_flag_names[] = { | |||
| 128 | [KPF_THP] = "t:thp", | 128 | [KPF_THP] = "t:thp", |
| 129 | [KPF_BALLOON] = "o:balloon", | 129 | [KPF_BALLOON] = "o:balloon", |
| 130 | [KPF_ZERO_PAGE] = "z:zero_page", | 130 | [KPF_ZERO_PAGE] = "z:zero_page", |
| 131 | [KPF_IDLE] = "i:idle_page", | ||
| 131 | 132 | ||
| 132 | [KPF_RESERVED] = "r:reserved", | 133 | [KPF_RESERVED] = "r:reserved", |
| 133 | [KPF_MLOCKED] = "m:mlocked", | 134 | [KPF_MLOCKED] = "m:mlocked", |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 21a0ab2d8919..69bca185c471 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c | |||
| @@ -221,17 +221,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 221 | kvm_timer_update_state(vcpu); | 221 | kvm_timer_update_state(vcpu); |
| 222 | 222 | ||
| 223 | /* | 223 | /* |
| 224 | * If we enter the guest with the virtual input level to the VGIC | 224 | * If we enter the guest with the virtual input level to the VGIC |
| 225 | * asserted, then we have already told the VGIC what we need to, and | 225 | * asserted, then we have already told the VGIC what we need to, and |
| 226 | * we don't need to exit from the guest until the guest deactivates | 226 | * we don't need to exit from the guest until the guest deactivates |
| 227 | * the already injected interrupt, so therefore we should set the | 227 | * the already injected interrupt, so therefore we should set the |
| 228 | * hardware active state to prevent unnecessary exits from the guest. | 228 | * hardware active state to prevent unnecessary exits from the guest. |
| 229 | * | 229 | * |
| 230 | * Conversely, if the virtual input level is deasserted, then always | 230 | * Also, if we enter the guest with the virtual timer interrupt active, |
| 231 | * clear the hardware active state to ensure that hardware interrupts | 231 | * then it must be active on the physical distributor, because we set |
| 232 | * from the timer triggers a guest exit. | 232 | * the HW bit and the guest must be able to deactivate the virtual and |
| 233 | */ | 233 | * physical interrupt at the same time. |
| 234 | if (timer->irq.level) | 234 | * |
| 235 | * Conversely, if the virtual input level is deasserted and the virtual | ||
| 236 | * interrupt is not active, then always clear the hardware active state | ||
| 237 | * to ensure that hardware interrupts from the timer triggers a guest | ||
| 238 | * exit. | ||
| 239 | */ | ||
| 240 | if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map)) | ||
| 235 | phys_active = true; | 241 | phys_active = true; |
| 236 | else | 242 | else |
| 237 | phys_active = false; | 243 | phys_active = false; |
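
The rewritten comment above carries the whole argument: the physical timer interrupt must stay active not only while the virtual line is asserted, but also while the guest still has the mapped virtual interrupt active, because the HW bit ties their deactivation together. For context, a sketch of how the computed flag would then reach the physical distributor, assumed from the surrounding arch_timer.c code and using the genirq irqchip-state API (the counterpart of the irq_get_irqchip_state() call removed from vgic.c below):

/* Assumed shape of the step that follows in kvm_timer_flush_hwstate():
 * push the decision into the physical GIC so the host timer interrupt
 * stays active until the guest deactivates it.
 */
static void timer_sync_phys_active(struct arch_timer_cpu *timer,
				   bool phys_active)
{
	int ret;

	ret = irq_set_irqchip_state(timer->map->irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);
}
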
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 533538385d5d..65461f821a75 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
| @@ -1096,6 +1096,27 @@ static void vgic_retire_lr(int lr_nr, struct kvm_vcpu *vcpu) | |||
| 1096 | vgic_set_lr(vcpu, lr_nr, vlr); | 1096 | vgic_set_lr(vcpu, lr_nr, vlr); |
| 1097 | } | 1097 | } |
| 1098 | 1098 | ||
| 1099 | static bool dist_active_irq(struct kvm_vcpu *vcpu) | ||
| 1100 | { | ||
| 1101 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 1102 | |||
| 1103 | return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map) | ||
| 1107 | { | ||
| 1108 | int i; | ||
| 1109 | |||
| 1110 | for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) { | ||
| 1111 | struct vgic_lr vlr = vgic_get_lr(vcpu, i); | ||
| 1112 | |||
| 1113 | if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE) | ||
| 1114 | return true; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | return dist_active_irq(vcpu); | ||
| 1118 | } | ||
| 1119 | |||
| 1099 | /* | 1120 | /* |
| 1100 | * An interrupt may have been disabled after being made pending on the | 1121 | * An interrupt may have been disabled after being made pending on the |
| 1101 | * CPU interface (the classic case is a timer running while we're | 1122 | * CPU interface (the classic case is a timer running while we're |
| @@ -1248,7 +1269,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
| 1248 | * may have been serviced from another vcpu. In all cases, | 1269 | * may have been serviced from another vcpu. In all cases, |
| 1249 | * move along. | 1270 | * move along. |
| 1250 | */ | 1271 | */ |
| 1251 | if (!kvm_vgic_vcpu_pending_irq(vcpu) && !kvm_vgic_vcpu_active_irq(vcpu)) | 1272 | if (!kvm_vgic_vcpu_pending_irq(vcpu) && !dist_active_irq(vcpu)) |
| 1252 | goto epilog; | 1273 | goto epilog; |
| 1253 | 1274 | ||
| 1254 | /* SGIs */ | 1275 | /* SGIs */ |
| @@ -1396,25 +1417,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
| 1396 | static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) | 1417 | static bool vgic_sync_hwirq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr) |
| 1397 | { | 1418 | { |
| 1398 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1419 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
| 1399 | struct irq_phys_map *map; | ||
| 1400 | bool phys_active; | ||
| 1401 | bool level_pending; | 1420 | bool level_pending; |
| 1402 | int ret; | ||
| 1403 | 1421 | ||
| 1404 | if (!(vlr.state & LR_HW)) | 1422 | if (!(vlr.state & LR_HW)) |
| 1405 | return false; | 1423 | return false; |
| 1406 | 1424 | ||
| 1407 | map = vgic_irq_map_search(vcpu, vlr.irq); | 1425 | if (vlr.state & LR_STATE_ACTIVE) |
| 1408 | BUG_ON(!map); | 1426 | return false; |
| 1409 | |||
| 1410 | ret = irq_get_irqchip_state(map->irq, | ||
| 1411 | IRQCHIP_STATE_ACTIVE, | ||
| 1412 | &phys_active); | ||
| 1413 | |||
| 1414 | WARN_ON(ret); | ||
| 1415 | |||
| 1416 | if (phys_active) | ||
| 1417 | return 0; | ||
| 1418 | 1427 | ||
| 1419 | spin_lock(&dist->lock); | 1428 | spin_lock(&dist->lock); |
| 1420 | level_pending = process_queued_irq(vcpu, lr, vlr); | 1429 | level_pending = process_queued_irq(vcpu, lr, vlr); |
| @@ -1479,17 +1488,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
| 1479 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); | 1488 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
| 1480 | } | 1489 | } |
| 1481 | 1490 | ||
| 1482 | int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu) | ||
| 1483 | { | ||
| 1484 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
| 1485 | |||
| 1486 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
| 1487 | return 0; | ||
| 1488 | |||
| 1489 | return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu); | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | |||
| 1493 | void vgic_kick_vcpus(struct kvm *kvm) | 1491 | void vgic_kick_vcpus(struct kvm *kvm) |
| 1494 | { | 1492 | { |
| 1495 | struct kvm_vcpu *vcpu; | 1493 | struct kvm_vcpu *vcpu; |
