179 files changed, 2395 insertions, 761 deletions
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
diff --git a/Documentation/Intel-IOMMU.txt b/Documentation/Intel-IOMMU.txt
index 7b57fc087088..49585b6e1ea2 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -3,7 +3,7 @@ Linux IOMMU Support
 
 The architecture spec can be obtained from the below location.
 
-http://www.intel.com/technology/virtualization/
+http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
 
 This guide gives a quick cheat sheet for some basic understanding.
 
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 053f613fc9a9..07e4cdf02407 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3025,7 +3025,7 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.
 
-4.90 KVM_SMI
+4.96 KVM_SMI
 
 Capability: KVM_CAP_X86_SMM
 Architectures: x86
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index cd822d8454c0..307237cfe728 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -27,6 +27,8 @@ $(warning LSE atomics not supported by binutils)
 endif
 
 KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS += $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 18ca9fb9e65f..86581f793e39 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -16,7 +16,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
@@ -37,15 +36,13 @@ CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_132_SOC=y
-CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SPRD=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_UNIPHIER=y
@@ -54,14 +51,19 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
-CONFIG_SMP=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_XEN=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -100,7 +102,11 @@ CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
 CONFIG_SKY2=y
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
@@ -117,25 +123,23 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_UARTS_4=y
-CONFIG_SERIAL_SAMSUNG_UARTS=4
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
@@ -176,8 +180,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -187,28 +189,33 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_RCAR_DMAC=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
-# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_HISILICON_IRQ_MBIGEN=y
 CONFIG_PHY_XGENE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
@@ -239,6 +246,7 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_FTRACE is not set
 CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 2d545d7aa80b..bf464de33f52 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -67,11 +67,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -81,7 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX		__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
@@ -153,6 +153,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte)		(!!(pte_val(pte) & PTE_USER))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -163,8 +164,6 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
@@ -278,13 +277,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-	if (pte_valid_user(pte)) {
-		if (!pte_special(pte) && pte_exec(pte))
-			__sync_icache_dcache(pte, addr);
+	if (pte_valid(pte)) {
 		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
+		if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+			__sync_icache_dcache(pte, addr);
 	}
 
 	/*
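
Taken together, the set_pte_at() hunks above leave the function in roughly the following shape; this is only a condensed sketch of the code shown in the hunks, rounded out with the final set_pte() call that follows later in the file:

	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
	                              pte_t *ptep, pte_t pte)
	{
	        if (pte_valid(pte)) {
	                /* Derive the hardware read-only bit from the SW dirty/write bits. */
	                if (pte_sw_dirty(pte) && pte_write(pte))
	                        pte_val(pte) &= ~PTE_RDONLY;
	                else
	                        pte_val(pte) |= PTE_RDONLY;
	                /* I/D cache sync is now limited to executable user mappings. */
	                if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
	                        __sync_icache_dcache(pte, addr);
	        }
	        set_pte(ptep, pte);
	}
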
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ffe9c2b6431b..917d98108b3f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -514,9 +514,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
 #endif
 
 	/* EL2 debug */
+	mrs	x0, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	x0, x0, #8, #4
+	cmp	x0, #1
+	b.lt	4f			// Skip if no PMU present
 	mrs	x0, pmcr_el0		// Disable debug access traps
 	ubfx	x0, x0, #11, #5		// to EL2 and allow access to
 	msr	mdcr_el2, x0		// all PMU counters from EL1
+4:
 
 	/* Stage-2 translation */
 	msr	vttbr_el2, xzr
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index bc2abb8b1599..999633bd7294 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -65,6 +65,16 @@
 #ifdef CONFIG_EFI
 
 /*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)	ABSOLUTE(sym)
+
+/*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
  * accessed by the stub, so provide some aliases to make them accessible.
@@ -73,25 +83,25 @@
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp = __pi_memcmp;
-__efistub_memchr = __pi_memchr;
-__efistub_memcpy = __pi_memcpy;
-__efistub_memmove = __pi_memmove;
-__efistub_memset = __pi_memset;
-__efistub_strlen = __pi_strlen;
-__efistub_strcmp = __pi_strcmp;
-__efistub_strncmp = __pi_strncmp;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy = __pi_memcpy;
-__efistub___memmove = __pi_memmove;
-__efistub___memset = __pi_memset;
+__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset = KALLSYMS_HIDE(__pi_memset);
 #endif
 
-__efistub__text = _text;
-__efistub__end = _end;
-__efistub__edata = _edata;
+__efistub__text = KALLSYMS_HIDE(_text);
+__efistub__end = KALLSYMS_HIDE(_end);
+__efistub__edata = KALLSYMS_HIDE(_edata);
 
 #endif
 
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 5a22a119a74c..0adbebbc2803 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -46,7 +46,7 @@ enum address_markers_idx {
 	PCI_START_NR,
 	PCI_END_NR,
 	MODULES_START_NR,
-	MODUELS_END_NR,
+	MODULES_END_NR,
 	KERNEL_SPACE_NR,
 };
 
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index cf038c7d9fa9..cab7a5be40aa 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -120,6 +120,7 @@ static void __init cpu_set_ttbr1(unsigned long ttbr1)
 void __init kasan_init(void)
 {
 	struct memblock_region *reg;
+	int i;
 
 	/*
 	 * We are going to perform proper setup of shadow memory.
@@ -155,6 +156,14 @@ void __init kasan_init(void)
 				pfn_to_nid(virt_to_pfn(start)));
 	}
 
+	/*
+	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
+	 * should make sure that it maps the zero page read-only.
+	 */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(&kasan_zero_pte[i],
+			pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
 	memset(kasan_zero_page, 0, PAGE_SIZE);
 	cpu_set_ttbr1(__pa(swapper_pg_dir));
 	flush_tlb_all();
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 3571c7309c5e..cf6240741134 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (end < MODULES_VADDR || end >= MODULES_END)
 		return -EINVAL;
 
+	if (!numpages)
+		return 0;
+
 	data.set_mask = set_mask;
 	data.clear_mask = clear_mask;
 
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 146bd99a7532..e6a30e1268a8 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -84,3 +84,15 @@
 	b.lo	9998b
 	dsb	\domain
 	.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+	.macro	reset_pmuserenr_el0, tmpreg
+	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
+	sbfx	\tmpreg, \tmpreg, #8, #4
+	cmp	\tmpreg, #1			// Skip if no PMU present
+	b.lt	9000f
+	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+9000:
+	.endm
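
The sbfx in reset_pmuserenr_el0 (and the matching check added to head.S above) extracts ID_AA64DFR0_EL1.PMUVer, bits [11:8], as a signed field, so both 0 (no PMU) and 0xf (sign-extends to -1, a non-architected PMU) fail the cmp #1 test and skip the register write. A rough stand-alone C equivalent of that check, for illustration only and not part of the patch:

	#include <stdbool.h>
	#include <stdint.h>

	/* Signed extract of the 4-bit field starting at 'shift', as sbfx does. */
	static int sbfx4(uint64_t reg, unsigned int shift)
	{
		return (int64_t)(reg << (60 - shift)) >> 60;
	}

	static bool pmuv3_present(uint64_t id_aa64dfr0)
	{
		return sbfx4(id_aa64dfr0, 8) >= 1;	/* PMUVer >= 1 */
	}
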
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a3d867e723b4..c164d2cb35c0 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
 	 */
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	mov	x0, x12
 	dsb	nsh		// Make sure local tlb invalidation completed
 	isb
@@ -154,7 +154,7 @@ ENTRY(__cpu_setup)
 	msr	cpacr_el1, x0			// Enable FP/ASIMD
 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
 	msr	mdscr_el1, x0			// access to the DCC from EL0
-	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
+	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 06f17e778c27..8d1c8162f0c1 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -50,7 +50,9 @@
  * set of bits not changed in pmd_modify.
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED | _PAGE_THP_HUGE)
+			 _PAGE_ACCESSED | _PAGE_THP_HUGE | _PAGE_PTE | \
+			 _PAGE_SOFT_DIRTY)
+
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8204b0c393aa..8d1c41d28318 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -223,7 +223,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
-#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
 #define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 271fefbbe521..9d08d8cbed1a 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -38,8 +38,7 @@
 
 #define KVM_MAX_VCPUS		NR_CPUS
 #define KVM_MAX_VCORES		NR_CPUS
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
+#define KVM_USER_MEM_SLOTS 512
 
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 5654ece02c0d..3fa9df70aa20 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -383,3 +383,4 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
+SYSCALL(copy_file_range)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6a5ace5fa0c8..1f2594d45605 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls		379
+#define NR_syscalls		380
 
 #define __NR__exit __NR_exit
 
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 12a05652377a..940290d45b08 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -389,5 +389,6 @@
 #define __NR_userfaultfd	364
 #define __NR_membarrier		365
 #define __NR_mlock2		378
+#define __NR_copy_file_range	379
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
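
The powerpc and s390 hunks in this series only wire the new copy_file_range() number into the syscall tables and headers; the implementation lives in generic fs code. Until libc grows a wrapper it can be reached through syscall(2). A hedged userspace sketch follows (file names are placeholders; __NR_copy_file_range comes from the patched uapi headers, e.g. 379 on powerpc and 375 on s390 here):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int in = open("src.bin", O_RDONLY);
		int out = open("dst.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		long long off_in = 0, off_out = 0;	/* kernel expects 64-bit loff_t */
		ssize_t n;

		if (in < 0 || out < 0)
			return 1;
		n = syscall(__NR_copy_file_range, in, &off_in, out, &off_out,
			    65536, 0u);
		printf("copied %zd bytes\n", n);
		return n < 0;
	}
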
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 8654cb166c19..ca9e5371930e 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
 const char *eeh_pe_loc_get(struct eeh_pe *pe)
 {
 	struct pci_bus *bus = eeh_pe_bus_get(pe);
-	struct device_node *dn = pci_bus_to_OF_node(bus);
+	struct device_node *dn;
 	const char *loc = NULL;
 
-	if (!dn)
-		goto out;
+	while (bus) {
+		dn = pci_bus_to_OF_node(bus);
+		if (!dn) {
+			bus = bus->parent;
+			continue;
+		}
 
-	/* PHB PE or root PE ? */
-	if (pci_is_root_bus(bus)) {
-		loc = of_get_property(dn, "ibm,loc-code", NULL);
-		if (!loc)
+		if (pci_is_root_bus(bus))
 			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+		else
+			loc = of_get_property(dn, "ibm,slot-location-code",
+					      NULL);
+
 		if (loc)
-			goto out;
+			return loc;
 
-		/* Check the root port */
-		dn = dn->child;
-		if (!dn)
-			goto out;
+		bus = bus->parent;
 	}
 
-	loc = of_get_property(dn, "ibm,loc-code", NULL);
-	if (!loc)
-		loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
-	return loc ? loc : "N/A";
+	return "N/A";
 }
 
 /**
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index db475d41b57a..f28754c497e5 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
 	li	r5,0
 	blr	/* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_MODULES
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-
-#ifdef CONFIG_MODVERSIONS
-.weak __crc_TOC.
-.section "___kcrctab+TOC.","a"
-.globl __kcrctab_TOC.
-__kcrctab_TOC.:
-	.llong __crc_TOC.
-#endif
-
-/*
- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
- * Both modpost and depmod strip the leading . so we do the same here.
- */
-.section "__ksymtab_strings","a"
-__kstrtab_TOC.:
-	.asciz "TOC."
-
-.section "___ksymtab+TOC.","a"
-/* This symbol name is important: it's used by modpost to find exported syms */
-.globl __ksymtab_TOC.
-__ksymtab_TOC.:
-	.llong 0 /* .value */
-	.llong __kstrtab_TOC.
-#endif /* ELFv2 */
-#endif /* MODULES */
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 59663af9315f..ac64ffdb52c8 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
 	}
 }
 
-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
+/*
+ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
+ * seem to be defined (value set later).
+ */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
 	unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 	for (i = 1; i < numsyms; i++) {
 		if (syms[i].st_shndx == SHN_UNDEF) {
 			char *name = strtab + syms[i].st_name;
-			if (name[0] == '.')
+			if (name[0] == '.') {
+				if (strcmp(name+1, "TOC.") == 0)
+					syms[i].st_shndx = SHN_ABS;
 				memmove(name, name+1, strlen(name));
+			}
 		}
 	}
 }
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
 	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
 
 	for (i = 1; i < numsyms; i++) {
-		if (syms[i].st_shndx == SHN_UNDEF
+		if (syms[i].st_shndx == SHN_ABS
 		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
 			return &syms[i];
 	}
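
The dedotify() change above is what makes the relaxed find_dot_toc() test work: the fake ".TOC." entry loses its leading dot and is flipped from SHN_UNDEF to SHN_ABS, so the later scan can pick it out without matching genuinely undefined symbols. Schematically, using the standard ELF fields (a sketch only, outside the kernel tree):

	#include <elf.h>
	#include <string.h>

	/* After dedotify(): the placeholder is named "TOC." and marked SHN_ABS. */
	static Elf64_Sym *find_fake_toc(Elf64_Sym *syms, unsigned int numsyms,
					const char *strtab)
	{
		unsigned int i;

		for (i = 1; i < numsyms; i++)
			if (syms[i].st_shndx == SHN_ABS &&
			    strcmp(strtab + syms[i].st_name, "TOC.") == 0)
				return &syms[i];
		return NULL;
	}
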
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 774a253ca4e1..9bf7031a67ff 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -377,15 +377,12 @@ no_seg_found:
 
 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 {
-	struct kvmppc_vcpu_book3s *vcpu_book3s;
 	u64 esid, esid_1t;
 	int slb_nr;
 	struct kvmppc_slb *slbe;
 
 	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
 
-	vcpu_book3s = to_book3s(vcpu);
-
 	esid = GET_ESID(rb);
 	esid_1t = GET_ESID_1T(rb);
 	slb_nr = rb & 0xfff;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index cff207b72c46..baeddb06811d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -833,6 +833,24 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 	vcpu->stat.sum_exits++;
 
+	/*
+	 * This can happen if an interrupt occurs in the last stages
+	 * of guest entry or the first stages of guest exit (i.e. after
+	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
+	 * That can happen due to a bug, or due to a machine check
+	 * occurring at just the wrong time.
+	 */
+	if (vcpu->arch.shregs.msr & MSR_HV) {
+		printk(KERN_EMERG "KVM trap in HV mode!\n");
+		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+			vcpu->arch.trap, kvmppc_get_pc(vcpu),
+			vcpu->arch.shregs.msr);
+		kvmppc_dump_regs(vcpu);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		run->hw.hardware_exit_reason = vcpu->arch.trap;
+		return RESUME_HOST;
+	}
 	run->exit_reason = KVM_EXIT_UNKNOWN;
 	run->ready_for_interrupt_injection = 1;
 	switch (vcpu->arch.trap) {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3c6badcd53ef..6ee26de9a1de 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
 	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
-	rlwimi	r5, r4, 1, DAWRX_WT
+	rlwimi	r5, r4, 2, DAWRX_WT
 	clrrdi	r4, r4, 3
 	std	r4, VCPU_DAWR(r3)
 	std	r5, VCPU_DAWRX(r3)
@@ -2404,6 +2404,8 @@ machine_check_realmode:
 	 * guest as machine check causing guest to crash.
 	 */
 	ld	r11, VCPU_MSR(r9)
+	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
+	bne	mc_cont			/* if so, exit to host */
 	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
 	beq	1f			/* Deliver a machine check to guest */
 	ld	r10, VCPU_PC(r9)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6fd2405c7f4a..a3b182dcb823 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			r = -ENXIO;
 			break;
 		}
-		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
 		break;
 	case KVM_REG_PPC_VSCR:
 		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 			r = -ENXIO;
 			break;
 		}
-		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
 		break;
 	case KVM_REG_PPC_VRSAVE:
-		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-			r = -ENXIO;
-			break;
-		}
-		vcpu->arch.vrsave = set_reg_val(reg->id, val);
+		val = get_reg_val(reg->id, vcpu->arch.vrsave);
 		break;
 #endif /* CONFIG_ALTIVEC */
 	default:
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 			r = -ENXIO;
 			break;
 		}
-		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+		vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
 		break;
 	case KVM_REG_PPC_VSCR:
 		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
 			r = -ENXIO;
 			break;
 		}
-		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+		vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
 		break;
 	case KVM_REG_PPC_VRSAVE:
-		val = get_reg_val(reg->id, vcpu->arch.vrsave);
+		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+			r = -ENXIO;
+			break;
+		}
+		vcpu->arch.vrsave = set_reg_val(reg->id, val);
 		break;
 #endif /* CONFIG_ALTIVEC */
 	default:
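
The two powerpc.c hunks swap bodies that had ended up inverted: the get path must copy out of the vcpu into val and the set path must copy val into the vcpu, with the Altivec feature check on VRSAVE kept on the set side as shown. From userspace this state is driven through the one_reg ioctls; a hedged sketch, assuming KVM_REG_PPC_VRSAVE from the powerpc uapi headers and an already-created vcpu fd:

	#include <linux/kvm.h>
	#include <stdint.h>
	#include <sys/ioctl.h>

	/* Round-trip VRSAVE through KVM_GET_ONE_REG / KVM_SET_ONE_REG. */
	static int roundtrip_vrsave(int vcpu_fd)
	{
		uint64_t v = 0;
		struct kvm_one_reg reg = {
			.id   = KVM_REG_PPC_VRSAVE,
			.addr = (uintptr_t)&v,
		};

		if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)	/* kernel fills v */
			return -1;
		return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* kernel reads v */
	}
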
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 22d94c3e6fc4..d0f0a514b04e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -560,12 +560,12 @@ subsys_initcall(add_system_ram_resources);
  */
 int devmem_is_allowed(unsigned long pfn)
 {
+	if (page_is_rtas_user_buf(pfn))
+		return 1;
 	if (iomem_is_exclusive(PFN_PHYS(pfn)))
 		return 0;
 	if (!page_is_ram(pfn))
 		return 1;
-	if (page_is_rtas_user_buf(pfn))
-		return 1;
 	return 0;
 }
 #endif /* CONFIG_STRICT_DEVMEM */
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7d5e295255b7..9958ba8bf0d2 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -816,7 +816,7 @@ static struct power_pmu power8_pmu = {
 	.get_constraint = power8_get_constraint,
 	.get_alternatives = power8_get_alternatives,
 	.disable_pmc = power8_disable_pmc,
-	.flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+	.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
 	.n_generic = ARRAY_SIZE(power8_generic_events),
 	.generic_events = power8_generic_events,
 	.cache_events = &power8_cache_events,
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 16aa0c779e07..595a275c36f8 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -8,6 +8,8 @@
 
 #include <linux/types.h>
 
+#define ARCH_IRQ_ENABLED	(3UL << (BITS_PER_LONG - 8))
+
 /* store then OR system mask. */
 #define __arch_local_irq_stosm(__or) \
 ({ \
@@ -54,14 +56,17 @@ static inline notrace void arch_local_irq_enable(void)
 	__arch_local_irq_stosm(0x03);
 }
 
+/* This only restores external and I/O interrupt state */
 static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
-	__arch_local_irq_ssm(flags);
+	/* only disabled->disabled and disabled->enabled is valid */
+	if (flags & ARCH_IRQ_ENABLED)
+		arch_local_irq_enable();
 }
 
 static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & (3UL << (BITS_PER_LONG - 8)));
+	return !(flags & ARCH_IRQ_ENABLED);
 }
 
 static inline notrace bool arch_irqs_disabled(void)
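
After this rework, arch_local_irq_restore() no longer reloads the saved system mask wholesale: at restore time interrupts are already disabled, so the only legal transitions are disabled to disabled (nothing to do) and disabled to enabled (when the saved flags carry the external/I-O enable bits, now named ARCH_IRQ_ENABLED). That is exactly what the usual calling pattern needs, sketched here with the generic kernel helpers that end up in these arch hooks:

	unsigned long flags;

	local_irq_save(flags);		/* disable, remember previous state */
	/* ... critical section ... */
	local_irq_restore(flags);	/* re-enable only if it was enabled before */
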
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6742414dbd6f..8959ebb6d2c9 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -546,7 +546,6 @@ struct kvm_vcpu_arch {
 	struct kvm_s390_sie_block *sie_block;
 	unsigned int host_acrs[NUM_ACRS];
 	struct fpu host_fpregs;
-	struct fpu guest_fpregs;
 	struct kvm_s390_local_interrupt local_int;
 	struct hrtimer ckc_timer;
 	struct kvm_s390_pgm_info pgm;
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 1a9a98de5bde..69aa18be61af 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -8,10 +8,13 @@
 #include <asm/pci_insn.h>
 
 /* I/O Map */
-#define ZPCI_IOMAP_MAX_ENTRIES		0x7fff
-#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000ULL
-#define ZPCI_IOMAP_ADDR_IDX_MASK	0x7fff000000000000ULL
-#define ZPCI_IOMAP_ADDR_OFF_MASK	0x0000ffffffffffffULL
+#define ZPCI_IOMAP_SHIFT		48
+#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
+#define ZPCI_IOMAP_MAX_ENTRIES \
+	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+#define ZPCI_IOMAP_ADDR_IDX_MASK \
+	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
 
 struct zpci_iomap_entry {
 	u32 fh;
@@ -21,8 +24,9 @@ struct zpci_iomap_entry {
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
 
+#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
 #define ZPCI_IDX(addr) \
-	(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+	(((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
 #define ZPCI_OFFSET(addr) \
 	((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
 
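
With ZPCI_IOMAP_SHIFT at 48, the special iomap addresses decompose into the fixed top bit, a 15-bit entry index and a 48-bit offset, and ZPCI_IOMAP_MAX_ENTRIES evaluates to 0x8000 (one more than the old hard-coded 0x7fff). A quick worked check of the derived constants, as a stand-alone sketch using ULL literals so it also builds on 32-bit hosts (the kernel header can rely on 64-bit unsigned long on s390):

	#include <stdint.h>
	#include <stdio.h>

	#define IOMAP_SHIFT	48
	#define IOMAP_BASE	0x8000000000000000ULL
	#define OFF_MASK	((1ULL << IOMAP_SHIFT) - 1)
	#define IDX_MASK	(~OFF_MASK - IOMAP_BASE)	/* 0x7fff000000000000 */
	#define ZPCI_ADDR(idx)	(IOMAP_BASE | ((uint64_t)(idx) << IOMAP_SHIFT))

	int main(void)
	{
		uint64_t addr = ZPCI_ADDR(5) + 0x1000;	/* entry 5, offset 0x1000 */

		printf("idx=%llu off=%#llx max=%#llx\n",
		       (unsigned long long)((addr & IDX_MASK) >> IOMAP_SHIFT),
		       (unsigned long long)(addr & OFF_MASK),
		       (unsigned long long)((UINT64_MAX - IOMAP_BASE + 1) >> IOMAP_SHIFT));
		return 0;	/* prints idx=5 off=0x1000 max=0x8000 */
	}
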
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index f16debf6a612..1c4fe129486d 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -166,14 +166,14 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
  */
 #define start_thread(regs, new_psw, new_stackp) do { \
 	regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \
-	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+	regs->psw.addr = new_psw; \
 	regs->gprs[15] = new_stackp; \
 	execve_tail(); \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do { \
 	regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
-	regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+	regs->psw.addr = new_psw; \
 	regs->gprs[15] = new_stackp; \
 	crst_table_downgrade(current->mm, 1UL << 31); \
 	execve_tail(); \
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index f00cd35c8ac4..99bc456cc26a 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -149,7 +149,7 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
 #define arch_has_block_step() (1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
-#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define instruction_pointer(regs) ((regs)->psw.addr)
 #define user_stack_pointer(regs)((regs)->gprs[15])
 #define profile_pc(regs) instruction_pointer(regs)
 
@@ -161,7 +161,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 static inline void instruction_pointer_set(struct pt_regs *regs,
 					   unsigned long val)
 {
-	regs->psw.addr = val | PSW_ADDR_AMODE;
+	regs->psw.addr = val;
 }
 
 int regs_query_register_offset(const char *name);
@@ -171,7 +171,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
-	return regs->gprs[15] & PSW_ADDR_INSN;
+	return regs->gprs[15];
 }
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 34ec202472c6..ab3aa6875a59 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -310,7 +310,8 @@
 #define __NR_recvmsg		372
 #define __NR_shutdown		373
 #define __NR_mlock2		374
-#define NR_syscalls 375
+#define __NR_copy_file_range	375
+#define NR_syscalls 376
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index fac4eeddef91..ae2cda5eee5a 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
 COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index a92b39fd0e63..3986c9f62191 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -59,8 +59,6 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
 	struct save_area *sa;
 
 	sa = (void *) memblock_alloc(sizeof(*sa), 8);
-	if (!sa)
-		return NULL;
 	if (is_boot_cpu)
 		list_add(&sa->list, &dump_save_areas);
 	else
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 6fca0e46464e..c890a5589e59 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
 		except_str = "*";
 	else
 		except_str = "-";
-	caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
+	caller = (unsigned long) entry->caller;
 	rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ",
 		      area, (long long)time_spec.tv_sec,
 		      time_spec.tv_nsec / 1000, level, except_str,
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index dc8e20473484..02bd02ff648b 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c | |||
| @@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) | |||
| 34 | unsigned long addr; | 34 | unsigned long addr; |
| 35 | 35 | ||
| 36 | while (1) { | 36 | while (1) { |
| 37 | sp = sp & PSW_ADDR_INSN; | ||
| 38 | if (sp < low || sp > high - sizeof(*sf)) | 37 | if (sp < low || sp > high - sizeof(*sf)) |
| 39 | return sp; | 38 | return sp; |
| 40 | sf = (struct stack_frame *) sp; | 39 | sf = (struct stack_frame *) sp; |
| 41 | addr = sf->gprs[8] & PSW_ADDR_INSN; | 40 | addr = sf->gprs[8]; |
| 42 | printk("([<%016lx>] %pSR)\n", addr, (void *)addr); | 41 | printk("([<%016lx>] %pSR)\n", addr, (void *)addr); |
| 43 | /* Follow the backchain. */ | 42 | /* Follow the backchain. */ |
| 44 | while (1) { | 43 | while (1) { |
| 45 | low = sp; | 44 | low = sp; |
| 46 | sp = sf->back_chain & PSW_ADDR_INSN; | 45 | sp = sf->back_chain; |
| 47 | if (!sp) | 46 | if (!sp) |
| 48 | break; | 47 | break; |
| 49 | if (sp <= low || sp > high - sizeof(*sf)) | 48 | if (sp <= low || sp > high - sizeof(*sf)) |
| 50 | return sp; | 49 | return sp; |
| 51 | sf = (struct stack_frame *) sp; | 50 | sf = (struct stack_frame *) sp; |
| 52 | addr = sf->gprs[8] & PSW_ADDR_INSN; | 51 | addr = sf->gprs[8]; |
| 53 | printk(" [<%016lx>] %pSR\n", addr, (void *)addr); | 52 | printk(" [<%016lx>] %pSR\n", addr, (void *)addr); |
| 54 | } | 53 | } |
| 55 | /* Zero backchain detected, check for interrupt frame. */ | 54 | /* Zero backchain detected, check for interrupt frame. */ |
| @@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) | |||
| 57 | if (sp <= low || sp > high - sizeof(*regs)) | 56 | if (sp <= low || sp > high - sizeof(*regs)) |
| 58 | return sp; | 57 | return sp; |
| 59 | regs = (struct pt_regs *) sp; | 58 | regs = (struct pt_regs *) sp; |
| 60 | addr = regs->psw.addr & PSW_ADDR_INSN; | 59 | addr = regs->psw.addr; |
| 61 | printk(" [<%016lx>] %pSR\n", addr, (void *)addr); | 60 | printk(" [<%016lx>] %pSR\n", addr, (void *)addr); |
| 62 | low = sp; | 61 | low = sp; |
| 63 | sp = regs->gprs[15]; | 62 | sp = regs->gprs[15]; |
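The hunks above drop the PSW_ADDR_INSN masking from the stack walker: with 31-bit kernel support gone, the saved return addresses and the back chain are used as plain 64-bit values. Below is a minimal user-space sketch of the same backchain walk, with the bounds checks but no masking; the stack_frame layout here is a simplified stand-in, not the kernel's real structure.

#include <stdio.h>

/* Simplified model of an s390 stack frame: the saved return address
   sits in gprs[8] and back_chain points at the caller's frame. */
struct stack_frame {
    unsigned long back_chain;
    unsigned long gprs[10];
};

/* Walk the chain inside [low, high); addresses are used verbatim. */
static void show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
    while (sp) {
        struct stack_frame *sf;

        if (sp < low || sp > high - sizeof(*sf))
            return;                     /* left the stack area */
        sf = (struct stack_frame *) sp;
        printf("return address: %#lx\n", sf->gprs[8]);
        sp = sf->back_chain;            /* zero terminates the chain */
    }
}

int main(void)
{
    /* Three fake frames; the array stands in for a kernel stack. */
    struct stack_frame frames[3] = {
        { .back_chain = 0,                          .gprs = { [8] = 0x3000 } },
        { .back_chain = (unsigned long) &frames[0], .gprs = { [8] = 0x2000 } },
        { .back_chain = (unsigned long) &frames[1], .gprs = { [8] = 0x1000 } },
    };

    show_trace((unsigned long) &frames[2],
               (unsigned long) frames,
               (unsigned long) (frames + 3));
    return 0;
}

Built with any C99 compiler, it prints the three fake return addresses in call order, mirroring the loop structure of __show_trace() above.
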
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 20a5caf6d981..c55576bbaa1f 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
| @@ -252,14 +252,14 @@ static void early_pgm_check_handler(void) | |||
| 252 | unsigned long addr; | 252 | unsigned long addr; |
| 253 | 253 | ||
| 254 | addr = S390_lowcore.program_old_psw.addr; | 254 | addr = S390_lowcore.program_old_psw.addr; |
| 255 | fixup = search_exception_tables(addr & PSW_ADDR_INSN); | 255 | fixup = search_exception_tables(addr); |
| 256 | if (!fixup) | 256 | if (!fixup) |
| 257 | disabled_wait(0); | 257 | disabled_wait(0); |
| 258 | /* Disable low address protection before storing into lowcore. */ | 258 | /* Disable low address protection before storing into lowcore. */ |
| 259 | __ctl_store(cr0, 0, 0); | 259 | __ctl_store(cr0, 0, 0); |
| 260 | cr0_new = cr0 & ~(1UL << 28); | 260 | cr0_new = cr0 & ~(1UL << 28); |
| 261 | __ctl_load(cr0_new, 0, 0); | 261 | __ctl_load(cr0_new, 0, 0); |
| 262 | S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; | 262 | S390_lowcore.program_old_psw.addr = extable_fixup(fixup); |
| 263 | __ctl_load(cr0, 0, 0); | 263 | __ctl_load(cr0, 0, 0); |
| 264 | } | 264 | } |
| 265 | 265 | ||
| @@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void) | |||
| 268 | psw_t psw; | 268 | psw_t psw; |
| 269 | 269 | ||
| 270 | psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; | 270 | psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; |
| 271 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; | 271 | psw.addr = (unsigned long) s390_base_ext_handler; |
| 272 | S390_lowcore.external_new_psw = psw; | 272 | S390_lowcore.external_new_psw = psw; |
| 273 | psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 273 | psw.addr = (unsigned long) s390_base_pgm_handler; |
| 274 | S390_lowcore.program_new_psw = psw; | 274 | S390_lowcore.program_new_psw = psw; |
| 275 | s390_base_pgm_handler_fn = early_pgm_check_handler; | 275 | s390_base_pgm_handler_fn = early_pgm_check_handler; |
| 276 | } | 276 | } |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index e0eaf11134b4..0f7bfeba6da6 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
| @@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) | |||
| 203 | goto out; | 203 | goto out; |
| 204 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 204 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
| 205 | goto out; | 205 | goto out; |
| 206 | ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; | 206 | ip -= MCOUNT_INSN_SIZE; |
| 207 | trace.func = ip; | 207 | trace.func = ip; |
| 208 | trace.depth = current->curr_ret_stack + 1; | 208 | trace.depth = current->curr_ret_stack + 1; |
| 209 | /* Only trace if the calling function expects to. */ | 209 | /* Only trace if the calling function expects to. */ |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 0a5a6b661b93..f20abdb5630a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
| @@ -2057,12 +2057,12 @@ void s390_reset_system(void) | |||
| 2057 | /* Set new machine check handler */ | 2057 | /* Set new machine check handler */ |
| 2058 | S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; | 2058 | S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; |
| 2059 | S390_lowcore.mcck_new_psw.addr = | 2059 | S390_lowcore.mcck_new_psw.addr = |
| 2060 | PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; | 2060 | (unsigned long) s390_base_mcck_handler; |
| 2061 | 2061 | ||
| 2062 | /* Set new program check handler */ | 2062 | /* Set new program check handler */ |
| 2063 | S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; | 2063 | S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; |
| 2064 | S390_lowcore.program_new_psw.addr = | 2064 | S390_lowcore.program_new_psw.addr = |
| 2065 | PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; | 2065 | (unsigned long) s390_base_pgm_handler; |
| 2066 | 2066 | ||
| 2067 | /* | 2067 | /* |
| 2068 | * Clear subchannel ID and number to signal new kernel that no CCW or | 2068 | * Clear subchannel ID and number to signal new kernel that no CCW or |
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 389db56a2208..250f5972536a 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
| @@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb, | |||
| 226 | __ctl_load(per_kprobe, 9, 11); | 226 | __ctl_load(per_kprobe, 9, 11); |
| 227 | regs->psw.mask |= PSW_MASK_PER; | 227 | regs->psw.mask |= PSW_MASK_PER; |
| 228 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); | 228 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); |
| 229 | regs->psw.addr = ip | PSW_ADDR_AMODE; | 229 | regs->psw.addr = ip; |
| 230 | } | 230 | } |
| 231 | NOKPROBE_SYMBOL(enable_singlestep); | 231 | NOKPROBE_SYMBOL(enable_singlestep); |
| 232 | 232 | ||
| @@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb, | |||
| 238 | __ctl_load(kcb->kprobe_saved_ctl, 9, 11); | 238 | __ctl_load(kcb->kprobe_saved_ctl, 9, 11); |
| 239 | regs->psw.mask &= ~PSW_MASK_PER; | 239 | regs->psw.mask &= ~PSW_MASK_PER; |
| 240 | regs->psw.mask |= kcb->kprobe_saved_imask; | 240 | regs->psw.mask |= kcb->kprobe_saved_imask; |
| 241 | regs->psw.addr = ip | PSW_ADDR_AMODE; | 241 | regs->psw.addr = ip; |
| 242 | } | 242 | } |
| 243 | NOKPROBE_SYMBOL(disable_singlestep); | 243 | NOKPROBE_SYMBOL(disable_singlestep); |
| 244 | 244 | ||
| @@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs) | |||
| 310 | */ | 310 | */ |
| 311 | preempt_disable(); | 311 | preempt_disable(); |
| 312 | kcb = get_kprobe_ctlblk(); | 312 | kcb = get_kprobe_ctlblk(); |
| 313 | p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2)); | 313 | p = get_kprobe((void *)(regs->psw.addr - 2)); |
| 314 | 314 | ||
| 315 | if (p) { | 315 | if (p) { |
| 316 | if (kprobe_running()) { | 316 | if (kprobe_running()) { |
| @@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 460 | break; | 460 | break; |
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; | 463 | regs->psw.addr = orig_ret_address; |
| 464 | 464 | ||
| 465 | pop_kprobe(get_kprobe_ctlblk()); | 465 | pop_kprobe(get_kprobe_ctlblk()); |
| 466 | kretprobe_hash_unlock(current, &flags); | 466 | kretprobe_hash_unlock(current, &flags); |
| @@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler); | |||
| 490 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | 490 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) |
| 491 | { | 491 | { |
| 492 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 492 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 493 | unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; | 493 | unsigned long ip = regs->psw.addr; |
| 494 | int fixup = probe_get_fixup_type(p->ainsn.insn); | 494 | int fixup = probe_get_fixup_type(p->ainsn.insn); |
| 495 | 495 | ||
| 496 | /* Check if the kprobes location is an enabled ftrace caller */ | 496 | /* Check if the kprobes location is an enabled ftrace caller */ |
| @@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr) | |||
| 605 | * In case the user-specified fault handler returned | 605 | * In case the user-specified fault handler returned |
| 606 | * zero, try to fix up. | 606 | * zero, try to fix up. |
| 607 | */ | 607 | */ |
| 608 | entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 608 | entry = search_exception_tables(regs->psw.addr); |
| 609 | if (entry) { | 609 | if (entry) { |
| 610 | regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE; | 610 | regs->psw.addr = extable_fixup(entry); |
| 611 | return 1; | 611 | return 1; |
| 612 | } | 612 | } |
| 613 | 613 | ||
| @@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 683 | memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); | 683 | memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); |
| 684 | 684 | ||
| 685 | /* setup return addr to the jprobe handler routine */ | 685 | /* setup return addr to the jprobe handler routine */ |
| 686 | regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE; | 686 | regs->psw.addr = (unsigned long) jp->entry; |
| 687 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); | 687 | regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); |
| 688 | 688 | ||
| 689 | /* r15 is the stack pointer */ | 689 | /* r15 is the stack pointer */ |
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 61595c1f0a0f..cfcba2dd9bb5 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c | |||
| @@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs) | |||
| 74 | 74 | ||
| 75 | static unsigned long instruction_pointer_guest(struct pt_regs *regs) | 75 | static unsigned long instruction_pointer_guest(struct pt_regs *regs) |
| 76 | { | 76 | { |
| 77 | return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN; | 77 | return sie_block(regs)->gpsw.addr; |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | unsigned long perf_instruction_pointer(struct pt_regs *regs) | 80 | unsigned long perf_instruction_pointer(struct pt_regs *regs) |
| @@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry, | |||
| 231 | struct pt_regs *regs; | 231 | struct pt_regs *regs; |
| 232 | 232 | ||
| 233 | while (1) { | 233 | while (1) { |
| 234 | sp = sp & PSW_ADDR_INSN; | ||
| 235 | if (sp < low || sp > high - sizeof(*sf)) | 234 | if (sp < low || sp > high - sizeof(*sf)) |
| 236 | return sp; | 235 | return sp; |
| 237 | sf = (struct stack_frame *) sp; | 236 | sf = (struct stack_frame *) sp; |
| 238 | perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); | 237 | perf_callchain_store(entry, sf->gprs[8]); |
| 239 | /* Follow the backchain. */ | 238 | /* Follow the backchain. */ |
| 240 | while (1) { | 239 | while (1) { |
| 241 | low = sp; | 240 | low = sp; |
| 242 | sp = sf->back_chain & PSW_ADDR_INSN; | 241 | sp = sf->back_chain; |
| 243 | if (!sp) | 242 | if (!sp) |
| 244 | break; | 243 | break; |
| 245 | if (sp <= low || sp > high - sizeof(*sf)) | 244 | if (sp <= low || sp > high - sizeof(*sf)) |
| 246 | return sp; | 245 | return sp; |
| 247 | sf = (struct stack_frame *) sp; | 246 | sf = (struct stack_frame *) sp; |
| 248 | perf_callchain_store(entry, | 247 | perf_callchain_store(entry, sf->gprs[8]); |
| 249 | sf->gprs[8] & PSW_ADDR_INSN); | ||
| 250 | } | 248 | } |
| 251 | /* Zero backchain detected, check for interrupt frame. */ | 249 | /* Zero backchain detected, check for interrupt frame. */ |
| 252 | sp = (unsigned long) (sf + 1); | 250 | sp = (unsigned long) (sf + 1); |
| 253 | if (sp <= low || sp > high - sizeof(*regs)) | 251 | if (sp <= low || sp > high - sizeof(*regs)) |
| 254 | return sp; | 252 | return sp; |
| 255 | regs = (struct pt_regs *) sp; | 253 | regs = (struct pt_regs *) sp; |
| 256 | perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); | 254 | perf_callchain_store(entry, sf->gprs[8]); |
| 257 | low = sp; | 255 | low = sp; |
| 258 | sp = regs->gprs[15]; | 256 | sp = regs->gprs[15]; |
| 259 | } | 257 | } |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 114ee8b96f17..2bba7df4ac51 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
| @@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk) | |||
| 56 | return 0; | 56 | return 0; |
| 57 | low = task_stack_page(tsk); | 57 | low = task_stack_page(tsk); |
| 58 | high = (struct stack_frame *) task_pt_regs(tsk); | 58 | high = (struct stack_frame *) task_pt_regs(tsk); |
| 59 | sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN); | 59 | sf = (struct stack_frame *) tsk->thread.ksp; |
| 60 | if (sf <= low || sf > high) | 60 | if (sf <= low || sf > high) |
| 61 | return 0; | 61 | return 0; |
| 62 | sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN); | 62 | sf = (struct stack_frame *) sf->back_chain; |
| 63 | if (sf <= low || sf > high) | 63 | if (sf <= low || sf > high) |
| 64 | return 0; | 64 | return 0; |
| 65 | return sf->gprs[8]; | 65 | return sf->gprs[8]; |
| @@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, | |||
| 154 | memset(&frame->childregs, 0, sizeof(struct pt_regs)); | 154 | memset(&frame->childregs, 0, sizeof(struct pt_regs)); |
| 155 | frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | | 155 | frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | |
| 156 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | 156 | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; |
| 157 | frame->childregs.psw.addr = PSW_ADDR_AMODE | | 157 | frame->childregs.psw.addr = |
| 158 | (unsigned long) kernel_thread_starter; | 158 | (unsigned long) kernel_thread_starter; |
| 159 | frame->childregs.gprs[9] = new_stackp; /* function */ | 159 | frame->childregs.gprs[9] = new_stackp; /* function */ |
| 160 | frame->childregs.gprs[10] = arg; | 160 | frame->childregs.gprs[10] = arg; |
| @@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p) | |||
| 220 | return 0; | 220 | return 0; |
| 221 | low = task_stack_page(p); | 221 | low = task_stack_page(p); |
| 222 | high = (struct stack_frame *) task_pt_regs(p); | 222 | high = (struct stack_frame *) task_pt_regs(p); |
| 223 | sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); | 223 | sf = (struct stack_frame *) p->thread.ksp; |
| 224 | if (sf <= low || sf > high) | 224 | if (sf <= low || sf > high) |
| 225 | return 0; | 225 | return 0; |
| 226 | for (count = 0; count < 16; count++) { | 226 | for (count = 0; count < 16; count++) { |
| 227 | sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN); | 227 | sf = (struct stack_frame *) sf->back_chain; |
| 228 | if (sf <= low || sf > high) | 228 | if (sf <= low || sf > high) |
| 229 | return 0; | 229 | return 0; |
| 230 | return_address = sf->gprs[8] & PSW_ADDR_INSN; | 230 | return_address = sf->gprs[8]; |
| 231 | if (!in_sched_functions(return_address)) | 231 | if (!in_sched_functions(return_address)) |
| 232 | return return_address; | 232 | return return_address; |
| 233 | } | 233 | } |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 01c37b36caf9..49b1c13bf6c9 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
| @@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task) | |||
| 84 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) | 84 | if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) |
| 85 | new.control |= PER_EVENT_IFETCH; | 85 | new.control |= PER_EVENT_IFETCH; |
| 86 | new.start = 0; | 86 | new.start = 0; |
| 87 | new.end = PSW_ADDR_INSN; | 87 | new.end = -1UL; |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | /* Take care of the PER enablement bit in the PSW. */ | 90 | /* Take care of the PER enablement bit in the PSW. */ |
| @@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child, | |||
| 148 | else if (addr == (addr_t) &dummy->cr11) | 148 | else if (addr == (addr_t) &dummy->cr11) |
| 149 | /* End address of the active per set. */ | 149 | /* End address of the active per set. */ |
| 150 | return test_thread_flag(TIF_SINGLE_STEP) ? | 150 | return test_thread_flag(TIF_SINGLE_STEP) ? |
| 151 | PSW_ADDR_INSN : child->thread.per_user.end; | 151 | -1UL : child->thread.per_user.end; |
| 152 | else if (addr == (addr_t) &dummy->bits) | 152 | else if (addr == (addr_t) &dummy->bits) |
| 153 | /* Single-step bit. */ | 153 | /* Single-step bit. */ |
| 154 | return test_thread_flag(TIF_SINGLE_STEP) ? | 154 | return test_thread_flag(TIF_SINGLE_STEP) ? |
| @@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request, | |||
| 495 | } | 495 | } |
| 496 | return 0; | 496 | return 0; |
| 497 | default: | 497 | default: |
| 498 | /* Removing high order bit from addr (only for 31 bit). */ | ||
| 499 | addr &= PSW_ADDR_INSN; | ||
| 500 | return ptrace_request(child, request, addr, data); | 498 | return ptrace_request(child, request, addr, data); |
| 501 | } | 499 | } |
| 502 | } | 500 | } |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c6878fbbcf13..9220db5c996a 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
| @@ -301,25 +301,21 @@ static void __init setup_lowcore(void) | |||
| 301 | BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096); | 301 | BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096); |
| 302 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); | 302 | lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); |
| 303 | lc->restart_psw.mask = PSW_KERNEL_BITS; | 303 | lc->restart_psw.mask = PSW_KERNEL_BITS; |
| 304 | lc->restart_psw.addr = | 304 | lc->restart_psw.addr = (unsigned long) restart_int_handler; |
| 305 | PSW_ADDR_AMODE | (unsigned long) restart_int_handler; | ||
| 306 | lc->external_new_psw.mask = PSW_KERNEL_BITS | | 305 | lc->external_new_psw.mask = PSW_KERNEL_BITS | |
| 307 | PSW_MASK_DAT | PSW_MASK_MCHECK; | 306 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
| 308 | lc->external_new_psw.addr = | 307 | lc->external_new_psw.addr = (unsigned long) ext_int_handler; |
| 309 | PSW_ADDR_AMODE | (unsigned long) ext_int_handler; | ||
| 310 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | | 308 | lc->svc_new_psw.mask = PSW_KERNEL_BITS | |
| 311 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; | 309 | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; |
| 312 | lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; | 310 | lc->svc_new_psw.addr = (unsigned long) system_call; |
| 313 | lc->program_new_psw.mask = PSW_KERNEL_BITS | | 311 | lc->program_new_psw.mask = PSW_KERNEL_BITS | |
| 314 | PSW_MASK_DAT | PSW_MASK_MCHECK; | 312 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
| 315 | lc->program_new_psw.addr = | 313 | lc->program_new_psw.addr = (unsigned long) pgm_check_handler; |
| 316 | PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; | ||
| 317 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; | 314 | lc->mcck_new_psw.mask = PSW_KERNEL_BITS; |
| 318 | lc->mcck_new_psw.addr = | 315 | lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; |
| 319 | PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; | ||
| 320 | lc->io_new_psw.mask = PSW_KERNEL_BITS | | 316 | lc->io_new_psw.mask = PSW_KERNEL_BITS | |
| 321 | PSW_MASK_DAT | PSW_MASK_MCHECK; | 317 | PSW_MASK_DAT | PSW_MASK_MCHECK; |
| 322 | lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; | 318 | lc->io_new_psw.addr = (unsigned long) io_int_handler; |
| 323 | lc->clock_comparator = -1ULL; | 319 | lc->clock_comparator = -1ULL; |
| 324 | lc->kernel_stack = ((unsigned long) &init_thread_union) | 320 | lc->kernel_stack = ((unsigned long) &init_thread_union) |
| 325 | + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); | 321 | + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); |
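In the lowcore setup above, handler addresses are now stored into the PSWs as plain casts instead of being OR-ed with PSW_ADDR_AMODE, the 31-bit addressing-mode bit that has no meaning on a 64-bit-only kernel. A tiny sketch of the before/after shape of that assignment; psw_t is a local mock and restart_handler a dummy function, neither is the kernel's definition.

#include <stdio.h>

/* Local stand-in for the kernel's PSW: a mask word plus an address. */
typedef struct {
    unsigned long mask;
    unsigned long addr;
} psw_t;

static void restart_handler(void)
{
}

int main(void)
{
    psw_t psw = { .mask = 0 };

    /* 64-bit only: the handler address is stored as-is.  The removed
       31-bit form tagged the addressing mode into the address, roughly
       psw.addr = PSW_ADDR_AMODE | (unsigned long) handler; */
    psw.addr = (unsigned long) restart_handler;

    printf("new PSW address: %#lx\n", psw.addr);
    return 0;
}
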
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 028cc46cb82a..d82562cf0a0e 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c | |||
| @@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
| 331 | /* Set up to return from userspace. If provided, use a stub | 331 | /* Set up to return from userspace. If provided, use a stub |
| 332 | already in userspace. */ | 332 | already in userspace. */ |
| 333 | if (ka->sa.sa_flags & SA_RESTORER) { | 333 | if (ka->sa.sa_flags & SA_RESTORER) { |
| 334 | restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; | 334 | restorer = (unsigned long) ka->sa.sa_restorer; |
| 335 | } else { | 335 | } else { |
| 336 | /* Signal frame without vector registers are short ! */ | 336 | /* Signal frame without vector registers are short ! */ |
| 337 | __u16 __user *svc = (void __user *) frame + frame_size - 2; | 337 | __u16 __user *svc = (void __user *) frame + frame_size - 2; |
| 338 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) | 338 | if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) |
| 339 | return -EFAULT; | 339 | return -EFAULT; |
| 340 | restorer = (unsigned long) svc | PSW_ADDR_AMODE; | 340 | restorer = (unsigned long) svc; |
| 341 | } | 341 | } |
| 342 | 342 | ||
| 343 | /* Set up registers for signal handler */ | 343 | /* Set up registers for signal handler */ |
| @@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, | |||
| 347 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | | 347 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | |
| 348 | (PSW_USER_BITS & PSW_MASK_ASC) | | 348 | (PSW_USER_BITS & PSW_MASK_ASC) | |
| 349 | (regs->psw.mask & ~PSW_MASK_ASC); | 349 | (regs->psw.mask & ~PSW_MASK_ASC); |
| 350 | regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; | 350 | regs->psw.addr = (unsigned long) ka->sa.sa_handler; |
| 351 | 351 | ||
| 352 | regs->gprs[2] = sig; | 352 | regs->gprs[2] = sig; |
| 353 | regs->gprs[3] = (unsigned long) &frame->sc; | 353 | regs->gprs[3] = (unsigned long) &frame->sc; |
| @@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
| 394 | /* Set up to return from userspace. If provided, use a stub | 394 | /* Set up to return from userspace. If provided, use a stub |
| 395 | already in userspace. */ | 395 | already in userspace. */ |
| 396 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { | 396 | if (ksig->ka.sa.sa_flags & SA_RESTORER) { |
| 397 | restorer = (unsigned long) | 397 | restorer = (unsigned long) ksig->ka.sa.sa_restorer; |
| 398 | ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE; | ||
| 399 | } else { | 398 | } else { |
| 400 | __u16 __user *svc = &frame->svc_insn; | 399 | __u16 __user *svc = &frame->svc_insn; |
| 401 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc)) | 400 | if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc)) |
| 402 | return -EFAULT; | 401 | return -EFAULT; |
| 403 | restorer = (unsigned long) svc | PSW_ADDR_AMODE; | 402 | restorer = (unsigned long) svc; |
| 404 | } | 403 | } |
| 405 | 404 | ||
| 406 | /* Create siginfo on the signal stack */ | 405 | /* Create siginfo on the signal stack */ |
| @@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, | |||
| 426 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | | 425 | regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | |
| 427 | (PSW_USER_BITS & PSW_MASK_ASC) | | 426 | (PSW_USER_BITS & PSW_MASK_ASC) | |
| 428 | (regs->psw.mask & ~PSW_MASK_ASC); | 427 | (regs->psw.mask & ~PSW_MASK_ASC); |
| 429 | regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE; | 428 | regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler; |
| 430 | 429 | ||
| 431 | regs->gprs[2] = ksig->sig; | 430 | regs->gprs[2] = ksig->sig; |
| 432 | regs->gprs[3] = (unsigned long) &frame->info; | 431 | regs->gprs[3] = (unsigned long) &frame->info; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a13468b9a913..3c65a8eae34d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
| @@ -623,8 +623,6 @@ void __init smp_save_dump_cpus(void) | |||
| 623 | return; | 623 | return; |
| 624 | /* Allocate a page as dumping area for the store status sigps */ | 624 | /* Allocate a page as dumping area for the store status sigps */ |
| 625 | page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31); | 625 | page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31); |
| 626 | if (!page) | ||
| 627 | panic("could not allocate memory for save area\n"); | ||
| 628 | /* Set multi-threading state to the previous system. */ | 626 | /* Set multi-threading state to the previous system. */ |
| 629 | pcpu_set_smt(sclp.mtid_prev); | 627 | pcpu_set_smt(sclp.mtid_prev); |
| 630 | boot_cpu_addr = stap(); | 628 | boot_cpu_addr = stap(); |
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 1785cd82253c..5acba3cb7220 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c | |||
| @@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace, | |||
| 21 | unsigned long addr; | 21 | unsigned long addr; |
| 22 | 22 | ||
| 23 | while(1) { | 23 | while(1) { |
| 24 | sp &= PSW_ADDR_INSN; | ||
| 25 | if (sp < low || sp > high) | 24 | if (sp < low || sp > high) |
| 26 | return sp; | 25 | return sp; |
| 27 | sf = (struct stack_frame *)sp; | 26 | sf = (struct stack_frame *)sp; |
| 28 | while(1) { | 27 | while(1) { |
| 29 | addr = sf->gprs[8] & PSW_ADDR_INSN; | 28 | addr = sf->gprs[8]; |
| 30 | if (!trace->skip) | 29 | if (!trace->skip) |
| 31 | trace->entries[trace->nr_entries++] = addr; | 30 | trace->entries[trace->nr_entries++] = addr; |
| 32 | else | 31 | else |
| @@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace, | |||
| 34 | if (trace->nr_entries >= trace->max_entries) | 33 | if (trace->nr_entries >= trace->max_entries) |
| 35 | return sp; | 34 | return sp; |
| 36 | low = sp; | 35 | low = sp; |
| 37 | sp = sf->back_chain & PSW_ADDR_INSN; | 36 | sp = sf->back_chain; |
| 38 | if (!sp) | 37 | if (!sp) |
| 39 | break; | 38 | break; |
| 40 | if (sp <= low || sp > high - sizeof(*sf)) | 39 | if (sp <= low || sp > high - sizeof(*sf)) |
| @@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace, | |||
| 46 | if (sp <= low || sp > high - sizeof(*regs)) | 45 | if (sp <= low || sp > high - sizeof(*regs)) |
| 47 | return sp; | 46 | return sp; |
| 48 | regs = (struct pt_regs *)sp; | 47 | regs = (struct pt_regs *)sp; |
| 49 | addr = regs->psw.addr & PSW_ADDR_INSN; | 48 | addr = regs->psw.addr; |
| 50 | if (savesched || !in_sched_functions(addr)) { | 49 | if (savesched || !in_sched_functions(addr)) { |
| 51 | if (!trace->skip) | 50 | if (!trace->skip) |
| 52 | trace->entries[trace->nr_entries++] = addr; | 51 | trace->entries[trace->nr_entries++] = addr; |
| @@ -65,7 +64,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
| 65 | register unsigned long sp asm ("15"); | 64 | register unsigned long sp asm ("15"); |
| 66 | unsigned long orig_sp, new_sp; | 65 | unsigned long orig_sp, new_sp; |
| 67 | 66 | ||
| 68 | orig_sp = sp & PSW_ADDR_INSN; | 67 | orig_sp = sp; |
| 69 | new_sp = save_context_stack(trace, orig_sp, | 68 | new_sp = save_context_stack(trace, orig_sp, |
| 70 | S390_lowcore.panic_stack - PAGE_SIZE, | 69 | S390_lowcore.panic_stack - PAGE_SIZE, |
| 71 | S390_lowcore.panic_stack, 1); | 70 | S390_lowcore.panic_stack, 1); |
| @@ -86,7 +85,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
| 86 | { | 85 | { |
| 87 | unsigned long sp, low, high; | 86 | unsigned long sp, low, high; |
| 88 | 87 | ||
| 89 | sp = tsk->thread.ksp & PSW_ADDR_INSN; | 88 | sp = tsk->thread.ksp; |
| 90 | low = (unsigned long) task_stack_page(tsk); | 89 | low = (unsigned long) task_stack_page(tsk); |
| 91 | high = (unsigned long) task_pt_regs(tsk); | 90 | high = (unsigned long) task_pt_regs(tsk); |
| 92 | save_context_stack(trace, sp, low, high, 0); | 91 | save_context_stack(trace, sp, low, high, 0); |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 5378c3ea1b98..293d8b98fd52 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
| @@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom) | |||
| 383 | SYSCALL(sys_recvmsg,compat_sys_recvmsg) | 383 | SYSCALL(sys_recvmsg,compat_sys_recvmsg) |
| 384 | SYSCALL(sys_shutdown,sys_shutdown) | 384 | SYSCALL(sys_shutdown,sys_shutdown) |
| 385 | SYSCALL(sys_mlock2,compat_sys_mlock2) | 385 | SYSCALL(sys_mlock2,compat_sys_mlock2) |
| 386 | SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ | ||
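The new table entry above wires up sys_copy_file_range (with the compat wrapper added earlier in this series). A hedged user-space usage sketch via syscall(2); it assumes the installed headers already define __NR_copy_file_range, which is only true with kernel headers from this release or later.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
        return EXIT_FAILURE;
    }

    int fd_in = open(argv[1], O_RDONLY);
    int fd_out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd_in < 0 || fd_out < 0) {
        perror("open");
        return EXIT_FAILURE;
    }

    /* Copy up to 1 MiB; NULL offsets mean "use and advance the file offsets". */
    ssize_t n = syscall(__NR_copy_file_range, fd_in, NULL, fd_out, NULL,
                        1024 * 1024, 0);
    if (n < 0) {
        perror("copy_file_range");
        return EXIT_FAILURE;
    }
    printf("copied %zd bytes\n", n);

    close(fd_in);
    close(fd_out);
    return 0;
}

On kernels that lack the syscall the call fails with ENOSYS, which doubles as a runtime feature test.
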
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index d69d648759c9..017eb03daee2 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
| @@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) | |||
| 32 | address = *(unsigned long *)(current->thread.trap_tdb + 24); | 32 | address = *(unsigned long *)(current->thread.trap_tdb + 24); |
| 33 | else | 33 | else |
| 34 | address = regs->psw.addr; | 34 | address = regs->psw.addr; |
| 35 | return (void __user *) | 35 | return (void __user *) (address - (regs->int_code >> 16)); |
| 36 | ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); | ||
| 37 | } | 36 | } |
| 38 | 37 | ||
| 39 | static inline void report_user_fault(struct pt_regs *regs, int signr) | 38 | static inline void report_user_fault(struct pt_regs *regs, int signr) |
| @@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr) | |||
| 46 | return; | 45 | return; |
| 47 | printk("User process fault: interruption code %04x ilc:%d ", | 46 | printk("User process fault: interruption code %04x ilc:%d ", |
| 48 | regs->int_code & 0xffff, regs->int_code >> 17); | 47 | regs->int_code & 0xffff, regs->int_code >> 17); |
| 49 | print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); | 48 | print_vma_addr("in ", regs->psw.addr); |
| 50 | printk("\n"); | 49 | printk("\n"); |
| 51 | show_regs(regs); | 50 | show_regs(regs); |
| 52 | } | 51 | } |
| @@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) | |||
| 69 | report_user_fault(regs, si_signo); | 68 | report_user_fault(regs, si_signo); |
| 70 | } else { | 69 | } else { |
| 71 | const struct exception_table_entry *fixup; | 70 | const struct exception_table_entry *fixup; |
| 72 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 71 | fixup = search_exception_tables(regs->psw.addr); |
| 73 | if (fixup) | 72 | if (fixup) |
| 74 | regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; | 73 | regs->psw.addr = extable_fixup(fixup); |
| 75 | else { | 74 | else { |
| 76 | enum bug_trap_type btt; | 75 | enum bug_trap_type btt; |
| 77 | 76 | ||
| 78 | btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); | 77 | btt = report_bug(regs->psw.addr, regs); |
| 79 | if (btt == BUG_TRAP_TYPE_WARN) | 78 | if (btt == BUG_TRAP_TYPE_WARN) |
| 80 | return; | 79 | return; |
| 81 | die(regs, str); | 80 | die(regs, str); |
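get_trap_ip above now subtracts the instruction length, read from the upper 16 bits of int_code as the code shows, directly from the PSW address, since the program-check old PSW points just past the faulting instruction and no address mask is needed anymore. A small sketch of that arithmetic with made-up values; the field layout is inferred from the hunk above and everything else is illustrative.

#include <stdio.h>

int main(void)
{
    /* Pretend the PSW saved at program-check time points just past a
       4-byte instruction at 0x1000, and the interruption code carries
       that length in its upper 16 bits. */
    unsigned long psw_addr = 0x1004;
    unsigned int int_code = (4u << 16) | 0x0004;   /* length 4, pgm code 4 */

    unsigned long trap_ip = psw_addr - (int_code >> 16);
    printf("faulting instruction at %#lx\n", trap_ip);   /* 0x1000 */
    return 0;
}
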
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 5fce52cf0e57..5ea5af3c7db7 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig | |||
| @@ -29,6 +29,7 @@ config KVM | |||
| 29 | select HAVE_KVM_IRQFD | 29 | select HAVE_KVM_IRQFD |
| 30 | select HAVE_KVM_IRQ_ROUTING | 30 | select HAVE_KVM_IRQ_ROUTING |
| 31 | select SRCU | 31 | select SRCU |
| 32 | select KVM_VFIO | ||
| 32 | ---help--- | 33 | ---help--- |
| 33 | Support hosting paravirtualized guest machines using the SIE | 34 | Support hosting paravirtualized guest machines using the SIE |
| 34 | virtualization capability on the mainframe. This should work | 35 | virtualization capability on the mainframe. This should work |
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index b3b553469650..d42fa38c2429 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | # as published by the Free Software Foundation. | 7 | # as published by the Free Software Foundation. |
| 8 | 8 | ||
| 9 | KVM := ../../../virt/kvm | 9 | KVM := ../../../virt/kvm |
| 10 | common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o | 10 | common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o |
| 11 | 11 | ||
| 12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm | 12 | ccflags-y := -Ivirt/kvm -Iarch/s390/kvm |
| 13 | 13 | ||
diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 47518a324d75..d697312ce9ee 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c | |||
| @@ -116,7 +116,7 @@ static void enable_all_hw_wp(struct kvm_vcpu *vcpu) | |||
| 116 | if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) { | 116 | if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) { |
| 117 | *cr9 &= ~PER_CONTROL_ALTERATION; | 117 | *cr9 &= ~PER_CONTROL_ALTERATION; |
| 118 | *cr10 = 0; | 118 | *cr10 = 0; |
| 119 | *cr11 = PSW_ADDR_INSN; | 119 | *cr11 = -1UL; |
| 120 | } else { | 120 | } else { |
| 121 | *cr9 &= ~PER_CONTROL_ALTERATION; | 121 | *cr9 &= ~PER_CONTROL_ALTERATION; |
| 122 | *cr9 |= PER_EVENT_STORE; | 122 | *cr9 |= PER_EVENT_STORE; |
| @@ -159,7 +159,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) | |||
| 159 | vcpu->arch.sie_block->gcr[0] &= ~0x800ul; | 159 | vcpu->arch.sie_block->gcr[0] &= ~0x800ul; |
| 160 | vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; | 160 | vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; |
| 161 | vcpu->arch.sie_block->gcr[10] = 0; | 161 | vcpu->arch.sie_block->gcr[10] = 0; |
| 162 | vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN; | 162 | vcpu->arch.sie_block->gcr[11] = -1UL; |
| 163 | } | 163 | } |
| 164 | 164 | ||
| 165 | if (guestdbg_hw_bp_enabled(vcpu)) { | 165 | if (guestdbg_hw_bp_enabled(vcpu)) { |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 835d60bedb54..4af21c771f9b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -1423,44 +1423,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
| 1423 | return 0; | 1423 | return 0; |
| 1424 | } | 1424 | } |
| 1425 | 1425 | ||
| 1426 | /* | ||
| 1427 | * Backs up the current FP/VX register save area on a particular | ||
| 1428 | * destination. Used to switch between different register save | ||
| 1429 | * areas. | ||
| 1430 | */ | ||
| 1431 | static inline void save_fpu_to(struct fpu *dst) | ||
| 1432 | { | ||
| 1433 | dst->fpc = current->thread.fpu.fpc; | ||
| 1434 | dst->regs = current->thread.fpu.regs; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | /* | ||
| 1438 | * Switches the FP/VX register save area from which to lazy | ||
| 1439 | * restore register contents. | ||
| 1440 | */ | ||
| 1441 | static inline void load_fpu_from(struct fpu *from) | ||
| 1442 | { | ||
| 1443 | current->thread.fpu.fpc = from->fpc; | ||
| 1444 | current->thread.fpu.regs = from->regs; | ||
| 1445 | } | ||
| 1446 | |||
| 1447 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 1426 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 1448 | { | 1427 | { |
| 1449 | /* Save host register state */ | 1428 | /* Save host register state */ |
| 1450 | save_fpu_regs(); | 1429 | save_fpu_regs(); |
| 1451 | save_fpu_to(&vcpu->arch.host_fpregs); | 1430 | vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; |
| 1452 | 1431 | vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; | |
| 1453 | if (test_kvm_facility(vcpu->kvm, 129)) { | ||
| 1454 | current->thread.fpu.fpc = vcpu->run->s.regs.fpc; | ||
| 1455 | /* | ||
| 1456 | * Use the register save area in the SIE-control block | ||
| 1457 | * for register restore and save in kvm_arch_vcpu_put() | ||
| 1458 | */ | ||
| 1459 | current->thread.fpu.vxrs = | ||
| 1460 | (__vector128 *)&vcpu->run->s.regs.vrs; | ||
| 1461 | } else | ||
| 1462 | load_fpu_from(&vcpu->arch.guest_fpregs); | ||
| 1463 | 1432 | ||
| 1433 | /* Depending on MACHINE_HAS_VX, data stored to vrs either | ||
| 1434 | * has vector register or floating point register format. | ||
| 1435 | */ | ||
| 1436 | current->thread.fpu.regs = vcpu->run->s.regs.vrs; | ||
| 1437 | current->thread.fpu.fpc = vcpu->run->s.regs.fpc; | ||
| 1464 | if (test_fp_ctl(current->thread.fpu.fpc)) | 1438 | if (test_fp_ctl(current->thread.fpu.fpc)) |
| 1465 | /* User space provided an invalid FPC, let's clear it */ | 1439 | /* User space provided an invalid FPC, let's clear it */ |
| 1466 | current->thread.fpu.fpc = 0; | 1440 | current->thread.fpu.fpc = 0; |
| @@ -1476,19 +1450,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
| 1476 | atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); | 1450 | atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); |
| 1477 | gmap_disable(vcpu->arch.gmap); | 1451 | gmap_disable(vcpu->arch.gmap); |
| 1478 | 1452 | ||
| 1453 | /* Save guest register state */ | ||
| 1479 | save_fpu_regs(); | 1454 | save_fpu_regs(); |
| 1455 | vcpu->run->s.regs.fpc = current->thread.fpu.fpc; | ||
| 1480 | 1456 | ||
| 1481 | if (test_kvm_facility(vcpu->kvm, 129)) | 1457 | /* Restore host register state */ |
| 1482 | /* | 1458 | current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; |
| 1483 | * kvm_arch_vcpu_load() set up the register save area to | 1459 | current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; |
| 1484 | * the &vcpu->run->s.regs.vrs and, thus, the vector registers | ||
| 1485 | * are already saved. Only the floating-point control must be | ||
| 1486 | * copied. | ||
| 1487 | */ | ||
| 1488 | vcpu->run->s.regs.fpc = current->thread.fpu.fpc; | ||
| 1489 | else | ||
| 1490 | save_fpu_to(&vcpu->arch.guest_fpregs); | ||
| 1491 | load_fpu_from(&vcpu->arch.host_fpregs); | ||
| 1492 | 1460 | ||
| 1493 | save_access_regs(vcpu->run->s.regs.acrs); | 1461 | save_access_regs(vcpu->run->s.regs.acrs); |
| 1494 | restore_access_regs(vcpu->arch.host_acrs); | 1462 | restore_access_regs(vcpu->arch.host_acrs); |
| @@ -1506,8 +1474,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
| 1506 | memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); | 1474 | memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); |
| 1507 | vcpu->arch.sie_block->gcr[0] = 0xE0UL; | 1475 | vcpu->arch.sie_block->gcr[0] = 0xE0UL; |
| 1508 | vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; | 1476 | vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; |
| 1509 | vcpu->arch.guest_fpregs.fpc = 0; | 1477 | /* make sure the new fpc will be lazily loaded */ |
| 1510 | asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); | 1478 | save_fpu_regs(); |
| 1479 | current->thread.fpu.fpc = 0; | ||
| 1511 | vcpu->arch.sie_block->gbea = 1; | 1480 | vcpu->arch.sie_block->gbea = 1; |
| 1512 | vcpu->arch.sie_block->pp = 0; | 1481 | vcpu->arch.sie_block->pp = 0; |
| 1513 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | 1482 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
| @@ -1648,17 +1617,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, | |||
| 1648 | vcpu->arch.local_int.wq = &vcpu->wq; | 1617 | vcpu->arch.local_int.wq = &vcpu->wq; |
| 1649 | vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; | 1618 | vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; |
| 1650 | 1619 | ||
| 1651 | /* | ||
| 1652 | * Allocate a save area for floating-point registers. If the vector | ||
| 1653 | * extension is available, register contents are saved in the SIE | ||
| 1654 | * control block. The allocated save area is still required in | ||
| 1655 | * particular places, for example, in kvm_s390_vcpu_store_status(). | ||
| 1656 | */ | ||
| 1657 | vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, | ||
| 1658 | GFP_KERNEL); | ||
| 1659 | if (!vcpu->arch.guest_fpregs.fprs) | ||
| 1660 | goto out_free_sie_block; | ||
| 1661 | |||
| 1662 | rc = kvm_vcpu_init(vcpu, kvm, id); | 1620 | rc = kvm_vcpu_init(vcpu, kvm, id); |
| 1663 | if (rc) | 1621 | if (rc) |
| 1664 | goto out_free_sie_block; | 1622 | goto out_free_sie_block; |
| @@ -1879,19 +1837,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
| 1879 | 1837 | ||
| 1880 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 1838 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
| 1881 | { | 1839 | { |
| 1840 | /* make sure the new values will be lazily loaded */ | ||
| 1841 | save_fpu_regs(); | ||
| 1882 | if (test_fp_ctl(fpu->fpc)) | 1842 | if (test_fp_ctl(fpu->fpc)) |
| 1883 | return -EINVAL; | 1843 | return -EINVAL; |
| 1884 | memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); | 1844 | current->thread.fpu.fpc = fpu->fpc; |
| 1885 | vcpu->arch.guest_fpregs.fpc = fpu->fpc; | 1845 | if (MACHINE_HAS_VX) |
| 1886 | save_fpu_regs(); | 1846 | convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs); |
| 1887 | load_fpu_from(&vcpu->arch.guest_fpregs); | 1847 | else |
| 1848 | memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs)); | ||
| 1888 | return 0; | 1849 | return 0; |
| 1889 | } | 1850 | } |
| 1890 | 1851 | ||
| 1891 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 1852 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
| 1892 | { | 1853 | { |
| 1893 | memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); | 1854 | /* make sure we have the latest values */ |
| 1894 | fpu->fpc = vcpu->arch.guest_fpregs.fpc; | 1855 | save_fpu_regs(); |
| 1856 | if (MACHINE_HAS_VX) | ||
| 1857 | convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs); | ||
| 1858 | else | ||
| 1859 | memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs)); | ||
| 1860 | fpu->fpc = current->thread.fpu.fpc; | ||
| 1895 | return 0; | 1861 | return 0; |
| 1896 | } | 1862 | } |
| 1897 | 1863 | ||
| @@ -2396,6 +2362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
| 2396 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) | 2362 | int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) |
| 2397 | { | 2363 | { |
| 2398 | unsigned char archmode = 1; | 2364 | unsigned char archmode = 1; |
| 2365 | freg_t fprs[NUM_FPRS]; | ||
| 2399 | unsigned int px; | 2366 | unsigned int px; |
| 2400 | u64 clkcomp; | 2367 | u64 clkcomp; |
| 2401 | int rc; | 2368 | int rc; |
| @@ -2411,8 +2378,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) | |||
| 2411 | gpa = px; | 2378 | gpa = px; |
| 2412 | } else | 2379 | } else |
| 2413 | gpa -= __LC_FPREGS_SAVE_AREA; | 2380 | gpa -= __LC_FPREGS_SAVE_AREA; |
| 2414 | rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, | 2381 | |
| 2415 | vcpu->arch.guest_fpregs.fprs, 128); | 2382 | /* manually convert vector registers if necessary */ |
| 2383 | if (MACHINE_HAS_VX) { | ||
| 2384 | convert_vx_to_fp(fprs, current->thread.fpu.vxrs); | ||
| 2385 | rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, | ||
| 2386 | fprs, 128); | ||
| 2387 | } else { | ||
| 2388 | rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, | ||
| 2389 | vcpu->run->s.regs.vrs, 128); | ||
| 2390 | } | ||
| 2416 | rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, | 2391 | rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, |
| 2417 | vcpu->run->s.regs.gprs, 128); | 2392 | vcpu->run->s.regs.gprs, 128); |
| 2418 | rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, | 2393 | rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, |
| @@ -2420,7 +2395,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) | |||
| 2420 | rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, | 2395 | rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, |
| 2421 | &px, 4); | 2396 | &px, 4); |
| 2422 | rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, | 2397 | rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, |
| 2423 | &vcpu->arch.guest_fpregs.fpc, 4); | 2398 | &vcpu->run->s.regs.fpc, 4); |
| 2424 | rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, | 2399 | rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, |
| 2425 | &vcpu->arch.sie_block->todpr, 4); | 2400 | &vcpu->arch.sie_block->todpr, 4); |
| 2426 | rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, | 2401 | rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, |
| @@ -2443,19 +2418,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
| 2443 | * it into the save area | 2418 | * it into the save area |
| 2444 | */ | 2419 | */ |
| 2445 | save_fpu_regs(); | 2420 | save_fpu_regs(); |
| 2446 | if (test_kvm_facility(vcpu->kvm, 129)) { | 2421 | vcpu->run->s.regs.fpc = current->thread.fpu.fpc; |
| 2447 | /* | ||
| 2448 | * If the vector extension is available, the vector registers | ||
| 2449 | * which overlaps with floating-point registers are saved in | ||
| 2450 | * the SIE-control block. Hence, extract the floating-point | ||
| 2451 | * registers and the FPC value and store them in the | ||
| 2452 | * guest_fpregs structure. | ||
| 2453 | */ | ||
| 2454 | vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; | ||
| 2455 | convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, | ||
| 2456 | current->thread.fpu.vxrs); | ||
| 2457 | } else | ||
| 2458 | save_fpu_to(&vcpu->arch.guest_fpregs); | ||
| 2459 | save_access_regs(vcpu->run->s.regs.acrs); | 2422 | save_access_regs(vcpu->run->s.regs.acrs); |
| 2460 | 2423 | ||
| 2461 | return kvm_s390_store_status_unloaded(vcpu, addr); | 2424 | return kvm_s390_store_status_unloaded(vcpu, addr); |
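The kvm-s390 changes above retire the separate guest_fpregs buffer: on vcpu load the task's lazy FPU context is pointed at the run-area registers, and on put the guest fpc is handed back and the host context restored. The sketch below models that pointer swap in user space with simplified names (fpu_ctx, host_area, guest_area); it illustrates the scheme, it is not the kernel code.

#include <stdio.h>

/* Minimal model: an FP context is a control word plus a pointer to the
   register save area that lazy restore will use. */
struct fpu_ctx {
    unsigned int fpc;
    double *regs;
};

static double host_area[16];
static double guest_area[16];          /* stands in for vcpu->run->s.regs.vrs */

static struct fpu_ctx current_fpu = { .fpc = 0, .regs = host_area };
static struct fpu_ctx host_backup;

static void vcpu_load(unsigned int guest_fpc)
{
    /* Save the host context, then point "current" at the guest area. */
    host_backup = current_fpu;
    current_fpu.regs = guest_area;
    current_fpu.fpc = guest_fpc;
}

static unsigned int vcpu_put(void)
{
    /* Hand the guest fpc back to the caller and restore the host view. */
    unsigned int guest_fpc = current_fpu.fpc;
    current_fpu = host_backup;
    return guest_fpc;
}

int main(void)
{
    vcpu_load(0x00070000);
    current_fpu.regs[0] = 1.5;         /* guest work lands in the guest area */
    unsigned int fpc = vcpu_put();

    printf("guest fpc %#x, guest reg0 %.1f, host regs restored: %d\n",
           fpc, guest_area[0], current_fpu.regs == host_area);
    return 0;
}
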
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1b903f6ad54a..791a4146052c 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
| @@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr) | |||
| 228 | return; | 228 | return; |
| 229 | printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", | 229 | printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", |
| 230 | regs->int_code & 0xffff, regs->int_code >> 17); | 230 | regs->int_code & 0xffff, regs->int_code >> 17); |
| 231 | print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); | 231 | print_vma_addr(KERN_CONT "in ", regs->psw.addr); |
| 232 | printk(KERN_CONT "\n"); | 232 | printk(KERN_CONT "\n"); |
| 233 | printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", | 233 | printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", |
| 234 | regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); | 234 | regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); |
| @@ -256,9 +256,9 @@ static noinline void do_no_context(struct pt_regs *regs) | |||
| 256 | const struct exception_table_entry *fixup; | 256 | const struct exception_table_entry *fixup; |
| 257 | 257 | ||
| 258 | /* Are we prepared to handle this kernel fault? */ | 258 | /* Are we prepared to handle this kernel fault? */ |
| 259 | fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); | 259 | fixup = search_exception_tables(regs->psw.addr); |
| 260 | if (fixup) { | 260 | if (fixup) { |
| 261 | regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; | 261 | regs->psw.addr = extable_fixup(fixup); |
| 262 | return; | 262 | return; |
| 263 | } | 263 | } |
| 264 | 264 | ||
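The fault path above keeps the usual pattern: look the faulting PSW address up in the exception table and, if a fixup exists, rewrite psw.addr to the fixup target, now without the AMODE/INSN bit handling. A toy exception table and lookup in plain C; the real kernel table stores relative offsets and is binary searched, so this linear version only shows the control flow.

#include <stddef.h>
#include <stdio.h>

/* Toy exception table: if a fault hits `insn`, resume at `fixup`. */
struct extable_entry {
    unsigned long insn;
    unsigned long fixup;
};

static const struct extable_entry extable[] = {
    { .insn = 0x1000, .fixup = 0x1100 },
    { .insn = 0x2000, .fixup = 0x2200 },
};

static const struct extable_entry *search_extable(unsigned long addr)
{
    for (size_t i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
        if (extable[i].insn == addr)
            return &extable[i];
    return NULL;                       /* no fixup: the fault is fatal */
}

int main(void)
{
    unsigned long psw_addr = 0x2000;   /* pretend this instruction faulted */
    const struct extable_entry *fixup = search_extable(psw_addr);

    if (fixup)
        psw_addr = fixup->fixup;       /* resume execution at the fixup */
    printf("continuing at %#lx\n", psw_addr);
    return 0;
}
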
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c722400c7697..73e290337092 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
| @@ -98,7 +98,7 @@ void __init paging_init(void) | |||
| 98 | __ctl_load(S390_lowcore.kernel_asce, 1, 1); | 98 | __ctl_load(S390_lowcore.kernel_asce, 1, 1); |
| 99 | __ctl_load(S390_lowcore.kernel_asce, 7, 7); | 99 | __ctl_load(S390_lowcore.kernel_asce, 7, 7); |
| 100 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); | 100 | __ctl_load(S390_lowcore.kernel_asce, 13, 13); |
| 101 | arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); | 101 | __arch_local_irq_stosm(0x04); |
| 102 | 102 | ||
| 103 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 103 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
| 104 | sparse_init(); | 104 | sparse_init(); |
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index ea01477b4aa6..45c4daa49930 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c | |||
| @@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
| 169 | 169 | ||
| 170 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) | 170 | int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) |
| 171 | { | 171 | { |
| 172 | if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) | 172 | if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE) |
| 173 | return 0; | 173 | return 0; |
| 174 | if (!(flags & MAP_FIXED)) | 174 | if (!(flags & MAP_FIXED)) |
| 175 | addr = 0; | 175 | addr = 0; |
| 176 | if ((addr + len) >= TASK_SIZE) | 176 | if ((addr + len) >= TASK_SIZE) |
| 177 | return crst_table_upgrade(current->mm, 1UL << 53); | 177 | return crst_table_upgrade(current->mm, TASK_MAX_SIZE); |
| 178 | return 0; | 178 | return 0; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| @@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, | |||
| 189 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); | 189 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); |
| 190 | if (!(area & ~PAGE_MASK)) | 190 | if (!(area & ~PAGE_MASK)) |
| 191 | return area; | 191 | return area; |
| 192 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { | 192 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { |
| 193 | /* Upgrade the page table to 4 levels and retry. */ | 193 | /* Upgrade the page table to 4 levels and retry. */ |
| 194 | rc = crst_table_upgrade(mm, 1UL << 53); | 194 | rc = crst_table_upgrade(mm, TASK_MAX_SIZE); |
| 195 | if (rc) | 195 | if (rc) |
| 196 | return (unsigned long) rc; | 196 | return (unsigned long) rc; |
| 197 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); | 197 | area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); |
| @@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, | |||
| 211 | area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); | 211 | area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); |
| 212 | if (!(area & ~PAGE_MASK)) | 212 | if (!(area & ~PAGE_MASK)) |
| 213 | return area; | 213 | return area; |
| 214 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { | 214 | if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { |
| 215 | /* Upgrade the page table to 4 levels and retry. */ | 215 | /* Upgrade the page table to 4 levels and retry. */ |
| 216 | rc = crst_table_upgrade(mm, 1UL << 53); | 216 | rc = crst_table_upgrade(mm, TASK_MAX_SIZE); |
| 217 | if (rc) | 217 | if (rc) |
| 218 | return (unsigned long) rc; | 218 | return (unsigned long) rc; |
| 219 | area = arch_get_unmapped_area_topdown(filp, addr, len, | 219 | area = arch_get_unmapped_area_topdown(filp, addr, len, |
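The mmap hunks above replace the open-coded 1UL << 53 with TASK_MAX_SIZE but keep the logic: if the search fails with -ENOMEM and the task is still below the maximum, upgrade the page table to four levels and retry. A compact sketch of that try, upgrade, retry-once pattern; the limits and the fake allocator are invented for illustration.

#include <errno.h>
#include <stdio.h>

static unsigned long task_size = 1UL << 42;          /* current 3-level limit */
#define TASK_MAX_SIZE (1UL << 53)                     /* 4-level limit */

/* Fake allocator: succeeds only if the request fits under the limit. */
static long get_area(unsigned long len)
{
    if (len >= task_size)
        return -ENOMEM;
    return 0x10000;                                   /* some address */
}

static long get_area_with_upgrade(unsigned long len)
{
    long area = get_area(len);

    if (area == -ENOMEM && task_size < TASK_MAX_SIZE) {
        /* Upgrade the page table to 4 levels and retry. */
        task_size = TASK_MAX_SIZE;
        area = get_area(len);
    }
    return area;
}

int main(void)
{
    long area = get_area_with_upgrade(1UL << 45);
    printf("got area %#lx (task_size now %#lx)\n",
           (unsigned long) area, task_size);
    return 0;
}
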
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index a809fa8e6f8b..5109827883ac 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
| @@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) | |||
| 55 | unsigned long entry; | 55 | unsigned long entry; |
| 56 | int flush; | 56 | int flush; |
| 57 | 57 | ||
| 58 | BUG_ON(limit > (1UL << 53)); | 58 | BUG_ON(limit > TASK_MAX_SIZE); |
| 59 | flush = 0; | 59 | flush = 0; |
| 60 | repeat: | 60 | repeat: |
| 61 | table = crst_table_alloc(mm); | 61 | table = crst_table_alloc(mm); |
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 43f32ce60aa3..2794845061c6 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c | |||
| @@ -57,9 +57,7 @@ static __init pg_data_t *alloc_node_data(void) | |||
| 57 | { | 57 | { |
| 58 | pg_data_t *res; | 58 | pg_data_t *res; |
| 59 | 59 | ||
| 60 | res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1); | 60 | res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8); |
| 61 | if (!res) | ||
| 62 | panic("Could not allocate memory for node data!\n"); | ||
| 63 | memset(res, 0, sizeof(pg_data_t)); | 61 | memset(res, 0, sizeof(pg_data_t)); |
| 64 | return res; | 62 | return res; |
| 65 | } | 63 | } |
| @@ -162,7 +160,7 @@ static int __init numa_init_late(void) | |||
| 162 | register_one_node(nid); | 160 | register_one_node(nid); |
| 163 | return 0; | 161 | return 0; |
| 164 | } | 162 | } |
| 165 | device_initcall(numa_init_late); | 163 | arch_initcall(numa_init_late); |
| 166 | 164 | ||
| 167 | static int __init parse_debug(char *parm) | 165 | static int __init parse_debug(char *parm) |
| 168 | { | 166 | { |
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c index 8a6811b2cdb9..fe0bfe370c45 100644 --- a/arch/s390/oprofile/backtrace.c +++ b/arch/s390/oprofile/backtrace.c | |||
| @@ -16,24 +16,23 @@ __show_trace(unsigned int *depth, unsigned long sp, | |||
| 16 | struct pt_regs *regs; | 16 | struct pt_regs *regs; |
| 17 | 17 | ||
| 18 | while (*depth) { | 18 | while (*depth) { |
| 19 | sp = sp & PSW_ADDR_INSN; | ||
| 20 | if (sp < low || sp > high - sizeof(*sf)) | 19 | if (sp < low || sp > high - sizeof(*sf)) |
| 21 | return sp; | 20 | return sp; |
| 22 | sf = (struct stack_frame *) sp; | 21 | sf = (struct stack_frame *) sp; |
| 23 | (*depth)--; | 22 | (*depth)--; |
| 24 | oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); | 23 | oprofile_add_trace(sf->gprs[8]); |
| 25 | 24 | ||
| 26 | /* Follow the backchain. */ | 25 | /* Follow the backchain. */ |
| 27 | while (*depth) { | 26 | while (*depth) { |
| 28 | low = sp; | 27 | low = sp; |
| 29 | sp = sf->back_chain & PSW_ADDR_INSN; | 28 | sp = sf->back_chain; |
| 30 | if (!sp) | 29 | if (!sp) |
| 31 | break; | 30 | break; |
| 32 | if (sp <= low || sp > high - sizeof(*sf)) | 31 | if (sp <= low || sp > high - sizeof(*sf)) |
| 33 | return sp; | 32 | return sp; |
| 34 | sf = (struct stack_frame *) sp; | 33 | sf = (struct stack_frame *) sp; |
| 35 | (*depth)--; | 34 | (*depth)--; |
| 36 | oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); | 35 | oprofile_add_trace(sf->gprs[8]); |
| 37 | 36 | ||
| 38 | } | 37 | } |
| 39 | 38 | ||
| @@ -46,7 +45,7 @@ __show_trace(unsigned int *depth, unsigned long sp, | |||
| 46 | return sp; | 45 | return sp; |
| 47 | regs = (struct pt_regs *) sp; | 46 | regs = (struct pt_regs *) sp; |
| 48 | (*depth)--; | 47 | (*depth)--; |
| 49 | oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); | 48 | oprofile_add_trace(sf->gprs[8]); |
| 50 | low = sp; | 49 | low = sp; |
| 51 | sp = regs->gprs[15]; | 50 | sp = regs->gprs[15]; |
| 52 | } | 51 | } |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 11d4f277e9f6..8f19c8f9d660 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
| @@ -68,9 +68,12 @@ static struct airq_struct zpci_airq = { | |||
| 68 | .isc = PCI_ISC, | 68 | .isc = PCI_ISC, |
| 69 | }; | 69 | }; |
| 70 | 70 | ||
| 71 | /* I/O Map */ | 71 | #define ZPCI_IOMAP_ENTRIES \ |
| 72 | min(((unsigned long) CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT), \ | ||
| 73 | ZPCI_IOMAP_MAX_ENTRIES) | ||
| 74 | |||
| 72 | static DEFINE_SPINLOCK(zpci_iomap_lock); | 75 | static DEFINE_SPINLOCK(zpci_iomap_lock); |
| 73 | static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); | 76 | static unsigned long *zpci_iomap_bitmap; |
| 74 | struct zpci_iomap_entry *zpci_iomap_start; | 77 | struct zpci_iomap_entry *zpci_iomap_start; |
| 75 | EXPORT_SYMBOL_GPL(zpci_iomap_start); | 78 | EXPORT_SYMBOL_GPL(zpci_iomap_start); |
| 76 | 79 | ||
| @@ -265,27 +268,20 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, | |||
| 265 | unsigned long max) | 268 | unsigned long max) |
| 266 | { | 269 | { |
| 267 | struct zpci_dev *zdev = to_zpci(pdev); | 270 | struct zpci_dev *zdev = to_zpci(pdev); |
| 268 | u64 addr; | ||
| 269 | int idx; | 271 | int idx; |
| 270 | 272 | ||
| 271 | if ((bar & 7) != bar) | 273 | if (!pci_resource_len(pdev, bar)) |
| 272 | return NULL; | 274 | return NULL; |
| 273 | 275 | ||
| 274 | idx = zdev->bars[bar].map_idx; | 276 | idx = zdev->bars[bar].map_idx; |
| 275 | spin_lock(&zpci_iomap_lock); | 277 | spin_lock(&zpci_iomap_lock); |
| 276 | if (zpci_iomap_start[idx].count++) { | ||
| 277 | BUG_ON(zpci_iomap_start[idx].fh != zdev->fh || | ||
| 278 | zpci_iomap_start[idx].bar != bar); | ||
| 279 | } else { | ||
| 280 | zpci_iomap_start[idx].fh = zdev->fh; | ||
| 281 | zpci_iomap_start[idx].bar = bar; | ||
| 282 | } | ||
| 283 | /* Detect overrun */ | 278 | /* Detect overrun */ |
| 284 | BUG_ON(!zpci_iomap_start[idx].count); | 279 | WARN_ON(!++zpci_iomap_start[idx].count); |
| 280 | zpci_iomap_start[idx].fh = zdev->fh; | ||
| 281 | zpci_iomap_start[idx].bar = bar; | ||
| 285 | spin_unlock(&zpci_iomap_lock); | 282 | spin_unlock(&zpci_iomap_lock); |
| 286 | 283 | ||
| 287 | addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); | 284 | return (void __iomem *) ZPCI_ADDR(idx) + offset; |
| 288 | return (void __iomem *) addr + offset; | ||
| 289 | } | 285 | } |
| 290 | EXPORT_SYMBOL(pci_iomap_range); | 286 | EXPORT_SYMBOL(pci_iomap_range); |
| 291 | 287 | ||
| @@ -297,12 +293,11 @@ EXPORT_SYMBOL(pci_iomap); | |||
| 297 | 293 | ||
| 298 | void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) | 294 | void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) |
| 299 | { | 295 | { |
| 300 | unsigned int idx; | 296 | unsigned int idx = ZPCI_IDX(addr); |
| 301 | 297 | ||
| 302 | idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; | ||
| 303 | spin_lock(&zpci_iomap_lock); | 298 | spin_lock(&zpci_iomap_lock); |
| 304 | /* Detect underrun */ | 299 | /* Detect underrun */ |
| 305 | BUG_ON(!zpci_iomap_start[idx].count); | 300 | WARN_ON(!zpci_iomap_start[idx].count); |
| 306 | if (!--zpci_iomap_start[idx].count) { | 301 | if (!--zpci_iomap_start[idx].count) { |
| 307 | zpci_iomap_start[idx].fh = 0; | 302 | zpci_iomap_start[idx].fh = 0; |
| 308 | zpci_iomap_start[idx].bar = 0; | 303 | zpci_iomap_start[idx].bar = 0; |
| @@ -544,15 +539,15 @@ static void zpci_irq_exit(void) | |||
| 544 | 539 | ||
| 545 | static int zpci_alloc_iomap(struct zpci_dev *zdev) | 540 | static int zpci_alloc_iomap(struct zpci_dev *zdev) |
| 546 | { | 541 | { |
| 547 | int entry; | 542 | unsigned long entry; |
| 548 | 543 | ||
| 549 | spin_lock(&zpci_iomap_lock); | 544 | spin_lock(&zpci_iomap_lock); |
| 550 | entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); | 545 | entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES); |
| 551 | if (entry == ZPCI_IOMAP_MAX_ENTRIES) { | 546 | if (entry == ZPCI_IOMAP_ENTRIES) { |
| 552 | spin_unlock(&zpci_iomap_lock); | 547 | spin_unlock(&zpci_iomap_lock); |
| 553 | return -ENOSPC; | 548 | return -ENOSPC; |
| 554 | } | 549 | } |
| 555 | set_bit(entry, zpci_iomap); | 550 | set_bit(entry, zpci_iomap_bitmap); |
| 556 | spin_unlock(&zpci_iomap_lock); | 551 | spin_unlock(&zpci_iomap_lock); |
| 557 | return entry; | 552 | return entry; |
| 558 | } | 553 | } |
| @@ -561,7 +556,7 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) | |||
| 561 | { | 556 | { |
| 562 | spin_lock(&zpci_iomap_lock); | 557 | spin_lock(&zpci_iomap_lock); |
| 563 | memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry)); | 558 | memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry)); |
| 564 | clear_bit(entry, zpci_iomap); | 559 | clear_bit(entry, zpci_iomap_bitmap); |
| 565 | spin_unlock(&zpci_iomap_lock); | 560 | spin_unlock(&zpci_iomap_lock); |
| 566 | } | 561 | } |
| 567 | 562 | ||
| @@ -611,8 +606,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev, | |||
| 611 | if (zdev->bars[i].val & 4) | 606 | if (zdev->bars[i].val & 4) |
| 612 | flags |= IORESOURCE_MEM_64; | 607 | flags |= IORESOURCE_MEM_64; |
| 613 | 608 | ||
| 614 | addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48); | 609 | addr = ZPCI_ADDR(entry); |
| 615 | |||
| 616 | size = 1UL << zdev->bars[i].size; | 610 | size = 1UL << zdev->bars[i].size; |
| 617 | 611 | ||
| 618 | res = __alloc_res(zdev, addr, size, flags); | 612 | res = __alloc_res(zdev, addr, size, flags); |
| @@ -873,23 +867,30 @@ static int zpci_mem_init(void) | |||
| 873 | zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), | 867 | zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), |
| 874 | 16, 0, NULL); | 868 | 16, 0, NULL); |
| 875 | if (!zdev_fmb_cache) | 869 | if (!zdev_fmb_cache) |
| 876 | goto error_zdev; | 870 | goto error_fmb; |
| 877 | 871 | ||
| 878 | /* TODO: use realloc */ | 872 | zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES, |
| 879 | zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), | 873 | sizeof(*zpci_iomap_start), GFP_KERNEL); |
| 880 | GFP_KERNEL); | ||
| 881 | if (!zpci_iomap_start) | 874 | if (!zpci_iomap_start) |
| 882 | goto error_iomap; | 875 | goto error_iomap; |
| 883 | return 0; | ||
| 884 | 876 | ||
| 877 | zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES), | ||
| 878 | sizeof(*zpci_iomap_bitmap), GFP_KERNEL); | ||
| 879 | if (!zpci_iomap_bitmap) | ||
| 880 | goto error_iomap_bitmap; | ||
| 881 | |||
| 882 | return 0; | ||
| 883 | error_iomap_bitmap: | ||
| 884 | kfree(zpci_iomap_start); | ||
| 885 | error_iomap: | 885 | error_iomap: |
| 886 | kmem_cache_destroy(zdev_fmb_cache); | 886 | kmem_cache_destroy(zdev_fmb_cache); |
| 887 | error_zdev: | 887 | error_fmb: |
| 888 | return -ENOMEM; | 888 | return -ENOMEM; |
| 889 | } | 889 | } |
| 890 | 890 | ||
| 891 | static void zpci_mem_exit(void) | 891 | static void zpci_mem_exit(void) |
| 892 | { | 892 | { |
| 893 | kfree(zpci_iomap_bitmap); | ||
| 893 | kfree(zpci_iomap_start); | 894 | kfree(zpci_iomap_start); |
| 894 | kmem_cache_destroy(zdev_fmb_cache); | 895 | kmem_cache_destroy(zdev_fmb_cache); |
| 895 | } | 896 | } |
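Editor's note: the zpci_mem_init() hunk above replaces a fixed DECLARE_BITMAP with a bitmap allocated at runtime and sized by ZPCI_IOMAP_ENTRIES, unwinding partial allocations through goto labels. A minimal sketch of that allocate-bitmap-and-unwind pattern, with hypothetical names rather than the driver's symbols:

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/types.h>

static unsigned long *example_bitmap;
static u64 *example_table;

static int example_mem_init(unsigned long nr_entries)
{
	/* zeroed array with one slot per entry */
	example_table = kcalloc(nr_entries, sizeof(*example_table), GFP_KERNEL);
	if (!example_table)
		goto error_table;

	/* one bit per entry, rounded up to whole unsigned longs */
	example_bitmap = kcalloc(BITS_TO_LONGS(nr_entries),
				 sizeof(*example_bitmap), GFP_KERNEL);
	if (!example_bitmap)
		goto error_bitmap;

	return 0;

error_bitmap:
	kfree(example_table);
error_table:
	return -ENOMEM;
}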
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 369a3e05d468..b0e04751c5d5 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
| @@ -53,6 +53,11 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) | |||
| 53 | 53 | ||
| 54 | pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", | 54 | pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", |
| 55 | pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); | 55 | pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); |
| 56 | |||
| 57 | if (!pdev) | ||
| 58 | return; | ||
| 59 | |||
| 60 | pdev->error_state = pci_channel_io_perm_failure; | ||
| 56 | } | 61 | } |
| 57 | 62 | ||
| 58 | void zpci_event_error(void *data) | 63 | void zpci_event_error(void *data) |
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index f887c6465a82..8a84e05adb2e 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h | |||
| @@ -33,7 +33,6 @@ | |||
| 33 | #endif | 33 | #endif |
| 34 | 34 | ||
| 35 | #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) | 35 | #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) |
| 36 | #define smp_store_mb(var, value) __smp_store_mb(var, value) | ||
| 37 | 36 | ||
| 38 | #include <asm-generic/barrier.h> | 37 | #include <asm-generic/barrier.h> |
| 39 | 38 | ||
diff --git a/block/blk-merge.c b/block/blk-merge.c index 1699df5b0493..888a7fec81f7 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
| @@ -70,6 +70,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q, | |||
| 70 | return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); | 70 | return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static inline unsigned get_max_io_size(struct request_queue *q, | ||
| 74 | struct bio *bio) | ||
| 75 | { | ||
| 76 | unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); | ||
| 77 | unsigned mask = queue_logical_block_size(q) - 1; | ||
| 78 | |||
| 79 | /* aligned to logical block size */ | ||
| 80 | sectors &= ~(mask >> 9); | ||
| 81 | |||
| 82 | return sectors; | ||
| 83 | } | ||
| 84 | |||
| 73 | static struct bio *blk_bio_segment_split(struct request_queue *q, | 85 | static struct bio *blk_bio_segment_split(struct request_queue *q, |
| 74 | struct bio *bio, | 86 | struct bio *bio, |
| 75 | struct bio_set *bs, | 87 | struct bio_set *bs, |
| @@ -81,6 +93,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 81 | unsigned front_seg_size = bio->bi_seg_front_size; | 93 | unsigned front_seg_size = bio->bi_seg_front_size; |
| 82 | bool do_split = true; | 94 | bool do_split = true; |
| 83 | struct bio *new = NULL; | 95 | struct bio *new = NULL; |
| 96 | const unsigned max_sectors = get_max_io_size(q, bio); | ||
| 84 | 97 | ||
| 85 | bio_for_each_segment(bv, bio, iter) { | 98 | bio_for_each_segment(bv, bio, iter) { |
| 86 | /* | 99 | /* |
| @@ -90,20 +103,19 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
| 90 | if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) | 103 | if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) |
| 91 | goto split; | 104 | goto split; |
| 92 | 105 | ||
| 93 | if (sectors + (bv.bv_len >> 9) > | 106 | if (sectors + (bv.bv_len >> 9) > max_sectors) { |
| 94 | blk_max_size_offset(q, bio->bi_iter.bi_sector)) { | ||
| 95 | /* | 107 | /* |
| 96 | * Consider this a new segment if we're splitting in | 108 | * Consider this a new segment if we're splitting in |
| 97 | * the middle of this vector. | 109 | * the middle of this vector. |
| 98 | */ | 110 | */ |
| 99 | if (nsegs < queue_max_segments(q) && | 111 | if (nsegs < queue_max_segments(q) && |
| 100 | sectors < blk_max_size_offset(q, | 112 | sectors < max_sectors) { |
| 101 | bio->bi_iter.bi_sector)) { | ||
| 102 | nsegs++; | 113 | nsegs++; |
| 103 | sectors = blk_max_size_offset(q, | 114 | sectors = max_sectors; |
| 104 | bio->bi_iter.bi_sector); | ||
| 105 | } | 115 | } |
| 106 | goto split; | 116 | if (sectors) |
| 117 | goto split; | ||
| 118 | /* Make this single bvec as the 1st segment */ | ||
| 107 | } | 119 | } |
| 108 | 120 | ||
| 109 | if (bvprvp && blk_queue_cluster(q)) { | 121 | if (bvprvp && blk_queue_cluster(q)) { |
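Editor's note: the new get_max_io_size() helper above clamps a bio's sector budget and rounds it down to a whole number of logical blocks; since a sector is 512 bytes, shifting the byte mask right by 9 turns it into a sector mask. A small stand-alone illustration of that arithmetic (plain C, not block-layer code):

#include <stdio.h>

/* Round a sector count down to a multiple of the logical block size.
 * lbs_bytes is assumed to be a power of two, as logical block sizes are. */
static unsigned int align_sectors(unsigned int sectors, unsigned int lbs_bytes)
{
	unsigned int mask = lbs_bytes - 1;

	return sectors & ~(mask >> 9);
}

int main(void)
{
	printf("%u\n", align_sectors(1001, 4096)); /* 4 KiB blocks: 1001 -> 1000, a multiple of 8 sectors */
	printf("%u\n", align_sectors(255, 512));   /* 512-byte blocks: unchanged, prints 255 */
	return 0;
}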
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 90e2d54be526..1316ddd92fac 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c | |||
| @@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = { | |||
| 135 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), | 135 | DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), |
| 136 | }, | 136 | }, |
| 137 | }, | 137 | }, |
| 138 | { | ||
| 139 | .callback = video_detect_force_vendor, | ||
| 140 | .ident = "Dell Inspiron 5737", | ||
| 141 | .matches = { | ||
| 142 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 143 | DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"), | ||
| 144 | }, | ||
| 145 | }, | ||
| 146 | 138 | ||
| 147 | /* | 139 | /* |
| 148 | * These models have a working acpi_video backlight control, and using | 140 | * These models have a working acpi_video backlight control, and using |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 6ac9a7f33b64..784dbe897a5e 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
| @@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) | |||
| 162 | 162 | ||
| 163 | /** | 163 | /** |
| 164 | * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). | 164 | * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff(). |
| 165 | * @genpd: PM domait to power off. | 165 | * @genpd: PM domain to power off. |
| 166 | * | 166 | * |
| 167 | * Queue up the execution of genpd_poweroff() unless it's already been done | 167 | * Queue up the execution of genpd_poweroff() unless it's already been done |
| 168 | * before. | 168 | * before. |
| @@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) | |||
| 172 | queue_work(pm_wq, &genpd->power_off_work); | 172 | queue_work(pm_wq, &genpd->power_off_work); |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | static int genpd_poweron(struct generic_pm_domain *genpd); | ||
| 176 | |||
| 177 | /** | 175 | /** |
| 178 | * __genpd_poweron - Restore power to a given PM domain and its masters. | 176 | * __genpd_poweron - Restore power to a given PM domain and its masters. |
| 179 | * @genpd: PM domain to power up. | 177 | * @genpd: PM domain to power up. |
| 178 | * @depth: nesting count for lockdep. | ||
| 180 | * | 179 | * |
| 181 | * Restore power to @genpd and all of its masters so that it is possible to | 180 | * Restore power to @genpd and all of its masters so that it is possible to |
| 182 | * resume a device belonging to it. | 181 | * resume a device belonging to it. |
| 183 | */ | 182 | */ |
| 184 | static int __genpd_poweron(struct generic_pm_domain *genpd) | 183 | static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) |
| 185 | { | 184 | { |
| 186 | struct gpd_link *link; | 185 | struct gpd_link *link; |
| 187 | int ret = 0; | 186 | int ret = 0; |
| @@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd) | |||
| 196 | * with it. | 195 | * with it. |
| 197 | */ | 196 | */ |
| 198 | list_for_each_entry(link, &genpd->slave_links, slave_node) { | 197 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
| 199 | genpd_sd_counter_inc(link->master); | 198 | struct generic_pm_domain *master = link->master; |
| 199 | |||
| 200 | genpd_sd_counter_inc(master); | ||
| 201 | |||
| 202 | mutex_lock_nested(&master->lock, depth + 1); | ||
| 203 | ret = __genpd_poweron(master, depth + 1); | ||
| 204 | mutex_unlock(&master->lock); | ||
| 200 | 205 | ||
| 201 | ret = genpd_poweron(link->master); | ||
| 202 | if (ret) { | 206 | if (ret) { |
| 203 | genpd_sd_counter_dec(link->master); | 207 | genpd_sd_counter_dec(master); |
| 204 | goto err; | 208 | goto err; |
| 205 | } | 209 | } |
| 206 | } | 210 | } |
| @@ -232,11 +236,12 @@ static int genpd_poweron(struct generic_pm_domain *genpd) | |||
| 232 | int ret; | 236 | int ret; |
| 233 | 237 | ||
| 234 | mutex_lock(&genpd->lock); | 238 | mutex_lock(&genpd->lock); |
| 235 | ret = __genpd_poweron(genpd); | 239 | ret = __genpd_poweron(genpd, 0); |
| 236 | mutex_unlock(&genpd->lock); | 240 | mutex_unlock(&genpd->lock); |
| 237 | return ret; | 241 | return ret; |
| 238 | } | 242 | } |
| 239 | 243 | ||
| 244 | |||
| 240 | static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) | 245 | static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) |
| 241 | { | 246 | { |
| 242 | return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); | 247 | return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); |
| @@ -484,7 +489,7 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 484 | } | 489 | } |
| 485 | 490 | ||
| 486 | mutex_lock(&genpd->lock); | 491 | mutex_lock(&genpd->lock); |
| 487 | ret = __genpd_poweron(genpd); | 492 | ret = __genpd_poweron(genpd, 0); |
| 488 | mutex_unlock(&genpd->lock); | 493 | mutex_unlock(&genpd->lock); |
| 489 | 494 | ||
| 490 | if (ret) | 495 | if (ret) |
| @@ -1339,8 +1344,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
| 1339 | if (!link) | 1344 | if (!link) |
| 1340 | return -ENOMEM; | 1345 | return -ENOMEM; |
| 1341 | 1346 | ||
| 1342 | mutex_lock(&genpd->lock); | 1347 | mutex_lock(&subdomain->lock); |
| 1343 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1348 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); |
| 1344 | 1349 | ||
| 1345 | if (genpd->status == GPD_STATE_POWER_OFF | 1350 | if (genpd->status == GPD_STATE_POWER_OFF |
| 1346 | && subdomain->status != GPD_STATE_POWER_OFF) { | 1351 | && subdomain->status != GPD_STATE_POWER_OFF) { |
| @@ -1363,8 +1368,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
| 1363 | genpd_sd_counter_inc(genpd); | 1368 | genpd_sd_counter_inc(genpd); |
| 1364 | 1369 | ||
| 1365 | out: | 1370 | out: |
| 1366 | mutex_unlock(&subdomain->lock); | ||
| 1367 | mutex_unlock(&genpd->lock); | 1371 | mutex_unlock(&genpd->lock); |
| 1372 | mutex_unlock(&subdomain->lock); | ||
| 1368 | if (ret) | 1373 | if (ret) |
| 1369 | kfree(link); | 1374 | kfree(link); |
| 1370 | return ret; | 1375 | return ret; |
| @@ -1385,7 +1390,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
| 1385 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) | 1390 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
| 1386 | return -EINVAL; | 1391 | return -EINVAL; |
| 1387 | 1392 | ||
| 1388 | mutex_lock(&genpd->lock); | 1393 | mutex_lock(&subdomain->lock); |
| 1394 | mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING); | ||
| 1389 | 1395 | ||
| 1390 | if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { | 1396 | if (!list_empty(&subdomain->slave_links) || subdomain->device_count) { |
| 1391 | pr_warn("%s: unable to remove subdomain %s\n", genpd->name, | 1397 | pr_warn("%s: unable to remove subdomain %s\n", genpd->name, |
| @@ -1398,22 +1404,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
| 1398 | if (link->slave != subdomain) | 1404 | if (link->slave != subdomain) |
| 1399 | continue; | 1405 | continue; |
| 1400 | 1406 | ||
| 1401 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | ||
| 1402 | |||
| 1403 | list_del(&link->master_node); | 1407 | list_del(&link->master_node); |
| 1404 | list_del(&link->slave_node); | 1408 | list_del(&link->slave_node); |
| 1405 | kfree(link); | 1409 | kfree(link); |
| 1406 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1410 | if (subdomain->status != GPD_STATE_POWER_OFF) |
| 1407 | genpd_sd_counter_dec(genpd); | 1411 | genpd_sd_counter_dec(genpd); |
| 1408 | 1412 | ||
| 1409 | mutex_unlock(&subdomain->lock); | ||
| 1410 | |||
| 1411 | ret = 0; | 1413 | ret = 0; |
| 1412 | break; | 1414 | break; |
| 1413 | } | 1415 | } |
| 1414 | 1416 | ||
| 1415 | out: | 1417 | out: |
| 1416 | mutex_unlock(&genpd->lock); | 1418 | mutex_unlock(&genpd->lock); |
| 1419 | mutex_unlock(&subdomain->lock); | ||
| 1417 | 1420 | ||
| 1418 | return ret; | 1421 | return ret; |
| 1419 | } | 1422 | } |
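Editor's note: the genpd hunks above thread a nesting depth through __genpd_poweron() and take each master's lock with mutex_lock_nested(), so lockdep can tell apart the levels of a parent chain that all share one lock class; the subdomain helpers also switch to locking the subdomain before its parent so both paths agree on ordering. A minimal sketch of the depth-annotated recursion, with made-up structure names:

#include <linux/mutex.h>

struct pd_sketch {
	struct mutex lock;
	struct pd_sketch *master;
};

/* Caller already holds pd->lock taken at subclass 'depth'. */
static int power_up_masters(struct pd_sketch *pd, unsigned int depth)
{
	int ret = 0;

	if (pd->master) {
		/* same lock class, deeper subclass: no false lockdep report */
		mutex_lock_nested(&pd->master->lock, depth + 1);
		ret = power_up_masters(pd->master, depth + 1);
		mutex_unlock(&pd->master->lock);
	}
	return ret;
}

Note that lockdep only tracks a small number of subclasses (MAX_LOCKDEP_SUBCLASSES is 8), so this approach suits shallow hierarchies.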
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 9bc37c437874..0ca74d070058 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c | |||
| @@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev, | |||
| 142 | 142 | ||
| 143 | try_again: | 143 | try_again: |
| 144 | cpu_reg = regulator_get_optional(cpu_dev, reg); | 144 | cpu_reg = regulator_get_optional(cpu_dev, reg); |
| 145 | if (IS_ERR(cpu_reg)) { | 145 | ret = PTR_ERR_OR_ZERO(cpu_reg); |
| 146 | if (ret) { | ||
| 146 | /* | 147 | /* |
| 147 | * If cpu's regulator supply node is present, but regulator is | 148 | * If cpu's regulator supply node is present, but regulator is |
| 148 | * not yet registered, we should try defering probe. | 149 | * not yet registered, we should try defering probe. |
| 149 | */ | 150 | */ |
| 150 | if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { | 151 | if (ret == -EPROBE_DEFER) { |
| 151 | dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", | 152 | dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n", |
| 152 | cpu); | 153 | cpu); |
| 153 | return -EPROBE_DEFER; | 154 | return ret; |
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | /* Try with "cpu-supply" */ | 157 | /* Try with "cpu-supply" */ |
| @@ -159,18 +160,16 @@ try_again: | |||
| 159 | goto try_again; | 160 | goto try_again; |
| 160 | } | 161 | } |
| 161 | 162 | ||
| 162 | dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n", | 163 | dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret); |
| 163 | cpu, PTR_ERR(cpu_reg)); | ||
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | cpu_clk = clk_get(cpu_dev, NULL); | 166 | cpu_clk = clk_get(cpu_dev, NULL); |
| 167 | if (IS_ERR(cpu_clk)) { | 167 | ret = PTR_ERR_OR_ZERO(cpu_clk); |
| 168 | if (ret) { | ||
| 168 | /* put regulator */ | 169 | /* put regulator */ |
| 169 | if (!IS_ERR(cpu_reg)) | 170 | if (!IS_ERR(cpu_reg)) |
| 170 | regulator_put(cpu_reg); | 171 | regulator_put(cpu_reg); |
| 171 | 172 | ||
| 172 | ret = PTR_ERR(cpu_clk); | ||
| 173 | |||
| 174 | /* | 173 | /* |
| 175 | * If cpu's clk node is present, but clock is not yet | 174 | * If cpu's clk node is present, but clock is not yet |
| 176 | * registered, we should try defering probe. | 175 | * registered, we should try defering probe. |
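Editor's note: the cpufreq-dt hunks above fold the IS_ERR()/PTR_ERR() pair into PTR_ERR_OR_ZERO(), which yields 0 for a valid pointer and the negative errno for an error pointer, so -EPROBE_DEFER can be returned directly. A short sketch of the idiom with a hypothetical helper name:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret = PTR_ERR_OR_ZERO(clk);  /* 0 on success, -E... for ERR_PTR values */

	if (ret)
		return ret;              /* e.g. -EPROBE_DEFER propagates unchanged */

	*out = clk;
	return 0;
}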
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index c35e7da1ed7a..e979ec78b695 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy, | |||
| 48 | bool active) | 48 | bool active) |
| 49 | { | 49 | { |
| 50 | do { | 50 | do { |
| 51 | policy = list_next_entry(policy, policy_list); | ||
| 52 | |||
| 53 | /* No more policies in the list */ | 51 | /* No more policies in the list */ |
| 54 | if (&policy->policy_list == &cpufreq_policy_list) | 52 | if (list_is_last(&policy->policy_list, &cpufreq_policy_list)) |
| 55 | return NULL; | 53 | return NULL; |
| 54 | |||
| 55 | policy = list_next_entry(policy, policy_list); | ||
| 56 | } while (!suitable_policy(policy, active)); | 56 | } while (!suitable_policy(policy, active)); |
| 57 | 57 | ||
| 58 | return policy; | 58 | return policy; |
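Editor's note: the next_policy() hunk above checks list_is_last() on the current entry before advancing, so list_next_entry() is never applied to the bare list head, which is not embedded in a policy structure. The same pattern in isolation, with hypothetical types:

#include <linux/list.h>

struct item {
	struct list_head node;
	int value;
};

static struct item *next_item(struct item *pos, struct list_head *head)
{
	/* stop before stepping onto the list head itself */
	if (list_is_last(&pos->node, head))
		return NULL;
	return list_next_entry(pos, node);
}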
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index bab3a514ec12..e0d111024d48 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
| @@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy, | |||
| 387 | if (!have_governor_per_policy()) | 387 | if (!have_governor_per_policy()) |
| 388 | cdata->gdbs_data = dbs_data; | 388 | cdata->gdbs_data = dbs_data; |
| 389 | 389 | ||
| 390 | policy->governor_data = dbs_data; | ||
| 391 | |||
| 390 | ret = sysfs_create_group(get_governor_parent_kobj(policy), | 392 | ret = sysfs_create_group(get_governor_parent_kobj(policy), |
| 391 | get_sysfs_attr(dbs_data)); | 393 | get_sysfs_attr(dbs_data)); |
| 392 | if (ret) | 394 | if (ret) |
| 393 | goto reset_gdbs_data; | 395 | goto reset_gdbs_data; |
| 394 | 396 | ||
| 395 | policy->governor_data = dbs_data; | ||
| 396 | |||
| 397 | return 0; | 397 | return 0; |
| 398 | 398 | ||
| 399 | reset_gdbs_data: | 399 | reset_gdbs_data: |
| 400 | policy->governor_data = NULL; | ||
| 401 | |||
| 400 | if (!have_governor_per_policy()) | 402 | if (!have_governor_per_policy()) |
| 401 | cdata->gdbs_data = NULL; | 403 | cdata->gdbs_data = NULL; |
| 402 | cdata->exit(dbs_data, !policy->governor->initialized); | 404 | cdata->exit(dbs_data, !policy->governor->initialized); |
| @@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy, | |||
| 417 | if (!cdbs->shared || cdbs->shared->policy) | 419 | if (!cdbs->shared || cdbs->shared->policy) |
| 418 | return -EBUSY; | 420 | return -EBUSY; |
| 419 | 421 | ||
| 420 | policy->governor_data = NULL; | ||
| 421 | if (!--dbs_data->usage_count) { | 422 | if (!--dbs_data->usage_count) { |
| 422 | sysfs_remove_group(get_governor_parent_kobj(policy), | 423 | sysfs_remove_group(get_governor_parent_kobj(policy), |
| 423 | get_sysfs_attr(dbs_data)); | 424 | get_sysfs_attr(dbs_data)); |
| 424 | 425 | ||
| 426 | policy->governor_data = NULL; | ||
| 427 | |||
| 425 | if (!have_governor_per_policy()) | 428 | if (!have_governor_per_policy()) |
| 426 | cdata->gdbs_data = NULL; | 429 | cdata->gdbs_data = NULL; |
| 427 | 430 | ||
| 428 | cdata->exit(dbs_data, policy->governor->initialized == 1); | 431 | cdata->exit(dbs_data, policy->governor->initialized == 1); |
| 429 | kfree(dbs_data); | 432 | kfree(dbs_data); |
| 433 | } else { | ||
| 434 | policy->governor_data = NULL; | ||
| 430 | } | 435 | } |
| 431 | 436 | ||
| 432 | free_common_dbs_info(policy, cdata); | 437 | free_common_dbs_info(policy, cdata); |
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c index 1d99c97defa9..096377232747 100644 --- a/drivers/cpufreq/pxa2xx-cpufreq.c +++ b/drivers/cpufreq/pxa2xx-cpufreq.c | |||
| @@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void) | |||
| 202 | } | 202 | } |
| 203 | } | 203 | } |
| 204 | #else | 204 | #else |
| 205 | static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq) | 205 | static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) |
| 206 | { | 206 | { |
| 207 | return 0; | 207 | return 0; |
| 208 | } | 208 | } |
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 344058f8501a..d5657d50ac40 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c | |||
| @@ -119,7 +119,6 @@ struct cpuidle_coupled { | |||
| 119 | 119 | ||
| 120 | #define CPUIDLE_COUPLED_NOT_IDLE (-1) | 120 | #define CPUIDLE_COUPLED_NOT_IDLE (-1) |
| 121 | 121 | ||
| 122 | static DEFINE_MUTEX(cpuidle_coupled_lock); | ||
| 123 | static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); | 122 | static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); |
| 124 | 123 | ||
| 125 | /* | 124 | /* |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 046423b0c5ca..f996efc56605 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 153 | * be frozen safely. | 153 | * be frozen safely. |
| 154 | */ | 154 | */ |
| 155 | index = find_deepest_state(drv, dev, UINT_MAX, 0, true); | 155 | index = find_deepest_state(drv, dev, UINT_MAX, 0, true); |
| 156 | if (index >= 0) | 156 | if (index > 0) |
| 157 | enter_freeze_proper(drv, dev, index); | 157 | enter_freeze_proper(drv, dev, index); |
| 158 | 158 | ||
| 159 | return index; | 159 | return index; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 313b0cc8d676..82edf95b7740 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -2278,60 +2278,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
| 2278 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | 2278 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) |
| 2279 | 2279 | ||
| 2280 | #define amdgpu_dpm_get_temperature(adev) \ | 2280 | #define amdgpu_dpm_get_temperature(adev) \ |
| 2281 | (adev)->pp_enabled ? \ | 2281 | ((adev)->pp_enabled ? \ |
| 2282 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ | 2282 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ |
| 2283 | (adev)->pm.funcs->get_temperature((adev)) | 2283 | (adev)->pm.funcs->get_temperature((adev))) |
| 2284 | 2284 | ||
| 2285 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ | 2285 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ |
| 2286 | (adev)->pp_enabled ? \ | 2286 | ((adev)->pp_enabled ? \ |
| 2287 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ | 2287 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ |
| 2288 | (adev)->pm.funcs->set_fan_control_mode((adev), (m)) | 2288 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) |
| 2289 | 2289 | ||
| 2290 | #define amdgpu_dpm_get_fan_control_mode(adev) \ | 2290 | #define amdgpu_dpm_get_fan_control_mode(adev) \ |
| 2291 | (adev)->pp_enabled ? \ | 2291 | ((adev)->pp_enabled ? \ |
| 2292 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ | 2292 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ |
| 2293 | (adev)->pm.funcs->get_fan_control_mode((adev)) | 2293 | (adev)->pm.funcs->get_fan_control_mode((adev))) |
| 2294 | 2294 | ||
| 2295 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ | 2295 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ |
| 2296 | (adev)->pp_enabled ? \ | 2296 | ((adev)->pp_enabled ? \ |
| 2297 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | 2297 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ |
| 2298 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) | 2298 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) |
| 2299 | 2299 | ||
| 2300 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ | 2300 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ |
| 2301 | (adev)->pp_enabled ? \ | 2301 | ((adev)->pp_enabled ? \ |
| 2302 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ | 2302 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ |
| 2303 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) | 2303 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) |
| 2304 | 2304 | ||
| 2305 | #define amdgpu_dpm_get_sclk(adev, l) \ | 2305 | #define amdgpu_dpm_get_sclk(adev, l) \ |
| 2306 | (adev)->pp_enabled ? \ | 2306 | ((adev)->pp_enabled ? \ |
| 2307 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ | 2307 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ |
| 2308 | (adev)->pm.funcs->get_sclk((adev), (l)) | 2308 | (adev)->pm.funcs->get_sclk((adev), (l))) |
| 2309 | 2309 | ||
| 2310 | #define amdgpu_dpm_get_mclk(adev, l) \ | 2310 | #define amdgpu_dpm_get_mclk(adev, l) \ |
| 2311 | (adev)->pp_enabled ? \ | 2311 | ((adev)->pp_enabled ? \ |
| 2312 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ | 2312 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ |
| 2313 | (adev)->pm.funcs->get_mclk((adev), (l)) | 2313 | (adev)->pm.funcs->get_mclk((adev), (l))) |
| 2314 | 2314 | ||
| 2315 | 2315 | ||
| 2316 | #define amdgpu_dpm_force_performance_level(adev, l) \ | 2316 | #define amdgpu_dpm_force_performance_level(adev, l) \ |
| 2317 | (adev)->pp_enabled ? \ | 2317 | ((adev)->pp_enabled ? \ |
| 2318 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ | 2318 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ |
| 2319 | (adev)->pm.funcs->force_performance_level((adev), (l)) | 2319 | (adev)->pm.funcs->force_performance_level((adev), (l))) |
| 2320 | 2320 | ||
| 2321 | #define amdgpu_dpm_powergate_uvd(adev, g) \ | 2321 | #define amdgpu_dpm_powergate_uvd(adev, g) \ |
| 2322 | (adev)->pp_enabled ? \ | 2322 | ((adev)->pp_enabled ? \ |
| 2323 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ | 2323 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ |
| 2324 | (adev)->pm.funcs->powergate_uvd((adev), (g)) | 2324 | (adev)->pm.funcs->powergate_uvd((adev), (g))) |
| 2325 | 2325 | ||
| 2326 | #define amdgpu_dpm_powergate_vce(adev, g) \ | 2326 | #define amdgpu_dpm_powergate_vce(adev, g) \ |
| 2327 | (adev)->pp_enabled ? \ | 2327 | ((adev)->pp_enabled ? \ |
| 2328 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ | 2328 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ |
| 2329 | (adev)->pm.funcs->powergate_vce((adev), (g)) | 2329 | (adev)->pm.funcs->powergate_vce((adev), (g))) |
| 2330 | 2330 | ||
| 2331 | #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ | 2331 | #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ |
| 2332 | (adev)->pp_enabled ? \ | 2332 | ((adev)->pp_enabled ? \ |
| 2333 | (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ | 2333 | (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ |
| 2334 | (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) | 2334 | (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) |
| 2335 | 2335 | ||
| 2336 | #define amdgpu_dpm_get_current_power_state(adev) \ | 2336 | #define amdgpu_dpm_get_current_power_state(adev) \ |
| 2337 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) | 2337 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) |
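Editor's note: the amdgpu.h hunk above only adds outer parentheses around each conditional-expression macro body; without them, an operator at the call site can bind into the expansion and silently change its meaning. A stand-alone demonstration in ordinary C, unrelated to the driver:

#include <stdio.h>

#define PICK_BAD(flag, a, b)  (flag) ? (a) : (b)
#define PICK_GOOD(flag, a, b) ((flag) ? (a) : (b))

int main(void)
{
	/* -PICK_BAD(1, 2, 3) expands to -(1) ? (2) : (3): the minus negates
	 * the condition, not the result, so this prints 2. */
	printf("%d\n", -PICK_BAD(1, 2, 3));
	/* with the outer parentheses the whole expression is negated: prints -2 */
	printf("%d\n", -PICK_GOOD(1, 2, 3));
	return 0;
}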
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6f89f8e034d0..b882e8175615 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
| 478 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; | 478 | struct amdgpu_fpriv *fpriv = parser->filp->driver_priv; |
| 479 | unsigned i; | 479 | unsigned i; |
| 480 | 480 | ||
| 481 | amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); | ||
| 482 | |||
| 483 | if (!error) { | 481 | if (!error) { |
| 482 | amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm); | ||
| 483 | |||
| 484 | /* Sort the buffer list from the smallest to largest buffer, | 484 | /* Sort the buffer list from the smallest to largest buffer, |
| 485 | * which affects the order of buffers in the LRU list. | 485 | * which affects the order of buffers in the LRU list. |
| 486 | * This assures that the smallest buffers are added first | 486 | * This assures that the smallest buffers are added first |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index cfb6caad2a73..919146780a15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | |||
| @@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev) | |||
| 333 | if (!adev->mode_info.mode_config_initialized) | 333 | if (!adev->mode_info.mode_config_initialized) |
| 334 | return 0; | 334 | return 0; |
| 335 | 335 | ||
| 336 | /* don't init fbdev if there are no connectors */ | ||
| 337 | if (list_empty(&adev->ddev->mode_config.connector_list)) | ||
| 338 | return 0; | ||
| 339 | |||
| 336 | /* select 8 bpp console on low vram cards */ | 340 | /* select 8 bpp console on low vram cards */ |
| 337 | if (adev->mc.real_vram_size <= (32*1024*1024)) | 341 | if (adev->mc.real_vram_size <= (32*1024*1024)) |
| 338 | bpp_sel = 8; | 342 | bpp_sel = 8; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index c3ce103b6a33..a2a16acee34d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
| @@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, | |||
| 399 | } | 399 | } |
| 400 | if (fpfn > bo->placements[i].fpfn) | 400 | if (fpfn > bo->placements[i].fpfn) |
| 401 | bo->placements[i].fpfn = fpfn; | 401 | bo->placements[i].fpfn = fpfn; |
| 402 | if (lpfn && lpfn < bo->placements[i].lpfn) | 402 | if (!bo->placements[i].lpfn || |
| 403 | (lpfn && lpfn < bo->placements[i].lpfn)) | ||
| 403 | bo->placements[i].lpfn = lpfn; | 404 | bo->placements[i].lpfn = lpfn; |
| 404 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; | 405 | bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; |
| 405 | } | 406 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 5ee9a0690278..b9d0d55f6b47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
| @@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle) | |||
| 99 | 99 | ||
| 100 | #ifdef CONFIG_DRM_AMD_POWERPLAY | 100 | #ifdef CONFIG_DRM_AMD_POWERPLAY |
| 101 | switch (adev->asic_type) { | 101 | switch (adev->asic_type) { |
| 102 | case CHIP_TONGA: | 102 | case CHIP_TONGA: |
| 103 | case CHIP_FIJI: | 103 | case CHIP_FIJI: |
| 104 | adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; | 104 | adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true; |
| 105 | break; | 105 | break; |
| 106 | default: | 106 | case CHIP_CARRIZO: |
| 107 | adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; | 107 | case CHIP_STONEY: |
| 108 | break; | 108 | adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false; |
| 109 | break; | ||
| 110 | /* These chips don't have powerplay implemenations */ | ||
| 111 | case CHIP_BONAIRE: | ||
| 112 | case CHIP_HAWAII: | ||
| 113 | case CHIP_KABINI: | ||
| 114 | case CHIP_MULLINS: | ||
| 115 | case CHIP_KAVERI: | ||
| 116 | case CHIP_TOPAZ: | ||
| 117 | default: | ||
| 118 | adev->pp_enabled = false; | ||
| 119 | break; | ||
| 109 | } | 120 | } |
| 110 | #else | 121 | #else |
| 111 | adev->pp_enabled = false; | 122 | adev->pp_enabled = false; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 78e9b0f14661..d1f234dd2126 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) | |||
| 487 | seq_printf(m, "rptr: 0x%08x [%5d]\n", | 487 | seq_printf(m, "rptr: 0x%08x [%5d]\n", |
| 488 | rptr, rptr); | 488 | rptr, rptr); |
| 489 | 489 | ||
| 490 | rptr_next = ~0; | 490 | rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); |
| 491 | 491 | ||
| 492 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", | 492 | seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", |
| 493 | ring->wptr, ring->wptr); | 493 | ring->wptr, ring->wptr); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index aefc668e6b5d..9599f7559b3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1282 | { | 1282 | { |
| 1283 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, | 1283 | const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, |
| 1284 | AMDGPU_VM_PTE_COUNT * 8); | 1284 | AMDGPU_VM_PTE_COUNT * 8); |
| 1285 | unsigned pd_size, pd_entries, pts_size; | 1285 | unsigned pd_size, pd_entries; |
| 1286 | int i, r; | 1286 | int i, r; |
| 1287 | 1287 | ||
| 1288 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { | 1288 | for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { |
| @@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1300 | pd_entries = amdgpu_vm_num_pdes(adev); | 1300 | pd_entries = amdgpu_vm_num_pdes(adev); |
| 1301 | 1301 | ||
| 1302 | /* allocate page table array */ | 1302 | /* allocate page table array */ |
| 1303 | pts_size = pd_entries * sizeof(struct amdgpu_vm_pt); | 1303 | vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt)); |
| 1304 | vm->page_tables = kzalloc(pts_size, GFP_KERNEL); | ||
| 1305 | if (vm->page_tables == NULL) { | 1304 | if (vm->page_tables == NULL) { |
| 1306 | DRM_ERROR("Cannot allocate memory for page table array\n"); | 1305 | DRM_ERROR("Cannot allocate memory for page table array\n"); |
| 1307 | return -ENOMEM; | 1306 | return -ENOMEM; |
| @@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
| 1361 | 1360 | ||
| 1362 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) | 1361 | for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) |
| 1363 | amdgpu_bo_unref(&vm->page_tables[i].entry.robj); | 1362 | amdgpu_bo_unref(&vm->page_tables[i].entry.robj); |
| 1364 | kfree(vm->page_tables); | 1363 | drm_free_large(vm->page_tables); |
| 1365 | 1364 | ||
| 1366 | amdgpu_bo_unref(&vm->page_directory); | 1365 | amdgpu_bo_unref(&vm->page_directory); |
| 1367 | fence_put(vm->page_directory_fence); | 1366 | fence_put(vm->page_directory_fence); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 13235d84e5a6..95c0cdfbd1b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -4186,7 +4186,18 @@ static int gfx_v8_0_soft_reset(void *handle) | |||
| 4186 | gfx_v8_0_cp_gfx_enable(adev, false); | 4186 | gfx_v8_0_cp_gfx_enable(adev, false); |
| 4187 | 4187 | ||
| 4188 | /* Disable MEC parsing/prefetching */ | 4188 | /* Disable MEC parsing/prefetching */ |
| 4189 | /* XXX todo */ | 4189 | gfx_v8_0_cp_compute_enable(adev, false); |
| 4190 | |||
| 4191 | if (grbm_soft_reset || srbm_soft_reset) { | ||
| 4192 | tmp = RREG32(mmGMCON_DEBUG); | ||
| 4193 | tmp = REG_SET_FIELD(tmp, | ||
| 4194 | GMCON_DEBUG, GFX_STALL, 1); | ||
| 4195 | tmp = REG_SET_FIELD(tmp, | ||
| 4196 | GMCON_DEBUG, GFX_CLEAR, 1); | ||
| 4197 | WREG32(mmGMCON_DEBUG, tmp); | ||
| 4198 | |||
| 4199 | udelay(50); | ||
| 4200 | } | ||
| 4190 | 4201 | ||
| 4191 | if (grbm_soft_reset) { | 4202 | if (grbm_soft_reset) { |
| 4192 | tmp = RREG32(mmGRBM_SOFT_RESET); | 4203 | tmp = RREG32(mmGRBM_SOFT_RESET); |
| @@ -4215,6 +4226,16 @@ static int gfx_v8_0_soft_reset(void *handle) | |||
| 4215 | WREG32(mmSRBM_SOFT_RESET, tmp); | 4226 | WREG32(mmSRBM_SOFT_RESET, tmp); |
| 4216 | tmp = RREG32(mmSRBM_SOFT_RESET); | 4227 | tmp = RREG32(mmSRBM_SOFT_RESET); |
| 4217 | } | 4228 | } |
| 4229 | |||
| 4230 | if (grbm_soft_reset || srbm_soft_reset) { | ||
| 4231 | tmp = RREG32(mmGMCON_DEBUG); | ||
| 4232 | tmp = REG_SET_FIELD(tmp, | ||
| 4233 | GMCON_DEBUG, GFX_STALL, 0); | ||
| 4234 | tmp = REG_SET_FIELD(tmp, | ||
| 4235 | GMCON_DEBUG, GFX_CLEAR, 0); | ||
| 4236 | WREG32(mmGMCON_DEBUG, tmp); | ||
| 4237 | } | ||
| 4238 | |||
| 4218 | /* Wait a little for things to settle down */ | 4239 | /* Wait a little for things to settle down */ |
| 4219 | udelay(50); | 4240 | udelay(50); |
| 4220 | gfx_v8_0_print_status((void *)adev); | 4241 | gfx_v8_0_print_status((void *)adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index f4a1346525fe..0497784b3652 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
| @@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle) | |||
| 122 | 122 | ||
| 123 | static int tonga_dpm_suspend(void *handle) | 123 | static int tonga_dpm_suspend(void *handle) |
| 124 | { | 124 | { |
| 125 | return 0; | 125 | return tonga_dpm_hw_fini(handle); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | static int tonga_dpm_resume(void *handle) | 128 | static int tonga_dpm_resume(void *handle) |
| 129 | { | 129 | { |
| 130 | int ret; | 130 | return tonga_dpm_hw_init(handle); |
| 131 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 132 | |||
| 133 | mutex_lock(&adev->pm.mutex); | ||
| 134 | |||
| 135 | ret = tonga_smu_start(adev); | ||
| 136 | if (ret) { | ||
| 137 | DRM_ERROR("SMU start failed\n"); | ||
| 138 | goto fail; | ||
| 139 | } | ||
| 140 | |||
| 141 | fail: | ||
| 142 | mutex_unlock(&adev->pm.mutex); | ||
| 143 | return ret; | ||
| 144 | } | 131 | } |
| 145 | 132 | ||
| 146 | static int tonga_dpm_set_clockgating_state(void *handle, | 133 | static int tonga_dpm_set_clockgating_state(void *handle, |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8f5d5edcf193..aa67244a77ae 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -64,6 +64,11 @@ static int pp_sw_init(void *handle) | |||
| 64 | if (ret == 0) | 64 | if (ret == 0) |
| 65 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); | 65 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); |
| 66 | 66 | ||
| 67 | if (ret) | ||
| 68 | printk("amdgpu: powerplay initialization failed\n"); | ||
| 69 | else | ||
| 70 | printk("amdgpu: powerplay initialized\n"); | ||
| 71 | |||
| 67 | return ret; | 72 | return ret; |
| 68 | } | 73 | } |
| 69 | 74 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c index 873a8d264d5c..ec222c665602 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | |||
| @@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr) | |||
| 272 | UCODE_ID_CP_MEC_JT1_MASK | | 272 | UCODE_ID_CP_MEC_JT1_MASK | |
| 273 | UCODE_ID_CP_MEC_JT2_MASK; | 273 | UCODE_ID_CP_MEC_JT2_MASK; |
| 274 | 274 | ||
| 275 | if (smumgr->chip_id == CHIP_STONEY) | ||
| 276 | fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); | ||
| 277 | |||
| 275 | cz_request_smu_load_fw(smumgr); | 278 | cz_request_smu_load_fw(smumgr); |
| 276 | cz_check_fw_load_finish(smumgr, fw_to_check); | 279 | cz_check_fw_load_finish(smumgr, fw_to_check); |
| 277 | 280 | ||
| @@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr) | |||
| 282 | return ret; | 285 | return ret; |
| 283 | } | 286 | } |
| 284 | 287 | ||
| 285 | static uint8_t cz_translate_firmware_enum_to_arg( | 288 | static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr, |
| 286 | enum cz_scratch_entry firmware_enum) | 289 | enum cz_scratch_entry firmware_enum) |
| 287 | { | 290 | { |
| 288 | uint8_t ret = 0; | 291 | uint8_t ret = 0; |
| @@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg( | |||
| 292 | ret = UCODE_ID_SDMA0; | 295 | ret = UCODE_ID_SDMA0; |
| 293 | break; | 296 | break; |
| 294 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: | 297 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: |
| 295 | ret = UCODE_ID_SDMA1; | 298 | if (smumgr->chip_id == CHIP_STONEY) |
| 299 | ret = UCODE_ID_SDMA0; | ||
| 300 | else | ||
| 301 | ret = UCODE_ID_SDMA1; | ||
| 296 | break; | 302 | break; |
| 297 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: | 303 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: |
| 298 | ret = UCODE_ID_CP_CE; | 304 | ret = UCODE_ID_CP_CE; |
| @@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg( | |||
| 307 | ret = UCODE_ID_CP_MEC_JT1; | 313 | ret = UCODE_ID_CP_MEC_JT1; |
| 308 | break; | 314 | break; |
| 309 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: | 315 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: |
| 310 | ret = UCODE_ID_CP_MEC_JT2; | 316 | if (smumgr->chip_id == CHIP_STONEY) |
| 317 | ret = UCODE_ID_CP_MEC_JT1; | ||
| 318 | else | ||
| 319 | ret = UCODE_ID_CP_MEC_JT2; | ||
| 311 | break; | 320 | break; |
| 312 | case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: | 321 | case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: |
| 313 | ret = UCODE_ID_GMCON_RENG; | 322 | ret = UCODE_ID_GMCON_RENG; |
| @@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task( | |||
| 396 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; | 405 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; |
| 397 | 406 | ||
| 398 | task->type = type; | 407 | task->type = type; |
| 399 | task->arg = cz_translate_firmware_enum_to_arg(fw_enum); | 408 | task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); |
| 400 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; | 409 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; |
| 401 | 410 | ||
| 402 | for (i = 0; i < cz_smu->scratch_buffer_length; i++) | 411 | for (i = 0; i < cz_smu->scratch_buffer_length; i++) |
| @@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task( | |||
| 433 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; | 442 | struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++]; |
| 434 | 443 | ||
| 435 | task->type = TASK_TYPE_UCODE_LOAD; | 444 | task->type = TASK_TYPE_UCODE_LOAD; |
| 436 | task->arg = cz_translate_firmware_enum_to_arg(fw_enum); | 445 | task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum); |
| 437 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; | 446 | task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count; |
| 438 | 447 | ||
| 439 | for (i = 0; i < cz_smu->driver_buffer_length; i++) | 448 | for (i = 0; i < cz_smu->driver_buffer_length; i++) |
| @@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr) | |||
| 509 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | 518 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); |
| 510 | cz_smu_populate_single_ucode_load_task(smumgr, | 519 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 511 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | 520 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); |
| 512 | cz_smu_populate_single_ucode_load_task(smumgr, | 521 | |
| 522 | if (smumgr->chip_id == CHIP_STONEY) | ||
| 523 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 524 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | ||
| 525 | else | ||
| 526 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 513 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | 527 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); |
| 528 | |||
| 514 | cz_smu_populate_single_ucode_load_task(smumgr, | 529 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 515 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); | 530 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); |
| 516 | 531 | ||
| @@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) | |||
| 551 | 566 | ||
| 552 | cz_smu_populate_single_ucode_load_task(smumgr, | 567 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 553 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); | 568 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); |
| 554 | cz_smu_populate_single_ucode_load_task(smumgr, | 569 | if (smumgr->chip_id == CHIP_STONEY) |
| 570 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 571 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); | ||
| 572 | else | ||
| 573 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 555 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); | 574 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); |
| 556 | cz_smu_populate_single_ucode_load_task(smumgr, | 575 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 557 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); | 576 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); |
| @@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr) | |||
| 561 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | 580 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); |
| 562 | cz_smu_populate_single_ucode_load_task(smumgr, | 581 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 563 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | 582 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); |
| 564 | cz_smu_populate_single_ucode_load_task(smumgr, | 583 | if (smumgr->chip_id == CHIP_STONEY) |
| 584 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 585 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | ||
| 586 | else | ||
| 587 | cz_smu_populate_single_ucode_load_task(smumgr, | ||
| 565 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | 588 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); |
| 566 | cz_smu_populate_single_ucode_load_task(smumgr, | 589 | cz_smu_populate_single_ucode_load_task(smumgr, |
| 567 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); | 590 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); |
| @@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr) | |||
| 618 | 641 | ||
| 619 | for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { | 642 | for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) { |
| 620 | 643 | ||
| 621 | firmware_type = cz_translate_firmware_enum_to_arg( | 644 | firmware_type = cz_translate_firmware_enum_to_arg(smumgr, |
| 622 | firmware_list[i]); | 645 | firmware_list[i]); |
| 623 | 646 | ||
| 624 | ucode_id = cz_convert_fw_type_to_cgs(firmware_type); | 647 | ucode_id = cz_convert_fw_type_to_cgs(firmware_type); |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 57cccd68ca52..7c523060a076 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -946,9 +946,23 @@ static void wait_for_fences(struct drm_device *dev, | |||
| 946 | } | 946 | } |
| 947 | } | 947 | } |
| 948 | 948 | ||
| 949 | static bool framebuffer_changed(struct drm_device *dev, | 949 | /** |
| 950 | struct drm_atomic_state *old_state, | 950 | * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed |
| 951 | struct drm_crtc *crtc) | 951 | * @dev: DRM device |
| 952 | * @old_state: atomic state object with old state structures | ||
| 953 | * @crtc: DRM crtc | ||
| 954 | * | ||
| 955 | * Checks whether the framebuffer used for this CRTC changes as a result of | ||
| 956 | * the atomic update. This is useful for drivers which cannot use | ||
| 957 | * drm_atomic_helper_wait_for_vblanks() and need to reimplement its | ||
| 958 | * functionality. | ||
| 959 | * | ||
| 960 | * Returns: | ||
| 961 | * true if the framebuffer changed. | ||
| 962 | */ | ||
| 963 | bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, | ||
| 964 | struct drm_atomic_state *old_state, | ||
| 965 | struct drm_crtc *crtc) | ||
| 952 | { | 966 | { |
| 953 | struct drm_plane *plane; | 967 | struct drm_plane *plane; |
| 954 | struct drm_plane_state *old_plane_state; | 968 | struct drm_plane_state *old_plane_state; |
| @@ -965,6 +979,7 @@ static bool framebuffer_changed(struct drm_device *dev, | |||
| 965 | 979 | ||
| 966 | return false; | 980 | return false; |
| 967 | } | 981 | } |
| 982 | EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed); | ||
| 968 | 983 | ||
| 969 | /** | 984 | /** |
| 970 | * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs | 985 | * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs |
| @@ -999,7 +1014,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, | |||
| 999 | if (old_state->legacy_cursor_update) | 1014 | if (old_state->legacy_cursor_update) |
| 1000 | continue; | 1015 | continue; |
| 1001 | 1016 | ||
| 1002 | if (!framebuffer_changed(dev, old_state, crtc)) | 1017 | if (!drm_atomic_helper_framebuffer_changed(dev, |
| 1018 | old_state, crtc)) | ||
| 1003 | continue; | 1019 | continue; |
| 1004 | 1020 | ||
| 1005 | ret = drm_crtc_vblank_get(crtc); | 1021 | ret = drm_crtc_vblank_get(crtc); |
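Editor's note: the drm_atomic_helper hunk above exports the former static framebuffer_changed() so drivers that cannot use drm_atomic_helper_wait_for_vblanks() can reuse the check. A hypothetical driver-side sketch; the loop macro and the exported helper exist in this kernel era, while the surrounding function and the wait itself are made up:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void example_wait_for_flips(struct drm_device *dev,
				   struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		/* skip CRTCs whose framebuffers did not change in this commit */
		if (!drm_atomic_helper_framebuffer_changed(dev, old_state, crtc))
			continue;
		/* driver-specific wait for the new framebuffer to reach the screen */
	}
}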
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h index 9e585d51fb78..e881482b5971 100644 --- a/drivers/gpu/drm/etnaviv/common.xml.h +++ b/drivers/gpu/drm/etnaviv/common.xml.h | |||
| @@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng | |||
| 8 | git clone git://0x04.net/rules-ng-ng | 8 | git clone git://0x04.net/rules-ng-ng |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01) | 11 | - state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53) |
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | 12 | - common.xml ( 18379 bytes, from 2015-12-12 09:02:53) |
| 13 | 13 | ||
| 14 | Copyright (C) 2015 | 14 | Copyright (C) 2015 |
| 15 | */ | 15 | */ |
| @@ -30,15 +30,19 @@ Copyright (C) 2015 | |||
| 30 | #define ENDIAN_MODE_NO_SWAP 0x00000000 | 30 | #define ENDIAN_MODE_NO_SWAP 0x00000000 |
| 31 | #define ENDIAN_MODE_SWAP_16 0x00000001 | 31 | #define ENDIAN_MODE_SWAP_16 0x00000001 |
| 32 | #define ENDIAN_MODE_SWAP_32 0x00000002 | 32 | #define ENDIAN_MODE_SWAP_32 0x00000002 |
| 33 | #define chipModel_GC200 0x00000200 | ||
| 33 | #define chipModel_GC300 0x00000300 | 34 | #define chipModel_GC300 0x00000300 |
| 34 | #define chipModel_GC320 0x00000320 | 35 | #define chipModel_GC320 0x00000320 |
| 36 | #define chipModel_GC328 0x00000328 | ||
| 35 | #define chipModel_GC350 0x00000350 | 37 | #define chipModel_GC350 0x00000350 |
| 36 | #define chipModel_GC355 0x00000355 | 38 | #define chipModel_GC355 0x00000355 |
| 37 | #define chipModel_GC400 0x00000400 | 39 | #define chipModel_GC400 0x00000400 |
| 38 | #define chipModel_GC410 0x00000410 | 40 | #define chipModel_GC410 0x00000410 |
| 39 | #define chipModel_GC420 0x00000420 | 41 | #define chipModel_GC420 0x00000420 |
| 42 | #define chipModel_GC428 0x00000428 | ||
| 40 | #define chipModel_GC450 0x00000450 | 43 | #define chipModel_GC450 0x00000450 |
| 41 | #define chipModel_GC500 0x00000500 | 44 | #define chipModel_GC500 0x00000500 |
| 45 | #define chipModel_GC520 0x00000520 | ||
| 42 | #define chipModel_GC530 0x00000530 | 46 | #define chipModel_GC530 0x00000530 |
| 43 | #define chipModel_GC600 0x00000600 | 47 | #define chipModel_GC600 0x00000600 |
| 44 | #define chipModel_GC700 0x00000700 | 48 | #define chipModel_GC700 0x00000700 |
| @@ -46,9 +50,16 @@ Copyright (C) 2015 | |||
| 46 | #define chipModel_GC860 0x00000860 | 50 | #define chipModel_GC860 0x00000860 |
| 47 | #define chipModel_GC880 0x00000880 | 51 | #define chipModel_GC880 0x00000880 |
| 48 | #define chipModel_GC1000 0x00001000 | 52 | #define chipModel_GC1000 0x00001000 |
| 53 | #define chipModel_GC1500 0x00001500 | ||
| 49 | #define chipModel_GC2000 0x00002000 | 54 | #define chipModel_GC2000 0x00002000 |
| 50 | #define chipModel_GC2100 0x00002100 | 55 | #define chipModel_GC2100 0x00002100 |
| 56 | #define chipModel_GC2200 0x00002200 | ||
| 57 | #define chipModel_GC2500 0x00002500 | ||
| 58 | #define chipModel_GC3000 0x00003000 | ||
| 51 | #define chipModel_GC4000 0x00004000 | 59 | #define chipModel_GC4000 0x00004000 |
| 60 | #define chipModel_GC5000 0x00005000 | ||
| 61 | #define chipModel_GC5200 0x00005200 | ||
| 62 | #define chipModel_GC6400 0x00006400 | ||
| 52 | #define RGBA_BITS_R 0x00000001 | 63 | #define RGBA_BITS_R 0x00000001 |
| 53 | #define RGBA_BITS_G 0x00000002 | 64 | #define RGBA_BITS_G 0x00000002 |
| 54 | #define RGBA_BITS_B 0x00000004 | 65 | #define RGBA_BITS_B 0x00000004 |
| @@ -160,7 +171,7 @@ Copyright (C) 2015 | |||
| 160 | #define chipMinorFeatures2_UNK8 0x00000100 | 171 | #define chipMinorFeatures2_UNK8 0x00000100 |
| 161 | #define chipMinorFeatures2_UNK9 0x00000200 | 172 | #define chipMinorFeatures2_UNK9 0x00000200 |
| 162 | #define chipMinorFeatures2_UNK10 0x00000400 | 173 | #define chipMinorFeatures2_UNK10 0x00000400 |
| 163 | #define chipMinorFeatures2_SAMPLERBASE_16 0x00000800 | 174 | #define chipMinorFeatures2_HALTI1 0x00000800 |
| 164 | #define chipMinorFeatures2_UNK12 0x00001000 | 175 | #define chipMinorFeatures2_UNK12 0x00001000 |
| 165 | #define chipMinorFeatures2_UNK13 0x00002000 | 176 | #define chipMinorFeatures2_UNK13 0x00002000 |
| 166 | #define chipMinorFeatures2_UNK14 0x00004000 | 177 | #define chipMinorFeatures2_UNK14 0x00004000 |
| @@ -189,7 +200,7 @@ Copyright (C) 2015 | |||
| 189 | #define chipMinorFeatures3_UNK5 0x00000020 | 200 | #define chipMinorFeatures3_UNK5 0x00000020 |
| 190 | #define chipMinorFeatures3_UNK6 0x00000040 | 201 | #define chipMinorFeatures3_UNK6 0x00000040 |
| 191 | #define chipMinorFeatures3_UNK7 0x00000080 | 202 | #define chipMinorFeatures3_UNK7 0x00000080 |
| 192 | #define chipMinorFeatures3_UNK8 0x00000100 | 203 | #define chipMinorFeatures3_FAST_MSAA 0x00000100 |
| 193 | #define chipMinorFeatures3_UNK9 0x00000200 | 204 | #define chipMinorFeatures3_UNK9 0x00000200 |
| 194 | #define chipMinorFeatures3_BUG_FIXES10 0x00000400 | 205 | #define chipMinorFeatures3_BUG_FIXES10 0x00000400 |
| 195 | #define chipMinorFeatures3_UNK11 0x00000800 | 206 | #define chipMinorFeatures3_UNK11 0x00000800 |
| @@ -199,7 +210,7 @@ Copyright (C) 2015 | |||
| 199 | #define chipMinorFeatures3_UNK15 0x00008000 | 210 | #define chipMinorFeatures3_UNK15 0x00008000 |
| 200 | #define chipMinorFeatures3_UNK16 0x00010000 | 211 | #define chipMinorFeatures3_UNK16 0x00010000 |
| 201 | #define chipMinorFeatures3_UNK17 0x00020000 | 212 | #define chipMinorFeatures3_UNK17 0x00020000 |
| 202 | #define chipMinorFeatures3_UNK18 0x00040000 | 213 | #define chipMinorFeatures3_ACE 0x00040000 |
| 203 | #define chipMinorFeatures3_UNK19 0x00080000 | 214 | #define chipMinorFeatures3_UNK19 0x00080000 |
| 204 | #define chipMinorFeatures3_UNK20 0x00100000 | 215 | #define chipMinorFeatures3_UNK20 0x00100000 |
| 205 | #define chipMinorFeatures3_UNK21 0x00200000 | 216 | #define chipMinorFeatures3_UNK21 0x00200000 |
| @@ -207,7 +218,7 @@ Copyright (C) 2015 | |||
| 207 | #define chipMinorFeatures3_UNK23 0x00800000 | 218 | #define chipMinorFeatures3_UNK23 0x00800000 |
| 208 | #define chipMinorFeatures3_UNK24 0x01000000 | 219 | #define chipMinorFeatures3_UNK24 0x01000000 |
| 209 | #define chipMinorFeatures3_UNK25 0x02000000 | 220 | #define chipMinorFeatures3_UNK25 0x02000000 |
| 210 | #define chipMinorFeatures3_UNK26 0x04000000 | 221 | #define chipMinorFeatures3_NEW_HZ 0x04000000 |
| 211 | #define chipMinorFeatures3_UNK27 0x08000000 | 222 | #define chipMinorFeatures3_UNK27 0x08000000 |
| 212 | #define chipMinorFeatures3_UNK28 0x10000000 | 223 | #define chipMinorFeatures3_UNK28 0x10000000 |
| 213 | #define chipMinorFeatures3_UNK29 0x20000000 | 224 | #define chipMinorFeatures3_UNK29 0x20000000 |
| @@ -229,9 +240,9 @@ Copyright (C) 2015 | |||
| 229 | #define chipMinorFeatures4_UNK13 0x00002000 | 240 | #define chipMinorFeatures4_UNK13 0x00002000 |
| 230 | #define chipMinorFeatures4_UNK14 0x00004000 | 241 | #define chipMinorFeatures4_UNK14 0x00004000 |
| 231 | #define chipMinorFeatures4_UNK15 0x00008000 | 242 | #define chipMinorFeatures4_UNK15 0x00008000 |
| 232 | #define chipMinorFeatures4_UNK16 0x00010000 | 243 | #define chipMinorFeatures4_HALTI2 0x00010000 |
| 233 | #define chipMinorFeatures4_UNK17 0x00020000 | 244 | #define chipMinorFeatures4_UNK17 0x00020000 |
| 234 | #define chipMinorFeatures4_UNK18 0x00040000 | 245 | #define chipMinorFeatures4_SMALL_MSAA 0x00040000 |
| 235 | #define chipMinorFeatures4_UNK19 0x00080000 | 246 | #define chipMinorFeatures4_UNK19 0x00080000 |
| 236 | #define chipMinorFeatures4_UNK20 0x00100000 | 247 | #define chipMinorFeatures4_UNK20 0x00100000 |
| 237 | #define chipMinorFeatures4_UNK21 0x00200000 | 248 | #define chipMinorFeatures4_UNK21 0x00200000 |
| @@ -245,5 +256,37 @@ Copyright (C) 2015 | |||
| 245 | #define chipMinorFeatures4_UNK29 0x20000000 | 256 | #define chipMinorFeatures4_UNK29 0x20000000 |
| 246 | #define chipMinorFeatures4_UNK30 0x40000000 | 257 | #define chipMinorFeatures4_UNK30 0x40000000 |
| 247 | #define chipMinorFeatures4_UNK31 0x80000000 | 258 | #define chipMinorFeatures4_UNK31 0x80000000 |
| 259 | #define chipMinorFeatures5_UNK0 0x00000001 | ||
| 260 | #define chipMinorFeatures5_UNK1 0x00000002 | ||
| 261 | #define chipMinorFeatures5_UNK2 0x00000004 | ||
| 262 | #define chipMinorFeatures5_UNK3 0x00000008 | ||
| 263 | #define chipMinorFeatures5_UNK4 0x00000010 | ||
| 264 | #define chipMinorFeatures5_UNK5 0x00000020 | ||
| 265 | #define chipMinorFeatures5_UNK6 0x00000040 | ||
| 266 | #define chipMinorFeatures5_UNK7 0x00000080 | ||
| 267 | #define chipMinorFeatures5_UNK8 0x00000100 | ||
| 268 | #define chipMinorFeatures5_HALTI3 0x00000200 | ||
| 269 | #define chipMinorFeatures5_UNK10 0x00000400 | ||
| 270 | #define chipMinorFeatures5_UNK11 0x00000800 | ||
| 271 | #define chipMinorFeatures5_UNK12 0x00001000 | ||
| 272 | #define chipMinorFeatures5_UNK13 0x00002000 | ||
| 273 | #define chipMinorFeatures5_UNK14 0x00004000 | ||
| 274 | #define chipMinorFeatures5_UNK15 0x00008000 | ||
| 275 | #define chipMinorFeatures5_UNK16 0x00010000 | ||
| 276 | #define chipMinorFeatures5_UNK17 0x00020000 | ||
| 277 | #define chipMinorFeatures5_UNK18 0x00040000 | ||
| 278 | #define chipMinorFeatures5_UNK19 0x00080000 | ||
| 279 | #define chipMinorFeatures5_UNK20 0x00100000 | ||
| 280 | #define chipMinorFeatures5_UNK21 0x00200000 | ||
| 281 | #define chipMinorFeatures5_UNK22 0x00400000 | ||
| 282 | #define chipMinorFeatures5_UNK23 0x00800000 | ||
| 283 | #define chipMinorFeatures5_UNK24 0x01000000 | ||
| 284 | #define chipMinorFeatures5_UNK25 0x02000000 | ||
| 285 | #define chipMinorFeatures5_UNK26 0x04000000 | ||
| 286 | #define chipMinorFeatures5_UNK27 0x08000000 | ||
| 287 | #define chipMinorFeatures5_UNK28 0x10000000 | ||
| 288 | #define chipMinorFeatures5_UNK29 0x20000000 | ||
| 289 | #define chipMinorFeatures5_UNK30 0x40000000 | ||
| 290 | #define chipMinorFeatures5_UNK31 0x80000000 | ||
| 248 | 291 | ||
| 249 | #endif /* COMMON_XML */ | 292 | #endif /* COMMON_XML */ |
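The constants above come from the rules-ng-ng generated header: the new chipModel_* entries are additional GC part numbers written as hex digits, and each chipMinorFeaturesN_* name is one bit of the word read back from the matching VIVS_HI_CHIP_MINOR_FEATURE_n register. A feature test is therefore a plain mask check; a minimal sketch, assuming the gpu->identity fields used later in this series:

    /* Illustrative only: test two of the newly named feature bits against
     * the identity words cached by the driver. */
    static bool gpu_has_halti1(const struct etnaviv_gpu *gpu)
    {
            return gpu->identity.minor_features2 & chipMinorFeatures2_HALTI1;
    }

    static bool gpu_has_fast_msaa(const struct etnaviv_gpu *gpu)
    {
            return gpu->identity.minor_features3 & chipMinorFeatures3_FAST_MSAA;
    }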
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 5c89ebb52fd2..e8858985f01e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c | |||
| @@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = { | |||
| 668 | .probe = etnaviv_pdev_probe, | 668 | .probe = etnaviv_pdev_probe, |
| 669 | .remove = etnaviv_pdev_remove, | 669 | .remove = etnaviv_pdev_remove, |
| 670 | .driver = { | 670 | .driver = { |
| 671 | .owner = THIS_MODULE, | ||
| 672 | .name = "etnaviv", | 671 | .name = "etnaviv", |
| 673 | .of_match_table = dt_match, | 672 | .of_match_table = dt_match, |
| 674 | }, | 673 | }, |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index d6bd438bd5be..1cd6046e76b1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h | |||
| @@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, | |||
| 85 | struct dma_buf_attachment *attach, struct sg_table *sg); | 85 | struct dma_buf_attachment *attach, struct sg_table *sg); |
| 86 | int etnaviv_gem_prime_pin(struct drm_gem_object *obj); | 86 | int etnaviv_gem_prime_pin(struct drm_gem_object *obj); |
| 87 | void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); | 87 | void etnaviv_gem_prime_unpin(struct drm_gem_object *obj); |
| 88 | void *etnaviv_gem_vaddr(struct drm_gem_object *obj); | 88 | void *etnaviv_gem_vmap(struct drm_gem_object *obj); |
| 89 | int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, | 89 | int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op, |
| 90 | struct timespec *timeout); | 90 | struct timespec *timeout); |
| 91 | int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); | 91 | int etnaviv_gem_cpu_fini(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index bf8fa859e8be..4a29eeadbf1e 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c | |||
| @@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) | |||
| 201 | 201 | ||
| 202 | obj = vram->object; | 202 | obj = vram->object; |
| 203 | 203 | ||
| 204 | mutex_lock(&obj->lock); | ||
| 204 | pages = etnaviv_gem_get_pages(obj); | 205 | pages = etnaviv_gem_get_pages(obj); |
| 206 | mutex_unlock(&obj->lock); | ||
| 205 | if (pages) { | 207 | if (pages) { |
| 206 | int j; | 208 | int j; |
| 207 | 209 | ||
| @@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu) | |||
| 213 | 215 | ||
| 214 | iter.hdr->iova = cpu_to_le64(vram->iova); | 216 | iter.hdr->iova = cpu_to_le64(vram->iova); |
| 215 | 217 | ||
| 216 | vaddr = etnaviv_gem_vaddr(&obj->base); | 218 | vaddr = etnaviv_gem_vmap(&obj->base); |
| 217 | if (vaddr && !IS_ERR(vaddr)) | 219 | if (vaddr) |
| 218 | memcpy(iter.data, vaddr, obj->base.size); | 220 | memcpy(iter.data, vaddr, obj->base.size); |
| 219 | 221 | ||
| 220 | etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + | 222 | etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data + |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 9f77c3b94cc6..4b519e4309b2 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c | |||
| @@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj) | |||
| 353 | drm_gem_object_unreference_unlocked(obj); | 353 | drm_gem_object_unreference_unlocked(obj); |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | void *etnaviv_gem_vaddr(struct drm_gem_object *obj) | 356 | void *etnaviv_gem_vmap(struct drm_gem_object *obj) |
| 357 | { | 357 | { |
| 358 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); | 358 | struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); |
| 359 | 359 | ||
| 360 | mutex_lock(&etnaviv_obj->lock); | 360 | if (etnaviv_obj->vaddr) |
| 361 | if (!etnaviv_obj->vaddr) { | 361 | return etnaviv_obj->vaddr; |
| 362 | struct page **pages = etnaviv_gem_get_pages(etnaviv_obj); | ||
| 363 | |||
| 364 | if (IS_ERR(pages)) | ||
| 365 | return ERR_CAST(pages); | ||
| 366 | 362 | ||
| 367 | etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 363 | mutex_lock(&etnaviv_obj->lock); |
| 368 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 364 | /* |
| 369 | } | 365 | * Need to check again, as we might have raced with another thread |
| 366 | * while waiting for the mutex. | ||
| 367 | */ | ||
| 368 | if (!etnaviv_obj->vaddr) | ||
| 369 | etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj); | ||
| 370 | mutex_unlock(&etnaviv_obj->lock); | 370 | mutex_unlock(&etnaviv_obj->lock); |
| 371 | 371 | ||
| 372 | return etnaviv_obj->vaddr; | 372 | return etnaviv_obj->vaddr; |
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) | ||
| 376 | { | ||
| 377 | struct page **pages; | ||
| 378 | |||
| 379 | lockdep_assert_held(&obj->lock); | ||
| 380 | |||
| 381 | pages = etnaviv_gem_get_pages(obj); | ||
| 382 | if (IS_ERR(pages)) | ||
| 383 | return NULL; | ||
| 384 | |||
| 385 | return vmap(pages, obj->base.size >> PAGE_SHIFT, | ||
| 386 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | ||
| 387 | } | ||
| 388 | |||
| 375 | static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) | 389 | static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) |
| 376 | { | 390 | { |
| 377 | if (op & ETNA_PREP_READ) | 391 | if (op & ETNA_PREP_READ) |
| @@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) | |||
| 522 | static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { | 536 | static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = { |
| 523 | .get_pages = etnaviv_gem_shmem_get_pages, | 537 | .get_pages = etnaviv_gem_shmem_get_pages, |
| 524 | .release = etnaviv_gem_shmem_release, | 538 | .release = etnaviv_gem_shmem_release, |
| 539 | .vmap = etnaviv_gem_vmap_impl, | ||
| 525 | }; | 540 | }; |
| 526 | 541 | ||
| 527 | void etnaviv_gem_free_object(struct drm_gem_object *obj) | 542 | void etnaviv_gem_free_object(struct drm_gem_object *obj) |
| @@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj) | |||
| 866 | static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { | 881 | static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = { |
| 867 | .get_pages = etnaviv_gem_userptr_get_pages, | 882 | .get_pages = etnaviv_gem_userptr_get_pages, |
| 868 | .release = etnaviv_gem_userptr_release, | 883 | .release = etnaviv_gem_userptr_release, |
| 884 | .vmap = etnaviv_gem_vmap_impl, | ||
| 869 | }; | 885 | }; |
| 870 | 886 | ||
| 871 | int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, | 887 | int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, |
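The etnaviv_gem_vmap() rework above is a double-checked lazy initialisation: the unlocked fast path returns an already-created mapping, and only the slow path takes obj->lock, re-checks, and calls the per-object-type ->vmap op (vmap() over shmem pages here, dma_buf_vmap() for imported buffers in the prime patch below). The same shape in isolation, as a generic sketch rather than the driver code:

    /* Kernel-context sketch (struct mutex from <linux/mutex.h>). The pointer
     * only ever transitions NULL -> valid, and that single write happens
     * under the mutex, which is what keeps the unlocked fast path safe. */
    struct lazy_map {
            struct mutex lock;
            void *vaddr;
            void *(*create)(struct lazy_map *m);    /* may fail and return NULL */
    };

    static void *lazy_map_get(struct lazy_map *m)
    {
            if (m->vaddr)
                    return m->vaddr;

            mutex_lock(&m->lock);
            /* Re-check: another thread may have created it while we slept. */
            if (!m->vaddr)
                    m->vaddr = m->create(m);
            mutex_unlock(&m->lock);

            return m->vaddr;
    }

A failed ->vmap leaves vaddr NULL, so the next caller simply retries, which matches how the new etnaviv_gem_vmap() treats a NULL return from the op.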
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index a300b4b3d545..ab5df8147a5f 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h | |||
| @@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj) | |||
| 78 | struct etnaviv_gem_ops { | 78 | struct etnaviv_gem_ops { |
| 79 | int (*get_pages)(struct etnaviv_gem_object *); | 79 | int (*get_pages)(struct etnaviv_gem_object *); |
| 80 | void (*release)(struct etnaviv_gem_object *); | 80 | void (*release)(struct etnaviv_gem_object *); |
| 81 | void *(*vmap)(struct etnaviv_gem_object *); | ||
| 81 | }; | 82 | }; |
| 82 | 83 | ||
| 83 | static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) | 84 | static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index e94db4f95770..4e67395f5fa1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | |||
| @@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj) | |||
| 31 | 31 | ||
| 32 | void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) | 32 | void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj) |
| 33 | { | 33 | { |
| 34 | return etnaviv_gem_vaddr(obj); | 34 | return etnaviv_gem_vmap(obj); |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) | 37 | void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) |
| @@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) | |||
| 77 | drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); | 77 | drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) | ||
| 81 | { | ||
| 82 | lockdep_assert_held(&etnaviv_obj->lock); | ||
| 83 | |||
| 84 | return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf); | ||
| 85 | } | ||
| 86 | |||
| 80 | static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { | 87 | static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = { |
| 81 | /* .get_pages should never be called */ | 88 | /* .get_pages should never be called */ |
| 82 | .release = etnaviv_gem_prime_release, | 89 | .release = etnaviv_gem_prime_release, |
| 90 | .vmap = etnaviv_gem_prime_vmap_impl, | ||
| 83 | }; | 91 | }; |
| 84 | 92 | ||
| 85 | struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, | 93 | struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 056a72e6ed26..a33162cf4f4c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
| @@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) | |||
| 72 | *value = gpu->identity.minor_features3; | 72 | *value = gpu->identity.minor_features3; |
| 73 | break; | 73 | break; |
| 74 | 74 | ||
| 75 | case ETNAVIV_PARAM_GPU_FEATURES_5: | ||
| 76 | *value = gpu->identity.minor_features4; | ||
| 77 | break; | ||
| 78 | |||
| 79 | case ETNAVIV_PARAM_GPU_FEATURES_6: | ||
| 80 | *value = gpu->identity.minor_features5; | ||
| 81 | break; | ||
| 82 | |||
| 75 | case ETNAVIV_PARAM_GPU_STREAM_COUNT: | 83 | case ETNAVIV_PARAM_GPU_STREAM_COUNT: |
| 76 | *value = gpu->identity.stream_count; | 84 | *value = gpu->identity.stream_count; |
| 77 | break; | 85 | break; |
| @@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) | |||
| 112 | *value = gpu->identity.num_constants; | 120 | *value = gpu->identity.num_constants; |
| 113 | break; | 121 | break; |
| 114 | 122 | ||
| 123 | case ETNAVIV_PARAM_GPU_NUM_VARYINGS: | ||
| 124 | *value = gpu->identity.varyings_count; | ||
| 125 | break; | ||
| 126 | |||
| 115 | default: | 127 | default: |
| 116 | DBG("%s: invalid param: %u", dev_name(gpu->dev), param); | 128 | DBG("%s: invalid param: %u", dev_name(gpu->dev), param); |
| 117 | return -EINVAL; | 129 | return -EINVAL; |
| @@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) | |||
| 120 | return 0; | 132 | return 0; |
| 121 | } | 133 | } |
| 122 | 134 | ||
| 135 | |||
| 136 | #define etnaviv_is_model_rev(gpu, mod, rev) \ | ||
| 137 | ((gpu)->identity.model == chipModel_##mod && \ | ||
| 138 | (gpu)->identity.revision == rev) | ||
| 139 | #define etnaviv_field(val, field) \ | ||
| 140 | (((val) & field##__MASK) >> field##__SHIFT) | ||
| 141 | |||
| 123 | static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | 142 | static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) |
| 124 | { | 143 | { |
| 125 | if (gpu->identity.minor_features0 & | 144 | if (gpu->identity.minor_features0 & |
| 126 | chipMinorFeatures0_MORE_MINOR_FEATURES) { | 145 | chipMinorFeatures0_MORE_MINOR_FEATURES) { |
| 127 | u32 specs[2]; | 146 | u32 specs[4]; |
| 147 | unsigned int streams; | ||
| 128 | 148 | ||
| 129 | specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); | 149 | specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS); |
| 130 | specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); | 150 | specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2); |
| 131 | 151 | specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3); | |
| 132 | gpu->identity.stream_count = | 152 | specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4); |
| 133 | (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK) | 153 | |
| 134 | >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT; | 154 | gpu->identity.stream_count = etnaviv_field(specs[0], |
| 135 | gpu->identity.register_max = | 155 | VIVS_HI_CHIP_SPECS_STREAM_COUNT); |
| 136 | (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK) | 156 | gpu->identity.register_max = etnaviv_field(specs[0], |
| 137 | >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT; | 157 | VIVS_HI_CHIP_SPECS_REGISTER_MAX); |
| 138 | gpu->identity.thread_count = | 158 | gpu->identity.thread_count = etnaviv_field(specs[0], |
| 139 | (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK) | 159 | VIVS_HI_CHIP_SPECS_THREAD_COUNT); |
| 140 | >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT; | 160 | gpu->identity.vertex_cache_size = etnaviv_field(specs[0], |
| 141 | gpu->identity.vertex_cache_size = | 161 | VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE); |
| 142 | (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK) | 162 | gpu->identity.shader_core_count = etnaviv_field(specs[0], |
| 143 | >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT; | 163 | VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT); |
| 144 | gpu->identity.shader_core_count = | 164 | gpu->identity.pixel_pipes = etnaviv_field(specs[0], |
| 145 | (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK) | 165 | VIVS_HI_CHIP_SPECS_PIXEL_PIPES); |
| 146 | >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT; | ||
| 147 | gpu->identity.pixel_pipes = | ||
| 148 | (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK) | ||
| 149 | >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT; | ||
| 150 | gpu->identity.vertex_output_buffer_size = | 166 | gpu->identity.vertex_output_buffer_size = |
| 151 | (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK) | 167 | etnaviv_field(specs[0], |
| 152 | >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT; | 168 | VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE); |
| 153 | 169 | ||
| 154 | gpu->identity.buffer_size = | 170 | gpu->identity.buffer_size = etnaviv_field(specs[1], |
| 155 | (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK) | 171 | VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE); |
| 156 | >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT; | 172 | gpu->identity.instruction_count = etnaviv_field(specs[1], |
| 157 | gpu->identity.instruction_count = | 173 | VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT); |
| 158 | (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK) | 174 | gpu->identity.num_constants = etnaviv_field(specs[1], |
| 159 | >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT; | 175 | VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS); |
| 160 | gpu->identity.num_constants = | 176 | |
| 161 | (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK) | 177 | gpu->identity.varyings_count = etnaviv_field(specs[2], |
| 162 | >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT; | 178 | VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT); |
| 179 | |||
| 180 | /* This overrides the value from older register if non-zero */ | ||
| 181 | streams = etnaviv_field(specs[3], | ||
| 182 | VIVS_HI_CHIP_SPECS_4_STREAM_COUNT); | ||
| 183 | if (streams) | ||
| 184 | gpu->identity.stream_count = streams; | ||
| 163 | } | 185 | } |
| 164 | 186 | ||
| 165 | /* Fill in the stream count if not specified */ | 187 | /* Fill in the stream count if not specified */ |
| @@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | |||
| 173 | /* Convert the register max value */ | 195 | /* Convert the register max value */ |
| 174 | if (gpu->identity.register_max) | 196 | if (gpu->identity.register_max) |
| 175 | gpu->identity.register_max = 1 << gpu->identity.register_max; | 197 | gpu->identity.register_max = 1 << gpu->identity.register_max; |
| 176 | else if (gpu->identity.model == 0x0400) | 198 | else if (gpu->identity.model == chipModel_GC400) |
| 177 | gpu->identity.register_max = 32; | 199 | gpu->identity.register_max = 32; |
| 178 | else | 200 | else |
| 179 | gpu->identity.register_max = 64; | 201 | gpu->identity.register_max = 64; |
| @@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | |||
| 181 | /* Convert thread count */ | 203 | /* Convert thread count */ |
| 182 | if (gpu->identity.thread_count) | 204 | if (gpu->identity.thread_count) |
| 183 | gpu->identity.thread_count = 1 << gpu->identity.thread_count; | 205 | gpu->identity.thread_count = 1 << gpu->identity.thread_count; |
| 184 | else if (gpu->identity.model == 0x0400) | 206 | else if (gpu->identity.model == chipModel_GC400) |
| 185 | gpu->identity.thread_count = 64; | 207 | gpu->identity.thread_count = 64; |
| 186 | else if (gpu->identity.model == 0x0500 || | 208 | else if (gpu->identity.model == chipModel_GC500 || |
| 187 | gpu->identity.model == 0x0530) | 209 | gpu->identity.model == chipModel_GC530) |
| 188 | gpu->identity.thread_count = 128; | 210 | gpu->identity.thread_count = 128; |
| 189 | else | 211 | else |
| 190 | gpu->identity.thread_count = 256; | 212 | gpu->identity.thread_count = 256; |
| @@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | |||
| 206 | if (gpu->identity.vertex_output_buffer_size) { | 228 | if (gpu->identity.vertex_output_buffer_size) { |
| 207 | gpu->identity.vertex_output_buffer_size = | 229 | gpu->identity.vertex_output_buffer_size = |
| 208 | 1 << gpu->identity.vertex_output_buffer_size; | 230 | 1 << gpu->identity.vertex_output_buffer_size; |
| 209 | } else if (gpu->identity.model == 0x0400) { | 231 | } else if (gpu->identity.model == chipModel_GC400) { |
| 210 | if (gpu->identity.revision < 0x4000) | 232 | if (gpu->identity.revision < 0x4000) |
| 211 | gpu->identity.vertex_output_buffer_size = 512; | 233 | gpu->identity.vertex_output_buffer_size = 512; |
| 212 | else if (gpu->identity.revision < 0x4200) | 234 | else if (gpu->identity.revision < 0x4200) |
| @@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | |||
| 219 | 241 | ||
| 220 | switch (gpu->identity.instruction_count) { | 242 | switch (gpu->identity.instruction_count) { |
| 221 | case 0: | 243 | case 0: |
| 222 | if ((gpu->identity.model == 0x2000 && | 244 | if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) || |
| 223 | gpu->identity.revision == 0x5108) || | 245 | gpu->identity.model == chipModel_GC880) |
| 224 | gpu->identity.model == 0x880) | ||
| 225 | gpu->identity.instruction_count = 512; | 246 | gpu->identity.instruction_count = 512; |
| 226 | else | 247 | else |
| 227 | gpu->identity.instruction_count = 256; | 248 | gpu->identity.instruction_count = 256; |
| @@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu) | |||
| 242 | 263 | ||
| 243 | if (gpu->identity.num_constants == 0) | 264 | if (gpu->identity.num_constants == 0) |
| 244 | gpu->identity.num_constants = 168; | 265 | gpu->identity.num_constants = 168; |
| 266 | |||
| 267 | if (gpu->identity.varyings_count == 0) { | ||
| 268 | if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0) | ||
| 269 | gpu->identity.varyings_count = 12; | ||
| 270 | else | ||
| 271 | gpu->identity.varyings_count = 8; | ||
| 272 | } | ||
| 273 | |||
| 274 | /* | ||
| 275 | * For some cores, two varyings are consumed for position, so the | ||
| 276 | * maximum varying count needs to be reduced by one. | ||
| 277 | */ | ||
| 278 | if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) || | ||
| 279 | etnaviv_is_model_rev(gpu, GC4000, 0x5222) || | ||
| 280 | etnaviv_is_model_rev(gpu, GC4000, 0x5245) || | ||
| 281 | etnaviv_is_model_rev(gpu, GC4000, 0x5208) || | ||
| 282 | etnaviv_is_model_rev(gpu, GC3000, 0x5435) || | ||
| 283 | etnaviv_is_model_rev(gpu, GC2200, 0x5244) || | ||
| 284 | etnaviv_is_model_rev(gpu, GC2100, 0x5108) || | ||
| 285 | etnaviv_is_model_rev(gpu, GC2000, 0x5108) || | ||
| 286 | etnaviv_is_model_rev(gpu, GC1500, 0x5246) || | ||
| 287 | etnaviv_is_model_rev(gpu, GC880, 0x5107) || | ||
| 288 | etnaviv_is_model_rev(gpu, GC880, 0x5106)) | ||
| 289 | gpu->identity.varyings_count -= 1; | ||
| 245 | } | 290 | } |
| 246 | 291 | ||
| 247 | static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | 292 | static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) |
| @@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |||
| 251 | chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); | 296 | chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY); |
| 252 | 297 | ||
| 253 | /* Special case for older graphic cores. */ | 298 | /* Special case for older graphic cores. */ |
| 254 | if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK) | 299 | if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) { |
| 255 | >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) == 0x01) { | 300 | gpu->identity.model = chipModel_GC500; |
| 256 | gpu->identity.model = 0x500; /* gc500 */ | 301 | gpu->identity.revision = etnaviv_field(chipIdentity, |
| 257 | gpu->identity.revision = | 302 | VIVS_HI_CHIP_IDENTITY_REVISION); |
| 258 | (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK) | ||
| 259 | >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT; | ||
| 260 | } else { | 303 | } else { |
| 261 | 304 | ||
| 262 | gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); | 305 | gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL); |
| @@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |||
| 269 | * same. Only for GC400 family. | 312 | * same. Only for GC400 family. |
| 270 | */ | 313 | */ |
| 271 | if ((gpu->identity.model & 0xff00) == 0x0400 && | 314 | if ((gpu->identity.model & 0xff00) == 0x0400 && |
| 272 | gpu->identity.model != 0x0420) { | 315 | gpu->identity.model != chipModel_GC420) { |
| 273 | gpu->identity.model = gpu->identity.model & 0x0400; | 316 | gpu->identity.model = gpu->identity.model & 0x0400; |
| 274 | } | 317 | } |
| 275 | 318 | ||
| 276 | /* Another special case */ | 319 | /* Another special case */ |
| 277 | if (gpu->identity.model == 0x300 && | 320 | if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) { |
| 278 | gpu->identity.revision == 0x2201) { | ||
| 279 | u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); | 321 | u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE); |
| 280 | u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); | 322 | u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME); |
| 281 | 323 | ||
| @@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |||
| 295 | gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); | 337 | gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE); |
| 296 | 338 | ||
| 297 | /* Disable fast clear on GC700. */ | 339 | /* Disable fast clear on GC700. */ |
| 298 | if (gpu->identity.model == 0x700) | 340 | if (gpu->identity.model == chipModel_GC700) |
| 299 | gpu->identity.features &= ~chipFeatures_FAST_CLEAR; | 341 | gpu->identity.features &= ~chipFeatures_FAST_CLEAR; |
| 300 | 342 | ||
| 301 | if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) || | 343 | if ((gpu->identity.model == chipModel_GC500 && |
| 302 | (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) { | 344 | gpu->identity.revision < 2) || |
| 345 | (gpu->identity.model == chipModel_GC300 && | ||
| 346 | gpu->identity.revision < 0x2000)) { | ||
| 303 | 347 | ||
| 304 | /* | 348 | /* |
| 305 | * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these | 349 | * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these |
| @@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |||
| 309 | gpu->identity.minor_features1 = 0; | 353 | gpu->identity.minor_features1 = 0; |
| 310 | gpu->identity.minor_features2 = 0; | 354 | gpu->identity.minor_features2 = 0; |
| 311 | gpu->identity.minor_features3 = 0; | 355 | gpu->identity.minor_features3 = 0; |
| 356 | gpu->identity.minor_features4 = 0; | ||
| 357 | gpu->identity.minor_features5 = 0; | ||
| 312 | } else | 358 | } else |
| 313 | gpu->identity.minor_features0 = | 359 | gpu->identity.minor_features0 = |
| 314 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); | 360 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0); |
| @@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu) | |||
| 321 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); | 367 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2); |
| 322 | gpu->identity.minor_features3 = | 368 | gpu->identity.minor_features3 = |
| 323 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); | 369 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3); |
| 370 | gpu->identity.minor_features4 = | ||
| 371 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4); | ||
| 372 | gpu->identity.minor_features5 = | ||
| 373 | gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5); | ||
| 324 | } | 374 | } |
| 325 | 375 | ||
| 326 | /* GC600 idle register reports zero bits where modules aren't present */ | 376 | /* GC600 idle register reports zero bits where modules aren't present */ |
| @@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) | |||
| 441 | { | 491 | { |
| 442 | u16 prefetch; | 492 | u16 prefetch; |
| 443 | 493 | ||
| 444 | if (gpu->identity.model == chipModel_GC320 && | 494 | if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) || |
| 445 | gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 && | 495 | etnaviv_is_model_rev(gpu, GC320, 0x5220)) && |
| 446 | (gpu->identity.revision == 0x5007 || | 496 | gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) { |
| 447 | gpu->identity.revision == 0x5220)) { | ||
| 448 | u32 mc_memory_debug; | 497 | u32 mc_memory_debug; |
| 449 | 498 | ||
| 450 | mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; | 499 | mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff; |
| @@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) | |||
| 466 | VIVS_HI_AXI_CONFIG_ARCACHE(2)); | 515 | VIVS_HI_AXI_CONFIG_ARCACHE(2)); |
| 467 | 516 | ||
| 468 | /* GC2000 rev 5108 needs a special bus config */ | 517 | /* GC2000 rev 5108 needs a special bus config */ |
| 469 | if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) { | 518 | if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) { |
| 470 | u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); | 519 | u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG); |
| 471 | bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | | 520 | bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK | |
| 472 | VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); | 521 | VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK); |
| @@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |||
| 511 | 560 | ||
| 512 | if (gpu->identity.model == 0) { | 561 | if (gpu->identity.model == 0) { |
| 513 | dev_err(gpu->dev, "Unknown GPU model\n"); | 562 | dev_err(gpu->dev, "Unknown GPU model\n"); |
| 514 | pm_runtime_put_autosuspend(gpu->dev); | 563 | ret = -ENXIO; |
| 515 | return -ENXIO; | 564 | goto fail; |
| 565 | } | ||
| 566 | |||
| 567 | /* Exclude VG cores with FE2.0 */ | ||
| 568 | if (gpu->identity.features & chipFeatures_PIPE_VG && | ||
| 569 | gpu->identity.features & chipFeatures_FE20) { | ||
| 570 | dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n"); | ||
| 571 | ret = -ENXIO; | ||
| 572 | goto fail; | ||
| 516 | } | 573 | } |
| 517 | 574 | ||
| 518 | ret = etnaviv_hw_reset(gpu); | 575 | ret = etnaviv_hw_reset(gpu); |
| @@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |||
| 539 | goto fail; | 596 | goto fail; |
| 540 | } | 597 | } |
| 541 | 598 | ||
| 542 | /* TODO: we will leak here memory - fix it! */ | ||
| 543 | |||
| 544 | gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); | 599 | gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); |
| 545 | if (!gpu->mmu) { | 600 | if (!gpu->mmu) { |
| 601 | iommu_domain_free(iommu); | ||
| 546 | ret = -ENOMEM; | 602 | ret = -ENOMEM; |
| 547 | goto fail; | 603 | goto fail; |
| 548 | } | 604 | } |
| @@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |||
| 552 | if (!gpu->buffer) { | 608 | if (!gpu->buffer) { |
| 553 | ret = -ENOMEM; | 609 | ret = -ENOMEM; |
| 554 | dev_err(gpu->dev, "could not create command buffer\n"); | 610 | dev_err(gpu->dev, "could not create command buffer\n"); |
| 555 | goto fail; | 611 | goto destroy_iommu; |
| 556 | } | 612 | } |
| 557 | if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { | 613 | if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) { |
| 558 | ret = -EINVAL; | 614 | ret = -EINVAL; |
| @@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) | |||
| 582 | free_buffer: | 638 | free_buffer: |
| 583 | etnaviv_gpu_cmdbuf_free(gpu->buffer); | 639 | etnaviv_gpu_cmdbuf_free(gpu->buffer); |
| 584 | gpu->buffer = NULL; | 640 | gpu->buffer = NULL; |
| 641 | destroy_iommu: | ||
| 642 | etnaviv_iommu_destroy(gpu->mmu); | ||
| 643 | gpu->mmu = NULL; | ||
| 585 | fail: | 644 | fail: |
| 586 | pm_runtime_mark_last_busy(gpu->dev); | 645 | pm_runtime_mark_last_busy(gpu->dev); |
| 587 | pm_runtime_put_autosuspend(gpu->dev); | 646 | pm_runtime_put_autosuspend(gpu->dev); |
| @@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) | |||
| 642 | gpu->identity.minor_features2); | 701 | gpu->identity.minor_features2); |
| 643 | seq_printf(m, "\t minor_features3: 0x%08x\n", | 702 | seq_printf(m, "\t minor_features3: 0x%08x\n", |
| 644 | gpu->identity.minor_features3); | 703 | gpu->identity.minor_features3); |
| 704 | seq_printf(m, "\t minor_features4: 0x%08x\n", | ||
| 705 | gpu->identity.minor_features4); | ||
| 706 | seq_printf(m, "\t minor_features5: 0x%08x\n", | ||
| 707 | gpu->identity.minor_features5); | ||
| 645 | 708 | ||
| 646 | seq_puts(m, "\tspecs\n"); | 709 | seq_puts(m, "\tspecs\n"); |
| 647 | seq_printf(m, "\t stream_count: %d\n", | 710 | seq_printf(m, "\t stream_count: %d\n", |
| @@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) | |||
| 664 | gpu->identity.instruction_count); | 727 | gpu->identity.instruction_count); |
| 665 | seq_printf(m, "\t num_constants: %d\n", | 728 | seq_printf(m, "\t num_constants: %d\n", |
| 666 | gpu->identity.num_constants); | 729 | gpu->identity.num_constants); |
| 730 | seq_printf(m, "\t varyings_count: %d\n", | ||
| 731 | gpu->identity.varyings_count); | ||
| 667 | 732 | ||
| 668 | seq_printf(m, "\taxi: 0x%08x\n", axi); | 733 | seq_printf(m, "\taxi: 0x%08x\n", axi); |
| 669 | seq_printf(m, "\tidle: 0x%08x\n", idle); | 734 | seq_printf(m, "\tidle: 0x%08x\n", idle); |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index c75d50359ab0..f233ac4c7c1c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h | |||
| @@ -46,6 +46,12 @@ struct etnaviv_chip_identity { | |||
| 46 | /* Supported minor feature 3 fields. */ | 46 | /* Supported minor feature 3 fields. */ |
| 47 | u32 minor_features3; | 47 | u32 minor_features3; |
| 48 | 48 | ||
| 49 | /* Supported minor feature 4 fields. */ | ||
| 50 | u32 minor_features4; | ||
| 51 | |||
| 52 | /* Supported minor feature 5 fields. */ | ||
| 53 | u32 minor_features5; | ||
| 54 | |||
| 49 | /* Number of streams supported. */ | 55 | /* Number of streams supported. */ |
| 50 | u32 stream_count; | 56 | u32 stream_count; |
| 51 | 57 | ||
| @@ -75,6 +81,9 @@ struct etnaviv_chip_identity { | |||
| 75 | 81 | ||
| 76 | /* Buffer size */ | 82 | /* Buffer size */ |
| 77 | u32 buffer_size; | 83 | u32 buffer_size; |
| 84 | |||
| 85 | /* Number of varyings */ | ||
| 86 | u8 varyings_count; | ||
| 78 | }; | 87 | }; |
| 79 | 88 | ||
| 80 | struct etnaviv_event { | 89 | struct etnaviv_event { |
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 0064f2640396..6a7de5f1454a 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h | |||
| @@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng | |||
| 8 | git clone git://0x04.net/rules-ng-ng | 8 | git clone git://0x04.net/rules-ng-ng |
| 9 | 9 | ||
| 10 | The rules-ng-ng source files this header was generated from are: | 10 | The rules-ng-ng source files this header was generated from are: |
| 11 | - state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21) | 11 | - state_hi.xml ( 24309 bytes, from 2015-12-12 09:02:53) |
| 12 | - common.xml ( 18437 bytes, from 2015-03-25 11:27:41) | 12 | - common.xml ( 18437 bytes, from 2015-12-12 09:02:53) |
| 13 | 13 | ||
| 14 | Copyright (C) 2015 | 14 | Copyright (C) 2015 |
| 15 | */ | 15 | */ |
| @@ -182,8 +182,25 @@ Copyright (C) 2015 | |||
| 182 | 182 | ||
| 183 | #define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 | 183 | #define VIVS_HI_CHIP_MINOR_FEATURE_3 0x00000088 |
| 184 | 184 | ||
| 185 | #define VIVS_HI_CHIP_SPECS_3 0x0000008c | ||
| 186 | #define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK 0x000001f0 | ||
| 187 | #define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT 4 | ||
| 188 | #define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK) | ||
| 189 | #define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK 0x00000007 | ||
| 190 | #define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0 | ||
| 191 | #define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK) | ||
| 192 | |||
| 185 | #define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 | 193 | #define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094 |
| 186 | 194 | ||
| 195 | #define VIVS_HI_CHIP_SPECS_4 0x0000009c | ||
| 196 | #define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK 0x0001f000 | ||
| 197 | #define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT 12 | ||
| 198 | #define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK) | ||
| 199 | |||
| 200 | #define VIVS_HI_CHIP_MINOR_FEATURE_5 0x000000a0 | ||
| 201 | |||
| 202 | #define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8 | ||
| 203 | |||
| 187 | #define VIVS_PM 0x00000000 | 204 | #define VIVS_PM 0x00000000 |
| 188 | 205 | ||
| 189 | #define VIVS_PM_POWER_CONTROLS 0x00000100 | 206 | #define VIVS_PM_POWER_CONTROLS 0x00000100 |
| @@ -206,6 +223,11 @@ Copyright (C) 2015 | |||
| 206 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 | 223 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 |
| 207 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 | 224 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE 0x00000002 |
| 208 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 | 225 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE 0x00000004 |
| 226 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH 0x00000008 | ||
| 227 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA 0x00000010 | ||
| 228 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE 0x00000020 | ||
| 229 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA 0x00000040 | ||
| 230 | #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080 | ||
| 209 | 231 | ||
| 210 | #define VIVS_PM_PULSE_EATER 0x0000010c | 232 | #define VIVS_PM_PULSE_EATER 0x0000010c |
| 211 | 233 | ||
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 6bfc46369db1..367a916f364e 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
| @@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, | |||
| 304 | unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) & | 304 | unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) & |
| 305 | DENTIST_DPREFCLK_WDIVIDER_MASK) >> | 305 | DENTIST_DPREFCLK_WDIVIDER_MASK) >> |
| 306 | DENTIST_DPREFCLK_WDIVIDER_SHIFT; | 306 | DENTIST_DPREFCLK_WDIVIDER_SHIFT; |
| 307 | 307 | div = radeon_audio_decode_dfs_div(div); | |
| 308 | if (div < 128 && div >= 96) | ||
| 309 | div -= 64; | ||
| 310 | else if (div >= 64) | ||
| 311 | div = div / 2 - 16; | ||
| 312 | else if (div >= 8) | ||
| 313 | div /= 4; | ||
| 314 | else | ||
| 315 | div = 0; | ||
| 316 | 308 | ||
| 317 | if (div) | 309 | if (div) |
| 318 | clock = rdev->clock.gpupll_outputfreq * 10 / div; | 310 | clock = clock * 100 / div; |
| 319 | 311 | ||
| 320 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); | 312 | WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000); |
| 321 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); | 313 | WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index 9953356fe263..3cf04a2f44bb 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
| @@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev, | |||
| 289 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 289 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
| 290 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 290 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
| 291 | */ | 291 | */ |
| 292 | if (ASIC_IS_DCE41(rdev)) { | ||
| 293 | unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) & | ||
| 294 | DENTIST_DPREFCLK_WDIVIDER_MASK) >> | ||
| 295 | DENTIST_DPREFCLK_WDIVIDER_SHIFT; | ||
| 296 | div = radeon_audio_decode_dfs_div(div); | ||
| 297 | |||
| 298 | if (div) | ||
| 299 | clock = 100 * clock / div; | ||
| 300 | } | ||
| 301 | |||
| 292 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); | 302 | WREG32(DCCG_AUDIO_DTO1_PHASE, 24000); |
| 293 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 303 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
| 294 | } | 304 | } |
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 4aa5f755572b..13b6029d65cc 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
| @@ -511,6 +511,11 @@ | |||
| 511 | #define DCCG_AUDIO_DTO1_CNTL 0x05cc | 511 | #define DCCG_AUDIO_DTO1_CNTL 0x05cc |
| 512 | # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) | 512 | # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3) |
| 513 | 513 | ||
| 514 | #define DCE41_DENTIST_DISPCLK_CNTL 0x049c | ||
| 515 | # define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24) | ||
| 516 | # define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24) | ||
| 517 | # define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24 | ||
| 518 | |||
| 514 | /* DCE 4.0 AFMT */ | 519 | /* DCE 4.0 AFMT */ |
| 515 | #define HDMI_CONTROL 0x7030 | 520 | #define HDMI_CONTROL 0x7030 |
| 516 | # define HDMI_KEEPOUT_MODE (1 << 0) | 521 | # define HDMI_KEEPOUT_MODE (1 << 0) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5ae6db98aa4d..78a51b3eda10 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -268,7 +268,7 @@ struct radeon_clock { | |||
| 268 | uint32_t current_dispclk; | 268 | uint32_t current_dispclk; |
| 269 | uint32_t dp_extclk; | 269 | uint32_t dp_extclk; |
| 270 | uint32_t max_pixel_clock; | 270 | uint32_t max_pixel_clock; |
| 271 | uint32_t gpupll_outputfreq; | 271 | uint32_t vco_freq; |
| 272 | }; | 272 | }; |
| 273 | 273 | ||
| 274 | /* | 274 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 08fc1b5effa8..de9a2ffcf5f7 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -1106,6 +1106,31 @@ union firmware_info { | |||
| 1106 | ATOM_FIRMWARE_INFO_V2_2 info_22; | 1106 | ATOM_FIRMWARE_INFO_V2_2 info_22; |
| 1107 | }; | 1107 | }; |
| 1108 | 1108 | ||
| 1109 | union igp_info { | ||
| 1110 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
| 1111 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; | ||
| 1112 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; | ||
| 1113 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; | ||
| 1114 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; | ||
| 1115 | }; | ||
| 1116 | |||
| 1117 | static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev) | ||
| 1118 | { | ||
| 1119 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
| 1120 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
| 1121 | union igp_info *igp_info; | ||
| 1122 | u8 frev, crev; | ||
| 1123 | u16 data_offset; | ||
| 1124 | |||
| 1125 | if (atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
| 1126 | &frev, &crev, &data_offset)) { | ||
| 1127 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
| 1128 | data_offset); | ||
| 1129 | rdev->clock.vco_freq = | ||
| 1130 | le32_to_cpu(igp_info->info_6.ulDentistVCOFreq); | ||
| 1131 | } | ||
| 1132 | } | ||
| 1133 | |||
| 1109 | bool radeon_atom_get_clock_info(struct drm_device *dev) | 1134 | bool radeon_atom_get_clock_info(struct drm_device *dev) |
| 1110 | { | 1135 | { |
| 1111 | struct radeon_device *rdev = dev->dev_private; | 1136 | struct radeon_device *rdev = dev->dev_private; |
| @@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
| 1257 | rdev->mode_info.firmware_flags = | 1282 | rdev->mode_info.firmware_flags = |
| 1258 | le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); | 1283 | le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess); |
| 1259 | 1284 | ||
| 1260 | if (ASIC_IS_DCE8(rdev)) { | 1285 | if (ASIC_IS_DCE8(rdev)) |
| 1261 | rdev->clock.gpupll_outputfreq = | 1286 | rdev->clock.vco_freq = |
| 1262 | le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq); | 1287 | le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq); |
| 1263 | if (rdev->clock.gpupll_outputfreq == 0) | 1288 | else if (ASIC_IS_DCE5(rdev)) |
| 1264 | rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */ | 1289 | rdev->clock.vco_freq = rdev->clock.current_dispclk; |
| 1265 | } | 1290 | else if (ASIC_IS_DCE41(rdev)) |
| 1291 | radeon_atombios_get_dentist_vco_freq(rdev); | ||
| 1292 | else | ||
| 1293 | rdev->clock.vco_freq = rdev->clock.current_dispclk; | ||
| 1294 | |||
| 1295 | if (rdev->clock.vco_freq == 0) | ||
| 1296 | rdev->clock.vco_freq = 360000; /* 3.6 GHz */ | ||
| 1266 | 1297 | ||
| 1267 | return true; | 1298 | return true; |
| 1268 | } | 1299 | } |
| @@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) | |||
| 1270 | return false; | 1301 | return false; |
| 1271 | } | 1302 | } |
| 1272 | 1303 | ||
| 1273 | union igp_info { | ||
| 1274 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
| 1275 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; | ||
| 1276 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; | ||
| 1277 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; | ||
| 1278 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; | ||
| 1279 | }; | ||
| 1280 | |||
| 1281 | bool radeon_atombios_sideport_present(struct radeon_device *rdev) | 1304 | bool radeon_atombios_sideport_present(struct radeon_device *rdev) |
| 1282 | { | 1305 | { |
| 1283 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1306 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 2c02e99b5f95..b214663b370d 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
| @@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
| 739 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 739 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
| 740 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 740 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
| 741 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 741 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
| 742 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
| 743 | struct radeon_connector_atom_dig *dig_connector = | ||
| 744 | radeon_connector->con_priv; | ||
| 745 | 742 | ||
| 746 | if (!dig || !dig->afmt) | 743 | if (!dig || !dig->afmt) |
| 747 | return; | 744 | return; |
| @@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
| 753 | radeon_audio_write_speaker_allocation(encoder); | 750 | radeon_audio_write_speaker_allocation(encoder); |
| 754 | radeon_audio_write_sad_regs(encoder); | 751 | radeon_audio_write_sad_regs(encoder); |
| 755 | radeon_audio_write_latency_fields(encoder, mode); | 752 | radeon_audio_write_latency_fields(encoder, mode); |
| 756 | if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) | 753 | radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10); |
| 757 | radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); | ||
| 758 | else | ||
| 759 | radeon_audio_set_dto(encoder, dig_connector->dp_clock); | ||
| 760 | radeon_audio_set_audio_packet(encoder); | 754 | radeon_audio_set_audio_packet(encoder); |
| 761 | radeon_audio_select_pin(encoder); | 755 | radeon_audio_select_pin(encoder); |
| 762 | 756 | ||
| @@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode) | |||
| 781 | if (radeon_encoder->audio && radeon_encoder->audio->dpms) | 775 | if (radeon_encoder->audio && radeon_encoder->audio->dpms) |
| 782 | radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON); | 776 | radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON); |
| 783 | } | 777 | } |
| 778 | |||
| 779 | unsigned int radeon_audio_decode_dfs_div(unsigned int div) | ||
| 780 | { | ||
| 781 | if (div >= 8 && div < 64) | ||
| 782 | return (div - 8) * 25 + 200; | ||
| 783 | else if (div >= 64 && div < 96) | ||
| 784 | return (div - 64) * 50 + 1600; | ||
| 785 | else if (div >= 96 && div < 128) | ||
| 786 | return (div - 96) * 100 + 3200; | ||
| 787 | else | ||
| 788 | return 0; | ||
| 789 | } | ||
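radeon_audio_decode_dfs_div() turns the raw DENTIST_DPREFCLK_WDIVIDER encoding into the effective divider multiplied by 100: dividers run from 2.00 to 15.75 in 0.25 steps, 16.0 to 31.5 in 0.5 steps and 32 to 63 in whole steps, which is why both callers now compute clock * 100 / div. A host-side mirror of the helper with a few worked values (illustrative, not driver code):

    #include <assert.h>

    static unsigned int decode_dfs_div(unsigned int div)   /* mirrors the helper above */
    {
            if (div >= 8 && div < 64)
                    return (div - 8) * 25 + 200;
            else if (div >= 64 && div < 96)
                    return (div - 64) * 50 + 1600;
            else if (div >= 96 && div < 128)
                    return (div - 96) * 100 + 3200;
            return 0;
    }

    static void check_decode(void)
    {
            assert(decode_dfs_div(8)   == 200);     /* divider  2.00 */
            assert(decode_dfs_div(40)  == 1000);    /* divider 10.00 */
            assert(decode_dfs_div(70)  == 1900);    /* divider 19.00 */
            assert(decode_dfs_div(100) == 3600);    /* divider 36.00 */
            /* e.g. a 100000 kHz clock with raw divider 40: 100000 * 100 / 1000 == 10000 kHz */
    }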
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h index 059cc3012062..5c70cceaa4a6 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.h +++ b/drivers/gpu/drm/radeon/radeon_audio.h | |||
| @@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev); | |||
| 79 | void radeon_audio_mode_set(struct drm_encoder *encoder, | 79 | void radeon_audio_mode_set(struct drm_encoder *encoder, |
| 80 | struct drm_display_mode *mode); | 80 | struct drm_display_mode *mode); |
| 81 | void radeon_audio_dpms(struct drm_encoder *encoder, int mode); | 81 | void radeon_audio_dpms(struct drm_encoder *encoder, int mode); |
| 82 | unsigned int radeon_audio_decode_dfs_div(unsigned int div); | ||
| 82 | 83 | ||
| 83 | #endif | 84 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index b3bb92368ae0..298ea1c453c3 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -1670,8 +1670,10 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
| 1670 | /* setup afmt */ | 1670 | /* setup afmt */ |
| 1671 | radeon_afmt_init(rdev); | 1671 | radeon_afmt_init(rdev); |
| 1672 | 1672 | ||
| 1673 | radeon_fbdev_init(rdev); | 1673 | if (!list_empty(&rdev->ddev->mode_config.connector_list)) { |
| 1674 | drm_kms_helper_poll_init(rdev->ddev); | 1674 | radeon_fbdev_init(rdev); |
| 1675 | drm_kms_helper_poll_init(rdev->ddev); | ||
| 1676 | } | ||
| 1675 | 1677 | ||
| 1676 | /* do pm late init */ | 1678 | /* do pm late init */ |
| 1677 | ret = radeon_pm_late_init(rdev); | 1679 | ret = radeon_pm_late_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3dcc5733ff69..e26c963f2e93 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
| @@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, | |||
| 663 | bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); | 663 | bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); |
| 664 | if (!bo_va) { | 664 | if (!bo_va) { |
| 665 | args->operation = RADEON_VA_RESULT_ERROR; | 665 | args->operation = RADEON_VA_RESULT_ERROR; |
| 666 | radeon_bo_unreserve(rbo); | ||
| 666 | drm_gem_object_unreference_unlocked(gobj); | 667 | drm_gem_object_unreference_unlocked(gobj); |
| 667 | return -ENOENT; | 668 | return -ENOENT; |
| 668 | } | 669 | } |
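The radeon_gem_va_ioctl() hunk is a reservation-leak fix: the buffer object was reserved earlier in the ioctl, and the -ENOENT exit previously returned without dropping that reservation. The shape of the rule, as a simplified standalone sketch (hypothetical helper, not the in-tree flow):

    /* Every exit taken after radeon_bo_reserve() succeeds must be paired
     * with radeon_bo_unreserve(). */
    static int va_lookup(struct radeon_bo *rbo, struct radeon_vm *vm,
                         struct radeon_bo_va **bo_va)
    {
            int r = radeon_bo_reserve(rbo, false);

            if (r)
                    return r;

            *bo_va = radeon_vm_bo_find(vm, rbo);
            if (!*bo_va)
                    r = -ENOENT;    /* before the fix, this path leaked the reservation */

            radeon_bo_unreserve(rbo);
            return r;
    }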
diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c index 07a0d378e122..a01efe39a820 100644 --- a/drivers/gpu/drm/radeon/vce_v1_0.c +++ b/drivers/gpu/drm/radeon/vce_v1_0.c | |||
| @@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) | |||
| 178 | return -EINVAL; | 178 | return -EINVAL; |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | for (i = 0; i < sign->num; ++i) { | 181 | for (i = 0; i < le32_to_cpu(sign->num); ++i) { |
| 182 | if (sign->val[i].chip_id == chip_id) | 182 | if (le32_to_cpu(sign->val[i].chip_id) == chip_id) |
| 183 | break; | 183 | break; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | if (i == sign->num) | 186 | if (i == le32_to_cpu(sign->num)) |
| 187 | return -EINVAL; | 187 | return -EINVAL; |
| 188 | 188 | ||
| 189 | data += (256 - 64) / 4; | 189 | data += (256 - 64) / 4; |
| @@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data) | |||
| 191 | data[1] = sign->val[i].nonce[1]; | 191 | data[1] = sign->val[i].nonce[1]; |
| 192 | data[2] = sign->val[i].nonce[2]; | 192 | data[2] = sign->val[i].nonce[2]; |
| 193 | data[3] = sign->val[i].nonce[3]; | 193 | data[3] = sign->val[i].nonce[3]; |
| 194 | data[4] = sign->len + 64; | 194 | data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64); |
| 195 | 195 | ||
| 196 | memset(&data[5], 0, 44); | 196 | memset(&data[5], 0, 44); |
| 197 | memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); | 197 | memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign)); |
| 198 | 198 | ||
| 199 | data += data[4] / 4; | 199 | data += le32_to_cpu(data[4]) / 4; |
| 200 | data[0] = sign->val[i].sigval[0]; | 200 | data[0] = sign->val[i].sigval[0]; |
| 201 | data[1] = sign->val[i].sigval[1]; | 201 | data[1] = sign->val[i].sigval[1]; |
| 202 | data[2] = sign->val[i].sigval[2]; | 202 | data[2] = sign->val[i].sigval[2]; |
| 203 | data[3] = sign->val[i].sigval[3]; | 203 | data[3] = sign->val[i].sigval[3]; |
| 204 | 204 | ||
| 205 | rdev->vce.keyselect = sign->val[i].keyselect; | 205 | rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect); |
| 206 | 206 | ||
| 207 | return 0; | 207 | return 0; |
| 208 | } | 208 | } |
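The vce_v1_0.c hunk above wraps every value read from the VCE firmware signature in le32_to_cpu()/cpu_to_le32(), because the firmware image stores its 32-bit fields in little-endian byte order regardless of host endianness (le32_to_cpu() is a no-op on little-endian CPUs and a byte swap on big-endian ones). A minimal userspace sketch of the same idea follows; the blob layout and field name are invented for illustration and are not the radeon structures.

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 32-bit little-endian value byte by byte; correct on any host. */
    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            /* Pretend these bytes came from a firmware file: 0x00000102 stored LE. */
            uint8_t blob[4] = { 0x02, 0x01, 0x00, 0x00 };
            uint32_t chip_id = get_le32(blob);

            printf("chip_id = 0x%08x\n", (unsigned int)chip_id); /* 0x00000102 everywhere */
            return 0;
    }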
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index d1dc0f7b01db..f6a809afceec 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile | |||
| @@ -2,11 +2,11 @@ | |||
| 2 | # Makefile for the drm device driver. This driver provides support for the | 2 | # Makefile for the drm device driver. This driver provides support for the |
| 3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | 3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. |
| 4 | 4 | ||
| 5 | rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \ | 5 | rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \ |
| 6 | rockchip_drm_gem.o | 6 | rockchip_drm_gem.o rockchip_drm_vop.o |
| 7 | rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o | ||
| 7 | 8 | ||
| 8 | obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o | 9 | obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o |
| 9 | obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o | 10 | obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o |
| 10 | 11 | ||
| 11 | obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \ | 12 | obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o |
| 12 | rockchip_vop_reg.o | ||
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c index 7bfe243c6173..f8f8f29fb7c3 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c | |||
| @@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi) | |||
| 461 | 461 | ||
| 462 | static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi) | 462 | static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi) |
| 463 | { | 463 | { |
| 464 | unsigned int bpp, i, pre; | 464 | unsigned int i, pre; |
| 465 | unsigned long mpclk, pllref, tmp; | 465 | unsigned long mpclk, pllref, tmp; |
| 466 | unsigned int m = 1, n = 1, target_mbps = 1000; | 466 | unsigned int m = 1, n = 1, target_mbps = 1000; |
| 467 | unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps; | 467 | unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps; |
| 468 | int bpp; | ||
| 468 | 469 | ||
| 469 | bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); | 470 | bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); |
| 470 | if (bpp < 0) { | 471 | if (bpp < 0) { |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 8397d1b62ef9..a0d51ccb6ea4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c | |||
| @@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev, | |||
| 55 | 55 | ||
| 56 | return arm_iommu_attach_device(dev, mapping); | 56 | return arm_iommu_attach_device(dev, mapping); |
| 57 | } | 57 | } |
| 58 | EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device); | ||
| 59 | 58 | ||
| 60 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, | 59 | void rockchip_drm_dma_detach_device(struct drm_device *drm_dev, |
| 61 | struct device *dev) | 60 | struct device *dev) |
| 62 | { | 61 | { |
| 63 | arm_iommu_detach_device(dev); | 62 | arm_iommu_detach_device(dev); |
| 64 | } | 63 | } |
| 65 | EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device); | ||
| 66 | 64 | ||
| 67 | int rockchip_register_crtc_funcs(struct drm_crtc *crtc, | 65 | int rockchip_register_crtc_funcs(struct drm_crtc *crtc, |
| 68 | const struct rockchip_crtc_funcs *crtc_funcs) | 66 | const struct rockchip_crtc_funcs *crtc_funcs) |
| @@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc, | |||
| 77 | 75 | ||
| 78 | return 0; | 76 | return 0; |
| 79 | } | 77 | } |
| 80 | EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs); | ||
| 81 | 78 | ||
| 82 | void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc) | 79 | void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc) |
| 83 | { | 80 | { |
| @@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc) | |||
| 89 | 86 | ||
| 90 | priv->crtc_funcs[pipe] = NULL; | 87 | priv->crtc_funcs[pipe] = NULL; |
| 91 | } | 88 | } |
| 92 | EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs); | ||
| 93 | 89 | ||
| 94 | static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm, | 90 | static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm, |
| 95 | int pipe) | 91 | int pipe) |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index f7844883cb76..3b8f652698f8 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c | |||
| @@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb, | |||
| 39 | 39 | ||
| 40 | return rk_fb->obj[plane]; | 40 | return rk_fb->obj[plane]; |
| 41 | } | 41 | } |
| 42 | EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj); | ||
| 43 | 42 | ||
| 44 | static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) | 43 | static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) |
| 45 | { | 44 | { |
| @@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc) | |||
| 177 | crtc_funcs->wait_for_update(crtc); | 176 | crtc_funcs->wait_for_update(crtc); |
| 178 | } | 177 | } |
| 179 | 178 | ||
| 179 | /* | ||
| 180 | * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066 | ||
| 181 | * have hardware counters for neither vblanks nor scanlines, which results in | ||
| 182 | * a race where: | ||
| 183 | * | <-- HW vsync irq and reg take effect | ||
| 184 | * plane_commit --> | | ||
| 185 | * get_vblank and wait --> | | ||
| 186 | * | <-- handle_vblank, vblank->count + 1 | ||
| 187 | * cleanup_fb --> | | ||
| 188 | * iommu crash --> | | ||
| 189 | * | <-- HW vsync irq and reg take effect | ||
| 190 | * | ||
| 191 | * This function is equivalent but uses rockchip_crtc_wait_for_update() instead | ||
| 192 | * of waiting for vblank_count to change. | ||
| 193 | */ | ||
| 180 | static void | 194 | static void |
| 181 | rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state) | 195 | rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state) |
| 182 | { | 196 | { |
| 183 | struct drm_crtc_state *old_crtc_state; | 197 | struct drm_crtc_state *old_crtc_state; |
| 184 | struct drm_crtc *crtc; | 198 | struct drm_crtc *crtc; |
| @@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state) | |||
| 194 | if (!crtc->state->active) | 208 | if (!crtc->state->active) |
| 195 | continue; | 209 | continue; |
| 196 | 210 | ||
| 211 | if (!drm_atomic_helper_framebuffer_changed(dev, | ||
| 212 | old_state, crtc)) | ||
| 213 | continue; | ||
| 214 | |||
| 197 | ret = drm_crtc_vblank_get(crtc); | 215 | ret = drm_crtc_vblank_get(crtc); |
| 198 | if (ret != 0) | 216 | if (ret != 0) |
| 199 | continue; | 217 | continue; |
| @@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) | |||
| 241 | 259 | ||
| 242 | drm_atomic_helper_commit_planes(dev, state, true); | 260 | drm_atomic_helper_commit_planes(dev, state, true); |
| 243 | 261 | ||
| 244 | rockchip_atomic_wait_for_complete(state); | 262 | rockchip_atomic_wait_for_complete(dev, state); |
| 245 | 263 | ||
| 246 | drm_atomic_helper_cleanup_planes(dev, state); | 264 | drm_atomic_helper_cleanup_planes(dev, state); |
| 247 | 265 | ||
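The comment added in the rockchip_drm_fb.c hunk above explains why drm_atomic_helper_wait_for_vblanks() cannot be used: rk3288 and rk3066 have no vblank or scanline counters, so waiting for a frame counter to change can return before the new plane registers have actually latched, and the old framebuffer may be torn down too early. The replacement waits for an explicit per-CRTC update event and now also skips CRTCs whose framebuffer did not change. Below is a userspace analogue of waiting on an explicit completion rather than a counter, using pthreads; all names are invented and it builds with -pthread.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
    static int update_applied;              /* set by the "hardware" side */

    static void *vblank_irq(void *arg)
    {
            pthread_mutex_lock(&lock);
            update_applied = 1;             /* registers have taken effect */
            pthread_cond_signal(&done);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t irq;

            pthread_create(&irq, NULL, vblank_irq, NULL);

            /* Wait for the explicit event, not for a frame counter to move. */
            pthread_mutex_lock(&lock);
            while (!update_applied)
                    pthread_cond_wait(&done, &lock);
            pthread_mutex_unlock(&lock);

            puts("old framebuffer can be cleaned up now");
            pthread_join(irq, NULL);
            return 0;
    }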
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h index 50432e9b5b37..73718c5f5bbf 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | |||
| @@ -15,7 +15,18 @@ | |||
| 15 | #ifndef _ROCKCHIP_DRM_FBDEV_H | 15 | #ifndef _ROCKCHIP_DRM_FBDEV_H |
| 16 | #define _ROCKCHIP_DRM_FBDEV_H | 16 | #define _ROCKCHIP_DRM_FBDEV_H |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_DRM_FBDEV_EMULATION | ||
| 18 | int rockchip_drm_fbdev_init(struct drm_device *dev); | 19 | int rockchip_drm_fbdev_init(struct drm_device *dev); |
| 19 | void rockchip_drm_fbdev_fini(struct drm_device *dev); | 20 | void rockchip_drm_fbdev_fini(struct drm_device *dev); |
| 21 | #else | ||
| 22 | static inline int rockchip_drm_fbdev_init(struct drm_device *dev) | ||
| 23 | { | ||
| 24 | return 0; | ||
| 25 | } | ||
| 26 | |||
| 27 | static inline void rockchip_drm_fbdev_fini(struct drm_device *dev) | ||
| 28 | { | ||
| 29 | } | ||
| 30 | #endif | ||
| 20 | 31 | ||
| 21 | #endif /* _ROCKCHIP_DRM_FBDEV_H */ | 32 | #endif /* _ROCKCHIP_DRM_FBDEV_H */ |
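The rockchip_drm_fbdev.h hunk above follows the usual kernel header pattern for optional features: real prototypes when the config option is enabled, static inline no-op stubs otherwise, so callers never need #ifdef blocks of their own. A self-contained sketch of the pattern; FEATURE_FOO and foo_init() are placeholders, not kernel symbols.

    #include <stdio.h>

    /* #define FEATURE_FOO 1 */     /* when defined, the real foo_init() would
                                       be provided by another object file */
    #ifdef FEATURE_FOO
    int foo_init(void);
    #else
    static inline int foo_init(void)
    {
            return 0;               /* feature compiled out: succeed silently */
    }
    #endif

    int main(void)
    {
            /* The caller is identical whether or not the feature is built in. */
            printf("foo_init() = %d\n", foo_init());
            return 0;
    }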
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index d908321b94ce..18e07338c6e5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
| @@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv, | |||
| 234 | /* | 234 | /* |
| 235 | * align to 64 bytes since Mali requires it. | 235 | * align to 64 bytes since Mali requires it. |
| 236 | */ | 236 | */ |
| 237 | min_pitch = ALIGN(min_pitch, 64); | 237 | args->pitch = ALIGN(min_pitch, 64); |
| 238 | 238 | args->size = args->pitch * args->height; | |
| 239 | if (args->pitch < min_pitch) | ||
| 240 | args->pitch = min_pitch; | ||
| 241 | |||
| 242 | if (args->size < args->pitch * args->height) | ||
| 243 | args->size = args->pitch * args->height; | ||
| 244 | 239 | ||
| 245 | rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, | 240 | rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size, |
| 246 | &args->handle); | 241 | &args->handle); |
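The dumb-buffer hunk above now always rounds the pitch up to a 64-byte boundary and derives the buffer size from that pitch, rather than trusting caller-supplied values. Rounding up to a power-of-two boundary is the standard (x + a - 1) & ~(a - 1) idiom; the sketch below uses a stand-in ALIGN_UP macro and made-up mode values, not the kernel's ALIGN().

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

    int main(void)
    {
            uint32_t bpp = 32, width = 1366, height = 768;
            uint32_t min_pitch = width * (bpp / 8);    /* 5464 bytes              */
            uint32_t pitch = ALIGN_UP(min_pitch, 64);  /* 5504 bytes (64-aligned) */
            uint64_t size = (uint64_t)pitch * height;  /* 4227072 bytes           */

            printf("pitch=%u size=%llu\n", (unsigned int)pitch,
                   (unsigned long long)size);
            return 0;
    }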
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 46c2a8dfd8aa..fd370548d7d7 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
| @@ -43,8 +43,8 @@ | |||
| 43 | 43 | ||
| 44 | #define REG_SET(x, base, reg, v, mode) \ | 44 | #define REG_SET(x, base, reg, v, mode) \ |
| 45 | __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) | 45 | __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) |
| 46 | #define REG_SET_MASK(x, base, reg, v, mode) \ | 46 | #define REG_SET_MASK(x, base, reg, mask, v, mode) \ |
| 47 | __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v) | 47 | __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v) |
| 48 | 48 | ||
| 49 | #define VOP_WIN_SET(x, win, name, v) \ | 49 | #define VOP_WIN_SET(x, win, name, v) \ |
| 50 | REG_SET(x, win->base, win->phy->name, v, RELAXED) | 50 | REG_SET(x, win->base, win->phy->name, v, RELAXED) |
| @@ -58,16 +58,18 @@ | |||
| 58 | #define VOP_INTR_GET(vop, name) \ | 58 | #define VOP_INTR_GET(vop, name) \ |
| 59 | vop_read_reg(vop, 0, &vop->data->ctrl->name) | 59 | vop_read_reg(vop, 0, &vop->data->ctrl->name) |
| 60 | 60 | ||
| 61 | #define VOP_INTR_SET(vop, name, v) \ | 61 | #define VOP_INTR_SET(vop, name, mask, v) \ |
| 62 | REG_SET(vop, 0, vop->data->intr->name, v, NORMAL) | 62 | REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL) |
| 63 | #define VOP_INTR_SET_TYPE(vop, name, type, v) \ | 63 | #define VOP_INTR_SET_TYPE(vop, name, type, v) \ |
| 64 | do { \ | 64 | do { \ |
| 65 | int i, reg = 0; \ | 65 | int i, reg = 0, mask = 0; \ |
| 66 | for (i = 0; i < vop->data->intr->nintrs; i++) { \ | 66 | for (i = 0; i < vop->data->intr->nintrs; i++) { \ |
| 67 | if (vop->data->intr->intrs[i] & type) \ | 67 | if (vop->data->intr->intrs[i] & type) { \ |
| 68 | reg |= (v) << i; \ | 68 | reg |= (v) << i; \ |
| 69 | mask |= 1 << i; \ | ||
| 70 | } \ | ||
| 69 | } \ | 71 | } \ |
| 70 | VOP_INTR_SET(vop, name, reg); \ | 72 | VOP_INTR_SET(vop, name, mask, reg); \ |
| 71 | } while (0) | 73 | } while (0) |
| 72 | #define VOP_INTR_GET_TYPE(vop, name, type) \ | 74 | #define VOP_INTR_GET_TYPE(vop, name, type) \ |
| 73 | vop_get_intr_type(vop, &vop->data->intr->name, type) | 75 | vop_get_intr_type(vop, &vop->data->intr->name, type) |
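The macro rework in the rockchip_drm_vop.c hunk above accumulates a write mask alongside the value, so the eventual register write only touches the interrupt bits whose type actually matched instead of the whole field. The same accumulate-mask-and-value loop in plain C; the interrupt table and type flags below are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* One type flag per interrupt line, in register bit order. */
            const uint32_t intrs[] = { 0x1 /* FS */, 0x2 /* ADDR */, 0x4 /* LINE */ };
            const int nintrs = 3;
            uint32_t type = 0x1 | 0x4;      /* act on FS and LINE only        */
            uint32_t v = 1;                 /* value to write into those bits */
            uint32_t reg = 0, mask = 0;
            int i;

            for (i = 0; i < nintrs; i++) {
                    if (intrs[i] & type) {
                            reg  |= v << i;     /* bit value                     */
                            mask |= 1u << i;    /* this bit belongs to the write */
                    }
            }

            /* A masked write would then be: hw = (hw & ~mask) | (reg & mask). */
            printf("reg=0x%x mask=0x%x\n", (unsigned int)reg, (unsigned int)mask);
            return 0;    /* prints reg=0x5 mask=0x5 */
    }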
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 424d515ffcda..314ff71db978 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c | |||
| @@ -144,19 +144,16 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) | |||
| 144 | } | 144 | } |
| 145 | #endif /* CONFIG_DEBUG_FS */ | 145 | #endif /* CONFIG_DEBUG_FS */ |
| 146 | 146 | ||
| 147 | /* | ||
| 148 | * Asks the firmware to turn on power to the V3D engine. | ||
| 149 | * | ||
| 150 | * This may be doable with just the clocks interface, though this | ||
| 151 | * packet does some other register setup from the firmware, too. | ||
| 152 | */ | ||
| 153 | int | 147 | int |
| 154 | vc4_v3d_set_power(struct vc4_dev *vc4, bool on) | 148 | vc4_v3d_set_power(struct vc4_dev *vc4, bool on) |
| 155 | { | 149 | { |
| 156 | if (on) | 150 | /* XXX: This interface is needed for GPU reset, and the way to |
| 157 | return pm_generic_poweroff(&vc4->v3d->pdev->dev); | 151 | * do it is to turn our power domain off and back on. We |
| 158 | else | 152 | * can't just reset from within the driver, because the reset |
| 159 | return pm_generic_resume(&vc4->v3d->pdev->dev); | 153 | * bits are in the power domain's register area, and get set |
| 154 | * during the poweron process. | ||
| 155 | */ | ||
| 156 | return 0; | ||
| 160 | } | 157 | } |
| 161 | 158 | ||
| 162 | static void vc4_v3d_init_hw(struct drm_device *dev) | 159 | static void vc4_v3d_init_hw(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c49812b80dd0..24fb348a44e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | * | 25 | * |
| 26 | **************************************************************************/ | 26 | **************************************************************************/ |
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/console.h> | ||
| 28 | 29 | ||
| 29 | #include <drm/drmP.h> | 30 | #include <drm/drmP.h> |
| 30 | #include "vmwgfx_drv.h" | 31 | #include "vmwgfx_drv.h" |
| @@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1538 | static int __init vmwgfx_init(void) | 1539 | static int __init vmwgfx_init(void) |
| 1539 | { | 1540 | { |
| 1540 | int ret; | 1541 | int ret; |
| 1542 | |||
| 1543 | #ifdef CONFIG_VGA_CONSOLE | ||
| 1544 | if (vgacon_text_force()) | ||
| 1545 | return -EINVAL; | ||
| 1546 | #endif | ||
| 1547 | |||
| 1541 | ret = drm_pci_init(&driver, &vmw_pci_driver); | 1548 | ret = drm_pci_init(&driver, &vmw_pci_driver); |
| 1542 | if (ret) | 1549 | if (ret) |
| 1543 | DRM_ERROR("Failed initializing DRM.\n"); | 1550 | DRM_ERROR("Failed initializing DRM.\n"); |
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index c8487894b312..c43318d3416e 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c | |||
| @@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); | |||
| 932 | static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { | 932 | static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { |
| 933 | { | 933 | { |
| 934 | /* | 934 | /* |
| 935 | * CPU fan speed going up and down on Dell Studio XPS 8000 | ||
| 936 | * for unknown reasons. | ||
| 937 | */ | ||
| 938 | .ident = "Dell Studio XPS 8000", | ||
| 939 | .matches = { | ||
| 940 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 941 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"), | ||
| 942 | }, | ||
| 943 | }, | ||
| 944 | { | ||
| 945 | /* | ||
| 935 | * CPU fan speed going up and down on Dell Studio XPS 8100 | 946 | * CPU fan speed going up and down on Dell Studio XPS 8100 |
| 936 | * for unknown reasons. | 947 | * for unknown reasons. |
| 937 | */ | 948 | */ |
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index f77eb971ce95..4f695d8fcafa 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -90,7 +90,15 @@ static ssize_t show_power(struct device *dev, | |||
| 90 | pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), | 90 | pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), |
| 91 | REG_TDP_LIMIT3, &val); | 91 | REG_TDP_LIMIT3, &val); |
| 92 | 92 | ||
| 93 | tdp_limit = val >> 16; | 93 | /* |
| 94 | * On Carrizo and later platforms, ApmTdpLimit bit field | ||
| 95 | * is extended to 16:31 from 16:28. | ||
| 96 | */ | ||
| 97 | if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60) | ||
| 98 | tdp_limit = val >> 16; | ||
| 99 | else | ||
| 100 | tdp_limit = (val >> 16) & 0x1fff; | ||
| 101 | |||
| 94 | curr_pwr_watts = ((u64)(tdp_limit + | 102 | curr_pwr_watts = ((u64)(tdp_limit + |
| 95 | data->base_tdp)) << running_avg_range; | 103 | data->base_tdp)) << running_avg_range; |
| 96 | curr_pwr_watts -= running_avg_capture; | 104 | curr_pwr_watts -= running_avg_capture; |
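The fam15h_power hunk above picks between a 13-bit ApmTdpLimit field (bits 16:28, older parts) and a 16-bit one (bits 16:31, Carrizo and later) before computing the TDP. Extracting an n-bit field at a given offset is plain shift-and-mask; the register value below is made up purely to show the arithmetic.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t field(uint32_t reg, unsigned int shift, unsigned int bits)
    {
            return (reg >> shift) & ((1u << bits) - 1);
    }

    int main(void)
    {
            uint32_t reg = 0xABCD1234;      /* pretend REG_TDP_LIMIT3 contents */

            /* Bits 16:28 -> 13-bit field, i.e. mask 0x1fff after the shift. */
            printf("13-bit field: 0x%x\n", (unsigned int)field(reg, 16, 13)); /* 0xbcd  */
            /* Bits 16:31 -> 16-bit field, the plain shift is enough.        */
            printf("16-bit field: 0x%x\n", (unsigned int)field(reg, 16, 16)); /* 0xabcd */
            return 0;
    }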
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 539b0dea8034..e5e223938eec 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -2049,7 +2049,7 @@ static void do_attach(struct iommu_dev_data *dev_data, | |||
| 2049 | /* Update device table */ | 2049 | /* Update device table */ |
| 2050 | set_dte_entry(dev_data->devid, domain, ats); | 2050 | set_dte_entry(dev_data->devid, domain, ats); |
| 2051 | if (alias != dev_data->devid) | 2051 | if (alias != dev_data->devid) |
| 2052 | set_dte_entry(dev_data->devid, domain, ats); | 2052 | set_dte_entry(alias, domain, ats); |
| 2053 | 2053 | ||
| 2054 | device_flush_dte(dev_data); | 2054 | device_flush_dte(dev_data); |
| 2055 | } | 2055 | } |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index ac7387686ddc..986a53e3eb96 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info) | |||
| 1489 | { | 1489 | { |
| 1490 | struct pci_dev *pdev; | 1490 | struct pci_dev *pdev; |
| 1491 | 1491 | ||
| 1492 | if (dev_is_pci(info->dev)) | 1492 | if (!dev_is_pci(info->dev)) |
| 1493 | return; | 1493 | return; |
| 1494 | 1494 | ||
| 1495 | pdev = to_pci_dev(info->dev); | 1495 | pdev = to_pci_dev(info->dev); |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 8bbcbfe7695c..381ca5a37a7b 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/sizes.h> | 25 | #include <linux/sizes.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/dma-mapping.h> | ||
| 28 | 29 | ||
| 29 | #include <asm/barrier.h> | 30 | #include <asm/barrier.h> |
| 30 | 31 | ||
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 5f2fda12e006..fa49f9143b80 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c | |||
| @@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot) | |||
| 953 | { | 953 | { |
| 954 | pci_lock_rescan_remove(); | 954 | pci_lock_rescan_remove(); |
| 955 | 955 | ||
| 956 | if (slot->flags & SLOT_IS_GOING_AWAY) | 956 | if (slot->flags & SLOT_IS_GOING_AWAY) { |
| 957 | pci_unlock_rescan_remove(); | ||
| 957 | return -ENODEV; | 958 | return -ENODEV; |
| 959 | } | ||
| 958 | 960 | ||
| 959 | /* configure all functions */ | 961 | /* configure all functions */ |
| 960 | if (!(slot->flags & SLOT_ENABLED)) | 962 | if (!(slot->flags & SLOT_ENABLED)) |
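The acpiphp_glue.c hunk above releases the rescan/remove lock on the early -ENODEV return; the previous code returned with the lock still held whenever the slot was going away. A minimal pthread sketch of the "unlock on every exit path" rule; the flag and function names are invented.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int going_away = 1;              /* pretend the slot is being removed */

    static int enable_slot(void)
    {
            pthread_mutex_lock(&lock);

            if (going_away) {
                    pthread_mutex_unlock(&lock);   /* do not leak the lock */
                    return -ENODEV;
            }

            /* ... configure the slot while holding the lock ... */

            pthread_mutex_unlock(&lock);
            return 0;
    }

    int main(void)
    {
            printf("enable_slot() = %d\n", enable_slot());
            return 0;
    }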
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index c692dfebd0ba..50597f9522fe 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
| @@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp, | |||
| 139 | 139 | ||
| 140 | device = container_of(kobj, struct device, kobj); | 140 | device = container_of(kobj, struct device, kobj); |
| 141 | chp = to_channelpath(device); | 141 | chp = to_channelpath(device); |
| 142 | if (!chp->cmg_chars) | 142 | if (chp->cmg == -1) |
| 143 | return 0; | 143 | return 0; |
| 144 | 144 | ||
| 145 | return memory_read_from_buffer(buf, count, &off, | 145 | return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars, |
| 146 | chp->cmg_chars, sizeof(struct cmg_chars)); | 146 | sizeof(chp->cmg_chars)); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static struct bin_attribute chp_measurement_chars_attr = { | 149 | static struct bin_attribute chp_measurement_chars_attr = { |
| @@ -416,7 +416,8 @@ static void chp_release(struct device *dev) | |||
| 416 | * chp_update_desc - update channel-path description | 416 | * chp_update_desc - update channel-path description |
| 417 | * @chp - channel-path | 417 | * @chp - channel-path |
| 418 | * | 418 | * |
| 419 | * Update the channel-path description of the specified channel-path. | 419 | * Update the channel-path description of the specified channel-path |
| 420 | * including channel measurement related information. | ||
| 420 | * Return zero on success, non-zero otherwise. | 421 | * Return zero on success, non-zero otherwise. |
| 421 | */ | 422 | */ |
| 422 | int chp_update_desc(struct channel_path *chp) | 423 | int chp_update_desc(struct channel_path *chp) |
| @@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp) | |||
| 428 | return rc; | 429 | return rc; |
| 429 | 430 | ||
| 430 | rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); | 431 | rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); |
| 432 | if (rc) | ||
| 433 | return rc; | ||
| 431 | 434 | ||
| 432 | return rc; | 435 | return chsc_get_channel_measurement_chars(chp); |
| 433 | } | 436 | } |
| 434 | 437 | ||
| 435 | /** | 438 | /** |
| @@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid) | |||
| 466 | ret = -ENODEV; | 469 | ret = -ENODEV; |
| 467 | goto out_free; | 470 | goto out_free; |
| 468 | } | 471 | } |
| 469 | /* Get channel-measurement characteristics. */ | ||
| 470 | if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) { | ||
| 471 | ret = chsc_get_channel_measurement_chars(chp); | ||
| 472 | if (ret) | ||
| 473 | goto out_free; | ||
| 474 | } else { | ||
| 475 | chp->cmg = -1; | ||
| 476 | } | ||
| 477 | dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id); | 472 | dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id); |
| 478 | 473 | ||
| 479 | /* make it known to the system */ | 474 | /* make it known to the system */ |
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index 4efd5b867cc3..af0232290dc4 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
| @@ -48,7 +48,7 @@ struct channel_path { | |||
| 48 | /* Channel-measurement related stuff: */ | 48 | /* Channel-measurement related stuff: */ |
| 49 | int cmg; | 49 | int cmg; |
| 50 | int shared; | 50 | int shared; |
| 51 | void *cmg_chars; | 51 | struct cmg_chars cmg_chars; |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| 54 | /* Return channel_path struct for given chpid. */ | 54 | /* Return channel_path struct for given chpid. */ |
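The chp.h hunk above embeds struct cmg_chars directly in struct channel_path instead of keeping a pointer to a separately allocated copy, which is why the sysfs read path can now pass &chp->cmg_chars with sizeof(chp->cmg_chars) and no longer has an allocation to free. A tiny sketch of the sizeof difference; the structure names are placeholders.

    #include <stdint.h>
    #include <stdio.h>

    struct chars { uint32_t values[5]; };

    struct path_ptr { struct chars *chars; };   /* old style: separate allocation */
    struct path_emb { struct chars chars;   };  /* new style: embedded storage    */

    int main(void)
    {
            /* sizeof() on the embedded member is the payload size (20 bytes),
             * on the pointer member it is just the pointer size (8 on 64-bit). */
            printf("embedded: %zu bytes, pointer: %zu bytes\n",
                   sizeof(((struct path_emb *)0)->chars),
                   sizeof(((struct path_ptr *)0)->chars));
            return 0;
    }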
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index a831d18596a5..c424c0c7367e 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
| 15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| 17 | #include <linux/mutex.h> | ||
| 17 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
| 18 | 19 | ||
| 19 | #include <asm/cio.h> | 20 | #include <asm/cio.h> |
| @@ -224,8 +225,9 @@ out_unreg: | |||
| 224 | 225 | ||
| 225 | void chsc_chp_offline(struct chp_id chpid) | 226 | void chsc_chp_offline(struct chp_id chpid) |
| 226 | { | 227 | { |
| 227 | char dbf_txt[15]; | 228 | struct channel_path *chp = chpid_to_chp(chpid); |
| 228 | struct chp_link link; | 229 | struct chp_link link; |
| 230 | char dbf_txt[15]; | ||
| 229 | 231 | ||
| 230 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); | 232 | sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id); |
| 231 | CIO_TRACE_EVENT(2, dbf_txt); | 233 | CIO_TRACE_EVENT(2, dbf_txt); |
| @@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid) | |||
| 236 | link.chpid = chpid; | 238 | link.chpid = chpid; |
| 237 | /* Wait until previous actions have settled. */ | 239 | /* Wait until previous actions have settled. */ |
| 238 | css_wait_for_slow_path(); | 240 | css_wait_for_slow_path(); |
| 241 | |||
| 242 | mutex_lock(&chp->lock); | ||
| 243 | chp_update_desc(chp); | ||
| 244 | mutex_unlock(&chp->lock); | ||
| 245 | |||
| 239 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); | 246 | for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link); |
| 240 | } | 247 | } |
| 241 | 248 | ||
| @@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow) | |||
| 690 | 697 | ||
| 691 | void chsc_chp_online(struct chp_id chpid) | 698 | void chsc_chp_online(struct chp_id chpid) |
| 692 | { | 699 | { |
| 693 | char dbf_txt[15]; | 700 | struct channel_path *chp = chpid_to_chp(chpid); |
| 694 | struct chp_link link; | 701 | struct chp_link link; |
| 702 | char dbf_txt[15]; | ||
| 695 | 703 | ||
| 696 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); | 704 | sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id); |
| 697 | CIO_TRACE_EVENT(2, dbf_txt); | 705 | CIO_TRACE_EVENT(2, dbf_txt); |
| @@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid) | |||
| 701 | link.chpid = chpid; | 709 | link.chpid = chpid; |
| 702 | /* Wait until previous actions have settled. */ | 710 | /* Wait until previous actions have settled. */ |
| 703 | css_wait_for_slow_path(); | 711 | css_wait_for_slow_path(); |
| 712 | |||
| 713 | mutex_lock(&chp->lock); | ||
| 714 | chp_update_desc(chp); | ||
| 715 | mutex_unlock(&chp->lock); | ||
| 716 | |||
| 704 | for_each_subchannel_staged(__s390_process_res_acc, NULL, | 717 | for_each_subchannel_staged(__s390_process_res_acc, NULL, |
| 705 | &link); | 718 | &link); |
| 706 | css_schedule_reprobe(); | 719 | css_schedule_reprobe(); |
| @@ -967,22 +980,19 @@ static void | |||
| 967 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 980 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
| 968 | struct cmg_chars *chars) | 981 | struct cmg_chars *chars) |
| 969 | { | 982 | { |
| 970 | struct cmg_chars *cmg_chars; | ||
| 971 | int i, mask; | 983 | int i, mask; |
| 972 | 984 | ||
| 973 | cmg_chars = chp->cmg_chars; | ||
| 974 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { | 985 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { |
| 975 | mask = 0x80 >> (i + 3); | 986 | mask = 0x80 >> (i + 3); |
| 976 | if (cmcv & mask) | 987 | if (cmcv & mask) |
| 977 | cmg_chars->values[i] = chars->values[i]; | 988 | chp->cmg_chars.values[i] = chars->values[i]; |
| 978 | else | 989 | else |
| 979 | cmg_chars->values[i] = 0; | 990 | chp->cmg_chars.values[i] = 0; |
| 980 | } | 991 | } |
| 981 | } | 992 | } |
| 982 | 993 | ||
| 983 | int chsc_get_channel_measurement_chars(struct channel_path *chp) | 994 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
| 984 | { | 995 | { |
| 985 | struct cmg_chars *cmg_chars; | ||
| 986 | int ccode, ret; | 996 | int ccode, ret; |
| 987 | 997 | ||
| 988 | struct { | 998 | struct { |
| @@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
| 1006 | u32 data[NR_MEASUREMENT_CHARS]; | 1016 | u32 data[NR_MEASUREMENT_CHARS]; |
| 1007 | } __attribute__ ((packed)) *scmc_area; | 1017 | } __attribute__ ((packed)) *scmc_area; |
| 1008 | 1018 | ||
| 1009 | chp->cmg_chars = NULL; | 1019 | chp->shared = -1; |
| 1010 | cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); | 1020 | chp->cmg = -1; |
| 1011 | if (!cmg_chars) | 1021 | |
| 1012 | return -ENOMEM; | 1022 | if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm) |
| 1023 | return 0; | ||
| 1013 | 1024 | ||
| 1014 | spin_lock_irq(&chsc_page_lock); | 1025 | spin_lock_irq(&chsc_page_lock); |
| 1015 | memset(chsc_page, 0, PAGE_SIZE); | 1026 | memset(chsc_page, 0, PAGE_SIZE); |
| @@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
| 1031 | scmc_area->response.code); | 1042 | scmc_area->response.code); |
| 1032 | goto out; | 1043 | goto out; |
| 1033 | } | 1044 | } |
| 1034 | if (scmc_area->not_valid) { | 1045 | if (scmc_area->not_valid) |
| 1035 | chp->cmg = -1; | ||
| 1036 | chp->shared = -1; | ||
| 1037 | goto out; | 1046 | goto out; |
| 1038 | } | 1047 | |
| 1039 | chp->cmg = scmc_area->cmg; | 1048 | chp->cmg = scmc_area->cmg; |
| 1040 | chp->shared = scmc_area->shared; | 1049 | chp->shared = scmc_area->shared; |
| 1041 | if (chp->cmg != 2 && chp->cmg != 3) { | 1050 | if (chp->cmg != 2 && chp->cmg != 3) { |
| 1042 | /* No cmg-dependent data. */ | 1051 | /* No cmg-dependent data. */ |
| 1043 | goto out; | 1052 | goto out; |
| 1044 | } | 1053 | } |
| 1045 | chp->cmg_chars = cmg_chars; | ||
| 1046 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | 1054 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, |
| 1047 | (struct cmg_chars *) &scmc_area->data); | 1055 | (struct cmg_chars *) &scmc_area->data); |
| 1048 | out: | 1056 | out: |
| 1049 | spin_unlock_irq(&chsc_page_lock); | 1057 | spin_unlock_irq(&chsc_page_lock); |
| 1050 | if (!chp->cmg_chars) | ||
| 1051 | kfree(cmg_chars); | ||
| 1052 | |||
| 1053 | return ret; | 1058 | return ret; |
| 1054 | } | 1059 | } |
| 1055 | 1060 | ||
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h index 7b23f43c7b08..de1b6c1d172c 100644 --- a/drivers/s390/crypto/zcrypt_error.h +++ b/drivers/s390/crypto/zcrypt_error.h | |||
| @@ -112,9 +112,10 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
| 112 | atomic_set(&zcrypt_rescan_req, 1); | 112 | atomic_set(&zcrypt_rescan_req, 1); |
| 113 | zdev->online = 0; | 113 | zdev->online = 0; |
| 114 | pr_err("Cryptographic device %x failed and was set offline\n", | 114 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 115 | zdev->ap_dev->qid); | 115 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 116 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", | 116 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", |
| 117 | zdev->ap_dev->qid, zdev->online, ehdr->reply_code); | 117 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, |
| 118 | ehdr->reply_code); | ||
| 118 | return -EAGAIN; | 119 | return -EAGAIN; |
| 119 | case REP82_ERROR_TRANSPORT_FAIL: | 120 | case REP82_ERROR_TRANSPORT_FAIL: |
| 120 | case REP82_ERROR_MACHINE_FAILURE: | 121 | case REP82_ERROR_MACHINE_FAILURE: |
| @@ -123,16 +124,18 @@ static inline int convert_error(struct zcrypt_device *zdev, | |||
| 123 | atomic_set(&zcrypt_rescan_req, 1); | 124 | atomic_set(&zcrypt_rescan_req, 1); |
| 124 | zdev->online = 0; | 125 | zdev->online = 0; |
| 125 | pr_err("Cryptographic device %x failed and was set offline\n", | 126 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 126 | zdev->ap_dev->qid); | 127 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 127 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", | 128 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", |
| 128 | zdev->ap_dev->qid, zdev->online, ehdr->reply_code); | 129 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, |
| 130 | ehdr->reply_code); | ||
| 129 | return -EAGAIN; | 131 | return -EAGAIN; |
| 130 | default: | 132 | default: |
| 131 | zdev->online = 0; | 133 | zdev->online = 0; |
| 132 | pr_err("Cryptographic device %x failed and was set offline\n", | 134 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 133 | zdev->ap_dev->qid); | 135 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 134 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", | 136 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", |
| 135 | zdev->ap_dev->qid, zdev->online, ehdr->reply_code); | 137 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, |
| 138 | ehdr->reply_code); | ||
| 136 | return -EAGAIN; /* repeat the request on a different device. */ | 139 | return -EAGAIN; /* repeat the request on a different device. */ |
| 137 | } | 140 | } |
| 138 | } | 141 | } |
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 74edf2934e7c..eedfaa2cf715 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c | |||
| @@ -336,9 +336,10 @@ static int convert_type80(struct zcrypt_device *zdev, | |||
| 336 | /* The result is too short, the CEX2A card may not do that.. */ | 336 | /* The result is too short, the CEX2A card may not do that.. */ |
| 337 | zdev->online = 0; | 337 | zdev->online = 0; |
| 338 | pr_err("Cryptographic device %x failed and was set offline\n", | 338 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 339 | zdev->ap_dev->qid); | 339 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 340 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", | 340 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", |
| 341 | zdev->ap_dev->qid, zdev->online, t80h->code); | 341 | AP_QID_DEVICE(zdev->ap_dev->qid), |
| 342 | zdev->online, t80h->code); | ||
| 342 | 343 | ||
| 343 | return -EAGAIN; /* repeat the request on a different device. */ | 344 | return -EAGAIN; /* repeat the request on a different device. */ |
| 344 | } | 345 | } |
| @@ -368,9 +369,9 @@ static int convert_response(struct zcrypt_device *zdev, | |||
| 368 | default: /* Unknown response type, this should NEVER EVER happen */ | 369 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 369 | zdev->online = 0; | 370 | zdev->online = 0; |
| 370 | pr_err("Cryptographic device %x failed and was set offline\n", | 371 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 371 | zdev->ap_dev->qid); | 372 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 372 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", | 373 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", |
| 373 | zdev->ap_dev->qid, zdev->online); | 374 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); |
| 374 | return -EAGAIN; /* repeat the request on a different device. */ | 375 | return -EAGAIN; /* repeat the request on a different device. */ |
| 375 | } | 376 | } |
| 376 | } | 377 | } |
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 9a2dd472c1cc..21959719daef 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c | |||
| @@ -572,9 +572,9 @@ static int convert_type86_ica(struct zcrypt_device *zdev, | |||
| 572 | return -EINVAL; | 572 | return -EINVAL; |
| 573 | zdev->online = 0; | 573 | zdev->online = 0; |
| 574 | pr_err("Cryptographic device %x failed and was set offline\n", | 574 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 575 | zdev->ap_dev->qid); | 575 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 576 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", | 576 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d", |
| 577 | zdev->ap_dev->qid, zdev->online, | 577 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online, |
| 578 | msg->hdr.reply_code); | 578 | msg->hdr.reply_code); |
| 579 | return -EAGAIN; /* repeat the request on a different device. */ | 579 | return -EAGAIN; /* repeat the request on a different device. */ |
| 580 | } | 580 | } |
| @@ -715,9 +715,9 @@ static int convert_response_ica(struct zcrypt_device *zdev, | |||
| 715 | default: /* Unknown response type, this should NEVER EVER happen */ | 715 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 716 | zdev->online = 0; | 716 | zdev->online = 0; |
| 717 | pr_err("Cryptographic device %x failed and was set offline\n", | 717 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 718 | zdev->ap_dev->qid); | 718 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 719 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", | 719 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", |
| 720 | zdev->ap_dev->qid, zdev->online); | 720 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); |
| 721 | return -EAGAIN; /* repeat the request on a different device. */ | 721 | return -EAGAIN; /* repeat the request on a different device. */ |
| 722 | } | 722 | } |
| 723 | } | 723 | } |
| @@ -747,9 +747,9 @@ static int convert_response_xcrb(struct zcrypt_device *zdev, | |||
| 747 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ | 747 | xcRB->status = 0x0008044DL; /* HDD_InvalidParm */ |
| 748 | zdev->online = 0; | 748 | zdev->online = 0; |
| 749 | pr_err("Cryptographic device %x failed and was set offline\n", | 749 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 750 | zdev->ap_dev->qid); | 750 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 751 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", | 751 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", |
| 752 | zdev->ap_dev->qid, zdev->online); | 752 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); |
| 753 | return -EAGAIN; /* repeat the request on a different device. */ | 753 | return -EAGAIN; /* repeat the request on a different device. */ |
| 754 | } | 754 | } |
| 755 | } | 755 | } |
| @@ -773,9 +773,9 @@ static int convert_response_ep11_xcrb(struct zcrypt_device *zdev, | |||
| 773 | default: /* Unknown response type, this should NEVER EVER happen */ | 773 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 774 | zdev->online = 0; | 774 | zdev->online = 0; |
| 775 | pr_err("Cryptographic device %x failed and was set offline\n", | 775 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 776 | zdev->ap_dev->qid); | 776 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 777 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", | 777 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", |
| 778 | zdev->ap_dev->qid, zdev->online); | 778 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); |
| 779 | return -EAGAIN; /* repeat the request on a different device. */ | 779 | return -EAGAIN; /* repeat the request on a different device. */ |
| 780 | } | 780 | } |
| 781 | } | 781 | } |
| @@ -800,9 +800,9 @@ static int convert_response_rng(struct zcrypt_device *zdev, | |||
| 800 | default: /* Unknown response type, this should NEVER EVER happen */ | 800 | default: /* Unknown response type, this should NEVER EVER happen */ |
| 801 | zdev->online = 0; | 801 | zdev->online = 0; |
| 802 | pr_err("Cryptographic device %x failed and was set offline\n", | 802 | pr_err("Cryptographic device %x failed and was set offline\n", |
| 803 | zdev->ap_dev->qid); | 803 | AP_QID_DEVICE(zdev->ap_dev->qid)); |
| 804 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", | 804 | ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail", |
| 805 | zdev->ap_dev->qid, zdev->online); | 805 | AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online); |
| 806 | return -EAGAIN; /* repeat the request on a different device. */ | 806 | return -EAGAIN; /* repeat the request on a different device. */ |
| 807 | } | 807 | } |
| 808 | } | 808 | } |
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig index 37a0c7156087..b67661836c9f 100644 --- a/drivers/scsi/hisi_sas/Kconfig +++ b/drivers/scsi/hisi_sas/Kconfig | |||
| @@ -1,5 +1,7 @@ | |||
| 1 | config SCSI_HISI_SAS | 1 | config SCSI_HISI_SAS |
| 2 | tristate "HiSilicon SAS" | 2 | tristate "HiSilicon SAS" |
| 3 | depends on HAS_DMA | ||
| 4 | depends on ARM64 || COMPILE_TEST | ||
| 3 | select SCSI_SAS_LIBSAS | 5 | select SCSI_SAS_LIBSAS |
| 4 | select BLK_DEV_INTEGRITY | 6 | select BLK_DEV_INTEGRITY |
| 5 | help | 7 | help |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 4e08d1cd704d..bb669d32ccd0 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2893 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && | 2893 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && |
| 2894 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) | 2894 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) |
| 2895 | rw_max = q->limits.io_opt = | 2895 | rw_max = q->limits.io_opt = |
| 2896 | logical_to_sectors(sdp, sdkp->opt_xfer_blocks); | 2896 | sdkp->opt_xfer_blocks * sdp->sector_size; |
| 2897 | else | 2897 | else |
| 2898 | rw_max = BLK_DEF_MAX_SECTORS; | 2898 | rw_max = BLK_DEF_MAX_SECTORS; |
| 2899 | 2899 | ||
| @@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors) | |||
| 3268 | struct scsi_disk *sdkp = dev_get_drvdata(dev); | 3268 | struct scsi_disk *sdkp = dev_get_drvdata(dev); |
| 3269 | int ret = 0; | 3269 | int ret = 0; |
| 3270 | 3270 | ||
| 3271 | if (!sdkp) | 3271 | if (!sdkp) /* E.g.: runtime suspend following sd_remove() */ |
| 3272 | return 0; /* this can happen */ | 3272 | return 0; |
| 3273 | 3273 | ||
| 3274 | if (sdkp->WCE && sdkp->media_present) { | 3274 | if (sdkp->WCE && sdkp->media_present) { |
| 3275 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); | 3275 | sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n"); |
| @@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev) | |||
| 3308 | { | 3308 | { |
| 3309 | struct scsi_disk *sdkp = dev_get_drvdata(dev); | 3309 | struct scsi_disk *sdkp = dev_get_drvdata(dev); |
| 3310 | 3310 | ||
| 3311 | if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */ | ||
| 3312 | return 0; | ||
| 3313 | |||
| 3311 | if (!sdkp->device->manage_start_stop) | 3314 | if (!sdkp->device->manage_start_stop) |
| 3312 | return 0; | 3315 | return 0; |
| 3313 | 3316 | ||
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 8bd54a64efd6..64c867405ad4 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c | |||
| @@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev) | |||
| 144 | { | 144 | { |
| 145 | struct scsi_cd *cd = dev_get_drvdata(dev); | 145 | struct scsi_cd *cd = dev_get_drvdata(dev); |
| 146 | 146 | ||
| 147 | if (!cd) /* E.g.: runtime suspend following sr_remove() */ | ||
| 148 | return 0; | ||
| 149 | |||
| 147 | if (cd->media_present) | 150 | if (cd->media_present) |
| 148 | return -EBUSY; | 151 | return -EBUSY; |
| 149 | else | 152 | else |
| @@ -985,6 +988,7 @@ static int sr_remove(struct device *dev) | |||
| 985 | scsi_autopm_get_device(cd->device); | 988 | scsi_autopm_get_device(cd->device); |
| 986 | 989 | ||
| 987 | del_gendisk(cd->disk); | 990 | del_gendisk(cd->disk); |
| 991 | dev_set_drvdata(dev, NULL); | ||
| 988 | 992 | ||
| 989 | mutex_lock(&sr_ref_mutex); | 993 | mutex_lock(&sr_ref_mutex); |
| 990 | kref_put(&cd->kref, sr_kref_release); | 994 | kref_put(&cd->kref, sr_kref_release); |
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 36205c27c4d0..f6bed86c17f9 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -545,6 +545,7 @@ err_enable_device: | |||
| 545 | static void virtio_pci_remove(struct pci_dev *pci_dev) | 545 | static void virtio_pci_remove(struct pci_dev *pci_dev) |
| 546 | { | 546 | { |
| 547 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | 547 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); |
| 548 | struct device *dev = get_device(&vp_dev->vdev.dev); | ||
| 548 | 549 | ||
| 549 | unregister_virtio_device(&vp_dev->vdev); | 550 | unregister_virtio_device(&vp_dev->vdev); |
| 550 | 551 | ||
| @@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) | |||
| 554 | virtio_pci_modern_remove(vp_dev); | 555 | virtio_pci_modern_remove(vp_dev); |
| 555 | 556 | ||
| 556 | pci_disable_device(pci_dev); | 557 | pci_disable_device(pci_dev); |
| 558 | put_device(dev); | ||
| 557 | } | 559 | } |
| 558 | 560 | ||
| 559 | static struct pci_driver virtio_pci_driver = { | 561 | static struct pci_driver virtio_pci_driver = { |
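The virtio_pci_common.c hunk above pins the embedded struct device with get_device() before unregistering and drops that reference only after the rest of the teardown, presumably so the private structure containing the device is not freed while virtio_pci_remove() is still using it. A plain-C refcounting sketch of the pattern; the object and helpers are invented.

    #include <stdio.h>
    #include <stdlib.h>

    struct object {
            int refcount;
            int id;
    };

    static struct object *get_object(struct object *o) { o->refcount++; return o; }

    static void put_object(struct object *o)
    {
            if (--o->refcount == 0) {
                    printf("object %d freed\n", o->id);
                    free(o);
            }
    }

    int main(void)
    {
            struct object *o = calloc(1, sizeof(*o));

            o->refcount = 1;
            o->id = 42;

            get_object(o);   /* pin the object across teardown                  */
            put_object(o);   /* unregister-style drop of the original reference */
            printf("still safe to read id=%d\n", o->id);
            put_object(o);   /* drop our pin: now the object really goes away   */
            return 0;
    }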
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c index 945fc4327201..4ac2ca8a7656 100644 --- a/drivers/xen/tmem.c +++ b/drivers/xen/tmem.c | |||
| @@ -242,7 +242,7 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) | |||
| 242 | return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); | 242 | return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); |
| 243 | } | 243 | } |
| 244 | 244 | ||
| 245 | static struct cleancache_ops tmem_cleancache_ops = { | 245 | static const struct cleancache_ops tmem_cleancache_ops = { |
| 246 | .put_page = tmem_cleancache_put_page, | 246 | .put_page = tmem_cleancache_put_page, |
| 247 | .get_page = tmem_cleancache_get_page, | 247 | .get_page = tmem_cleancache_get_page, |
| 248 | .invalidate_page = tmem_cleancache_flush_page, | 248 | .invalidate_page = tmem_cleancache_flush_page, |
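The one-line xen/tmem.c change above constifies the cleancache_ops table. Function-pointer tables that never change after initialization are commonly declared const so they land in read-only data and cannot be re-pointed at runtime. A small standalone illustration; the ops layout is invented.

    #include <stdio.h>

    struct ops {
            int (*put_page)(int key);
            int (*get_page)(int key);
    };

    static int my_put(int key) { printf("put %d\n", key); return 0; }
    static int my_get(int key) { printf("get %d\n", key); return 0; }

    /* const: the table lives in read-only data; the pointers cannot be swapped. */
    static const struct ops my_ops = {
            .put_page = my_put,
            .get_page = my_get,
    };

    int main(void)
    {
            my_ops.put_page(1);
            my_ops.get_page(1);
            return 0;
    }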
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 88d9af3d4581..5fb60ea7eee2 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
| @@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, | |||
| 328 | list_add_tail(&work->ordered_list, &wq->ordered_list); | 328 | list_add_tail(&work->ordered_list, &wq->ordered_list); |
| 329 | spin_unlock_irqrestore(&wq->list_lock, flags); | 329 | spin_unlock_irqrestore(&wq->list_lock, flags); |
| 330 | } | 330 | } |
| 331 | queue_work(wq->normal_wq, &work->normal_work); | ||
| 332 | trace_btrfs_work_queued(work); | 331 | trace_btrfs_work_queued(work); |
| 332 | queue_work(wq->normal_wq, &work->normal_work); | ||
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | void btrfs_queue_work(struct btrfs_workqueue *wq, | 335 | void btrfs_queue_work(struct btrfs_workqueue *wq, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index dd08e29f5117..4545e2e2ad45 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -182,6 +182,7 @@ static struct btrfs_lockdep_keyset { | |||
| 182 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, | 182 | { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, |
| 183 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, | 183 | { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, |
| 184 | { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, | 184 | { .id = BTRFS_UUID_TREE_OBJECTID, .name_stem = "uuid" }, |
| 185 | { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" }, | ||
| 185 | { .id = 0, .name_stem = "tree" }, | 186 | { .id = 0, .name_stem = "tree" }, |
| 186 | }; | 187 | }; |
| 187 | 188 | ||
| @@ -1787,7 +1788,6 @@ static int cleaner_kthread(void *arg) | |||
| 1787 | int again; | 1788 | int again; |
| 1788 | struct btrfs_trans_handle *trans; | 1789 | struct btrfs_trans_handle *trans; |
| 1789 | 1790 | ||
| 1790 | set_freezable(); | ||
| 1791 | do { | 1791 | do { |
| 1792 | again = 0; | 1792 | again = 0; |
| 1793 | 1793 | ||
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c index 393e36bd5845..53dbeaf6ce94 100644 --- a/fs/btrfs/free-space-tree.c +++ b/fs/btrfs/free-space-tree.c | |||
| @@ -153,6 +153,20 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize) | |||
| 153 | 153 | ||
| 154 | static unsigned long *alloc_bitmap(u32 bitmap_size) | 154 | static unsigned long *alloc_bitmap(u32 bitmap_size) |
| 155 | { | 155 | { |
| 156 | void *mem; | ||
| 157 | |||
| 158 | /* | ||
| 159 | * The allocation size varies, observed numbers were < 4K up to 16K. | ||
| 160 | * Using vmalloc unconditionally would be too heavy, we'll try | ||
| 161 | * contiguous allocations first. | ||
| 162 | */ | ||
| 163 | if (bitmap_size <= PAGE_SIZE) | ||
| 164 | return kzalloc(bitmap_size, GFP_NOFS); | ||
| 165 | |||
| 166 | mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN); | ||
| 167 | if (mem) | ||
| 168 | return mem; | ||
| 169 | |||
| 156 | return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO, | 170 | return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO, |
| 157 | PAGE_KERNEL); | 171 | PAGE_KERNEL); |
| 158 | } | 172 | } |
| @@ -289,7 +303,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans, | |||
| 289 | 303 | ||
| 290 | ret = 0; | 304 | ret = 0; |
| 291 | out: | 305 | out: |
| 292 | vfree(bitmap); | 306 | kvfree(bitmap); |
| 293 | if (ret) | 307 | if (ret) |
| 294 | btrfs_abort_transaction(trans, root, ret); | 308 | btrfs_abort_transaction(trans, root, ret); |
| 295 | return ret; | 309 | return ret; |
| @@ -438,7 +452,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans, | |||
| 438 | 452 | ||
| 439 | ret = 0; | 453 | ret = 0; |
| 440 | out: | 454 | out: |
| 441 | vfree(bitmap); | 455 | kvfree(bitmap); |
| 442 | if (ret) | 456 | if (ret) |
| 443 | btrfs_abort_transaction(trans, root, ret); | 457 | btrfs_abort_transaction(trans, root, ret); |
| 444 | return ret; | 458 | return ret; |
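The alloc_bitmap() hunk above first attempts a physically contiguous kzalloc() (with __GFP_NOWARN so an expected failure stays quiet) and only falls back to vmalloc for large bitmaps, and the callers now use kvfree() so either kind of allocation is released correctly. Below is a userspace analogue of "try the cheap path first, fall back to the heavyweight one, and make the free side handle both"; the scratch-buffer scheme is invented and is not how the kernel allocators work.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define SCRATCH_SIZE 4096
    static unsigned char scratch[SCRATCH_SIZE];
    static int scratch_busy;

    /* Small requests reuse a cheap static buffer; big ones hit the allocator. */
    static void *alloc_bitmap(size_t size)
    {
            if (size <= SCRATCH_SIZE && !scratch_busy) {
                    scratch_busy = 1;
                    memset(scratch, 0, size);
                    return scratch;
            }
            return calloc(1, size);
    }

    /* The free side must cope with either origin, much like kvfree(). */
    static void free_bitmap(void *p)
    {
            if (p == scratch)
                    scratch_busy = 0;
            else
                    free(p);
    }

    int main(void)
    {
            void *small = alloc_bitmap(512);
            void *big = alloc_bitmap(64 * 1024);

            printf("small from scratch buffer: %s\n", small == scratch ? "yes" : "no");
            printf("big from the heap:         %s\n", big != scratch ? "yes" : "no");

            free_bitmap(small);
            free_bitmap(big);
            return 0;
    }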
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e28f3d4691af..5f06eb1f4384 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -7116,21 +7116,41 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
| 7116 | if (ret) | 7116 | if (ret) |
| 7117 | return ERR_PTR(ret); | 7117 | return ERR_PTR(ret); |
| 7118 | 7118 | ||
| 7119 | em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, | 7119 | /* |
| 7120 | ins.offset, ins.offset, ins.offset, 0); | 7120 | * Create the ordered extent before the extent map. This is to avoid |
| 7121 | if (IS_ERR(em)) { | 7121 | * races with the fast fsync path that would lead to it logging file |
| 7122 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); | 7122 | * extent items that point to disk extents that were not yet written to. |
| 7123 | return em; | 7123 | * The fast fsync path collects ordered extents into a local list and |
| 7124 | } | 7124 | * then collects all the new extent maps, so we must create the ordered |
| 7125 | 7125 | * extent first and make sure the fast fsync path collects any new | |
| 7126 | * ordered extents after collecting new extent maps as well. | ||
| 7127 | * The fsync path simply can not rely on inode_dio_wait() because it | ||
| 7128 | * causes deadlock with AIO. | ||
| 7129 | */ | ||
| 7126 | ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, | 7130 | ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, |
| 7127 | ins.offset, ins.offset, 0); | 7131 | ins.offset, ins.offset, 0); |
| 7128 | if (ret) { | 7132 | if (ret) { |
| 7129 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); | 7133 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); |
| 7130 | free_extent_map(em); | ||
| 7131 | return ERR_PTR(ret); | 7134 | return ERR_PTR(ret); |
| 7132 | } | 7135 | } |
| 7133 | 7136 | ||
| 7137 | em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, | ||
| 7138 | ins.offset, ins.offset, ins.offset, 0); | ||
| 7139 | if (IS_ERR(em)) { | ||
| 7140 | struct btrfs_ordered_extent *oe; | ||
| 7141 | |||
| 7142 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); | ||
| 7143 | oe = btrfs_lookup_ordered_extent(inode, start); | ||
| 7144 | ASSERT(oe); | ||
| 7145 | if (WARN_ON(!oe)) | ||
| 7146 | return em; | ||
| 7147 | set_bit(BTRFS_ORDERED_IOERR, &oe->flags); | ||
| 7148 | set_bit(BTRFS_ORDERED_IO_DONE, &oe->flags); | ||
| 7149 | btrfs_remove_ordered_extent(inode, oe); | ||
| 7150 | /* Once for our lookup and once for the ordered extents tree. */ | ||
| 7151 | btrfs_put_ordered_extent(oe); | ||
| 7152 | btrfs_put_ordered_extent(oe); | ||
| 7153 | } | ||
| 7134 | return em; | 7154 | return em; |
| 7135 | } | 7155 | } |
| 7136 | 7156 | ||
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index fd1c4d982463..2bd0011450df 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
| @@ -575,7 +575,8 @@ static int is_cowonly_root(u64 root_objectid) | |||
| 575 | root_objectid == BTRFS_TREE_LOG_OBJECTID || | 575 | root_objectid == BTRFS_TREE_LOG_OBJECTID || |
| 576 | root_objectid == BTRFS_CSUM_TREE_OBJECTID || | 576 | root_objectid == BTRFS_CSUM_TREE_OBJECTID || |
| 577 | root_objectid == BTRFS_UUID_TREE_OBJECTID || | 577 | root_objectid == BTRFS_UUID_TREE_OBJECTID || |
| 578 | root_objectid == BTRFS_QUOTA_TREE_OBJECTID) | 578 | root_objectid == BTRFS_QUOTA_TREE_OBJECTID || |
| 579 | root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID) | ||
| 579 | return 1; | 580 | return 1; |
| 580 | return 0; | 581 | return 0; |
| 581 | } | 582 | } |
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index e0ac85949067..539e7b5e3f86 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c | |||
| @@ -202,6 +202,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF); | |||
| 202 | BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56); | 202 | BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56); |
| 203 | BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA); | 203 | BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA); |
| 204 | BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); | 204 | BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES); |
| 205 | BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE); | ||
| 205 | 206 | ||
| 206 | static struct attribute *btrfs_supported_feature_attrs[] = { | 207 | static struct attribute *btrfs_supported_feature_attrs[] = { |
| 207 | BTRFS_FEAT_ATTR_PTR(mixed_backref), | 208 | BTRFS_FEAT_ATTR_PTR(mixed_backref), |
| @@ -213,6 +214,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { | |||
| 213 | BTRFS_FEAT_ATTR_PTR(raid56), | 214 | BTRFS_FEAT_ATTR_PTR(raid56), |
| 214 | BTRFS_FEAT_ATTR_PTR(skinny_metadata), | 215 | BTRFS_FEAT_ATTR_PTR(skinny_metadata), |
| 215 | BTRFS_FEAT_ATTR_PTR(no_holes), | 216 | BTRFS_FEAT_ATTR_PTR(no_holes), |
| 217 | BTRFS_FEAT_ATTR_PTR(free_space_tree), | ||
| 216 | NULL | 218 | NULL |
| 217 | }; | 219 | }; |
| 218 | 220 | ||
| @@ -780,6 +782,39 @@ failure: | |||
| 780 | return error; | 782 | return error; |
| 781 | } | 783 | } |
| 782 | 784 | ||
| 785 | |||
| 786 | /* | ||
| 787 | * Change per-fs features in /sys/fs/btrfs/UUID/features to match current | ||
| 788 | * values in superblock. Call after any changes to incompat/compat_ro flags | ||
| 789 | */ | ||
| 790 | void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, | ||
| 791 | u64 bit, enum btrfs_feature_set set) | ||
| 792 | { | ||
| 793 | struct btrfs_fs_devices *fs_devs; | ||
| 794 | struct kobject *fsid_kobj; | ||
| 795 | u64 features; | ||
| 796 | int ret; | ||
| 797 | |||
| 798 | if (!fs_info) | ||
| 799 | return; | ||
| 800 | |||
| 801 | features = get_features(fs_info, set); | ||
| 802 | ASSERT(bit & supported_feature_masks[set]); | ||
| 803 | |||
| 804 | fs_devs = fs_info->fs_devices; | ||
| 805 | fsid_kobj = &fs_devs->fsid_kobj; | ||
| 806 | |||
| 807 | if (!fsid_kobj->state_initialized) | ||
| 808 | return; | ||
| 809 | |||
| 810 | /* | ||
| 811 | * FIXME: this is too heavy to update just one value, ideally we'd like | ||
| 812 | * to use sysfs_update_group but some refactoring is needed first. | ||
| 813 | */ | ||
| 814 | sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group); | ||
| 815 | ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group); | ||
| 816 | } | ||
| 817 | |||
| 783 | static int btrfs_init_debugfs(void) | 818 | static int btrfs_init_debugfs(void) |
| 784 | { | 819 | { |
| 785 | #ifdef CONFIG_DEBUG_FS | 820 | #ifdef CONFIG_DEBUG_FS |
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h index 9c09522125a6..d7da1a4c2f6c 100644 --- a/fs/btrfs/sysfs.h +++ b/fs/btrfs/sysfs.h | |||
| @@ -56,7 +56,7 @@ static struct btrfs_feature_attr btrfs_attr_##_name = { \ | |||
| 56 | #define BTRFS_FEAT_ATTR_COMPAT(name, feature) \ | 56 | #define BTRFS_FEAT_ATTR_COMPAT(name, feature) \ |
| 57 | BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature) | 57 | BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature) |
| 58 | #define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \ | 58 | #define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \ |
| 59 | BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT, feature) | 59 | BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature) |
| 60 | #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ | 60 | #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \ |
| 61 | BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) | 61 | BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature) |
| 62 | 62 | ||
| @@ -90,4 +90,7 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs, | |||
| 90 | struct kobject *parent); | 90 | struct kobject *parent); |
| 91 | int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs); | 91 | int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs); |
| 92 | void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); | 92 | void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs); |
| 93 | void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info, | ||
| 94 | u64 bit, enum btrfs_feature_set set); | ||
| 95 | |||
| 93 | #endif /* _BTRFS_SYSFS_H_ */ | 96 | #endif /* _BTRFS_SYSFS_H_ */ |
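A minimal sketch of how the new helper is meant to be called (the wrapper function and the exact bit constant below are assumptions for illustration, not taken from this patch): after flipping a read-only compat bit in the in-memory superblock, a caller refreshes /sys/fs/btrfs/<UUID>/features so userspace sees the new flag.

        /* Hypothetical caller, not part of the patch. */
        static void announce_free_space_tree(struct btrfs_fs_info *fs_info)
        {
                u64 bit = BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE;      /* assumed name */

                btrfs_sysfs_feature_update(fs_info, bit, FEAT_COMPAT_RO);
        }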
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index b1d920b30070..0e1e61a7ec23 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c | |||
| @@ -82,18 +82,18 @@ void btrfs_destroy_test_fs(void) | |||
| 82 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void) | 82 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void) |
| 83 | { | 83 | { |
| 84 | struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info), | 84 | struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info), |
| 85 | GFP_NOFS); | 85 | GFP_KERNEL); |
| 86 | 86 | ||
| 87 | if (!fs_info) | 87 | if (!fs_info) |
| 88 | return fs_info; | 88 | return fs_info; |
| 89 | fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices), | 89 | fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices), |
| 90 | GFP_NOFS); | 90 | GFP_KERNEL); |
| 91 | if (!fs_info->fs_devices) { | 91 | if (!fs_info->fs_devices) { |
| 92 | kfree(fs_info); | 92 | kfree(fs_info); |
| 93 | return NULL; | 93 | return NULL; |
| 94 | } | 94 | } |
| 95 | fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block), | 95 | fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block), |
| 96 | GFP_NOFS); | 96 | GFP_KERNEL); |
| 97 | if (!fs_info->super_copy) { | 97 | if (!fs_info->super_copy) { |
| 98 | kfree(fs_info->fs_devices); | 98 | kfree(fs_info->fs_devices); |
| 99 | kfree(fs_info); | 99 | kfree(fs_info); |
| @@ -180,11 +180,11 @@ btrfs_alloc_dummy_block_group(unsigned long length) | |||
| 180 | { | 180 | { |
| 181 | struct btrfs_block_group_cache *cache; | 181 | struct btrfs_block_group_cache *cache; |
| 182 | 182 | ||
| 183 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | 183 | cache = kzalloc(sizeof(*cache), GFP_KERNEL); |
| 184 | if (!cache) | 184 | if (!cache) |
| 185 | return NULL; | 185 | return NULL; |
| 186 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | 186 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), |
| 187 | GFP_NOFS); | 187 | GFP_KERNEL); |
| 188 | if (!cache->free_space_ctl) { | 188 | if (!cache->free_space_ctl) { |
| 189 | kfree(cache); | 189 | kfree(cache); |
| 190 | return NULL; | 190 | return NULL; |
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index e29fa297e053..669b58201e36 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c | |||
| @@ -94,7 +94,7 @@ static int test_find_delalloc(void) | |||
| 94 | * test. | 94 | * test. |
| 95 | */ | 95 | */ |
| 96 | for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { | 96 | for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { |
| 97 | page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); | 97 | page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); |
| 98 | if (!page) { | 98 | if (!page) { |
| 99 | test_msg("Failed to allocate test page\n"); | 99 | test_msg("Failed to allocate test page\n"); |
| 100 | ret = -ENOMEM; | 100 | ret = -ENOMEM; |
| @@ -113,7 +113,7 @@ static int test_find_delalloc(void) | |||
| 113 | * |--- delalloc ---| | 113 | * |--- delalloc ---| |
| 114 | * |--- search ---| | 114 | * |--- search ---| |
| 115 | */ | 115 | */ |
| 116 | set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_NOFS); | 116 | set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL); |
| 117 | start = 0; | 117 | start = 0; |
| 118 | end = 0; | 118 | end = 0; |
| 119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -144,7 +144,7 @@ static int test_find_delalloc(void) | |||
| 144 | test_msg("Couldn't find the locked page\n"); | 144 | test_msg("Couldn't find the locked page\n"); |
| 145 | goto out_bits; | 145 | goto out_bits; |
| 146 | } | 146 | } |
| 147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_NOFS); | 147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL); |
| 148 | start = test_start; | 148 | start = test_start; |
| 149 | end = 0; | 149 | end = 0; |
| 150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -199,7 +199,7 @@ static int test_find_delalloc(void) | |||
| 199 | * | 199 | * |
| 200 | * We are re-using our test_start from above since it works out well. | 200 | * We are re-using our test_start from above since it works out well. |
| 201 | */ | 201 | */ |
| 202 | set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_NOFS); | 202 | set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL); |
| 203 | start = test_start; | 203 | start = test_start; |
| 204 | end = 0; | 204 | end = 0; |
| 205 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 205 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -262,7 +262,7 @@ static int test_find_delalloc(void) | |||
| 262 | } | 262 | } |
| 263 | ret = 0; | 263 | ret = 0; |
| 264 | out_bits: | 264 | out_bits: |
| 265 | clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS); | 265 | clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); |
| 266 | out: | 266 | out: |
| 267 | if (locked_page) | 267 | if (locked_page) |
| 268 | page_cache_release(locked_page); | 268 | page_cache_release(locked_page); |
| @@ -360,7 +360,7 @@ static int test_eb_bitmaps(void) | |||
| 360 | 360 | ||
| 361 | test_msg("Running extent buffer bitmap tests\n"); | 361 | test_msg("Running extent buffer bitmap tests\n"); |
| 362 | 362 | ||
| 363 | bitmap = kmalloc(len, GFP_NOFS); | 363 | bitmap = kmalloc(len, GFP_KERNEL); |
| 364 | if (!bitmap) { | 364 | if (!bitmap) { |
| 365 | test_msg("Couldn't allocate test bitmap\n"); | 365 | test_msg("Couldn't allocate test bitmap\n"); |
| 366 | return -ENOMEM; | 366 | return -ENOMEM; |
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 5de55fdd28bc..e2d3da02deee 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
| @@ -974,7 +974,7 @@ static int test_extent_accounting(void) | |||
| 974 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | 974 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, |
| 975 | EXTENT_DELALLOC | EXTENT_DIRTY | | 975 | EXTENT_DELALLOC | EXTENT_DIRTY | |
| 976 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | 976 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, |
| 977 | NULL, GFP_NOFS); | 977 | NULL, GFP_KERNEL); |
| 978 | if (ret) { | 978 | if (ret) { |
| 979 | test_msg("clear_extent_bit returned %d\n", ret); | 979 | test_msg("clear_extent_bit returned %d\n", ret); |
| 980 | goto out; | 980 | goto out; |
| @@ -1045,7 +1045,7 @@ static int test_extent_accounting(void) | |||
| 1045 | BTRFS_MAX_EXTENT_SIZE+8191, | 1045 | BTRFS_MAX_EXTENT_SIZE+8191, |
| 1046 | EXTENT_DIRTY | EXTENT_DELALLOC | | 1046 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1047 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | 1047 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, |
| 1048 | NULL, GFP_NOFS); | 1048 | NULL, GFP_KERNEL); |
| 1049 | if (ret) { | 1049 | if (ret) { |
| 1050 | test_msg("clear_extent_bit returned %d\n", ret); | 1050 | test_msg("clear_extent_bit returned %d\n", ret); |
| 1051 | goto out; | 1051 | goto out; |
| @@ -1079,7 +1079,7 @@ static int test_extent_accounting(void) | |||
| 1079 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | 1079 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, |
| 1080 | EXTENT_DIRTY | EXTENT_DELALLOC | | 1080 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1081 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | 1081 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, |
| 1082 | NULL, GFP_NOFS); | 1082 | NULL, GFP_KERNEL); |
| 1083 | if (ret) { | 1083 | if (ret) { |
| 1084 | test_msg("clear_extent_bit returned %d\n", ret); | 1084 | test_msg("clear_extent_bit returned %d\n", ret); |
| 1085 | goto out; | 1085 | goto out; |
| @@ -1096,7 +1096,7 @@ out: | |||
| 1096 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, | 1096 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1, |
| 1097 | EXTENT_DIRTY | EXTENT_DELALLOC | | 1097 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1098 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | 1098 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, |
| 1099 | NULL, GFP_NOFS); | 1099 | NULL, GFP_KERNEL); |
| 1100 | iput(inode); | 1100 | iput(inode); |
| 1101 | btrfs_free_dummy_root(root); | 1101 | btrfs_free_dummy_root(root); |
| 1102 | return ret; | 1102 | return ret; |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 323e12cc9d2f..978c3a810893 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -4127,7 +4127,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
| 4127 | struct inode *inode, | 4127 | struct inode *inode, |
| 4128 | struct btrfs_path *path, | 4128 | struct btrfs_path *path, |
| 4129 | struct list_head *logged_list, | 4129 | struct list_head *logged_list, |
| 4130 | struct btrfs_log_ctx *ctx) | 4130 | struct btrfs_log_ctx *ctx, |
| 4131 | const u64 start, | ||
| 4132 | const u64 end) | ||
| 4131 | { | 4133 | { |
| 4132 | struct extent_map *em, *n; | 4134 | struct extent_map *em, *n; |
| 4133 | struct list_head extents; | 4135 | struct list_head extents; |
| @@ -4166,7 +4168,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, | |||
| 4166 | } | 4168 | } |
| 4167 | 4169 | ||
| 4168 | list_sort(NULL, &extents, extent_cmp); | 4170 | list_sort(NULL, &extents, extent_cmp); |
| 4169 | 4171 | /* | |
| 4172 | * Collect any new ordered extents within the range. This is to | ||
| 4173 | * prevent logging file extent items without waiting for the disk | ||
| 4174 | * location they point to being written. We do this only to deal | ||
| 4175 | * with races against concurrent lockless direct IO writes. | ||
| 4176 | */ | ||
| 4177 | btrfs_get_logged_extents(inode, logged_list, start, end); | ||
| 4170 | process: | 4178 | process: |
| 4171 | while (!list_empty(&extents)) { | 4179 | while (!list_empty(&extents)) { |
| 4172 | em = list_entry(extents.next, struct extent_map, list); | 4180 | em = list_entry(extents.next, struct extent_map, list); |
| @@ -4701,7 +4709,7 @@ log_extents: | |||
| 4701 | goto out_unlock; | 4709 | goto out_unlock; |
| 4702 | } | 4710 | } |
| 4703 | ret = btrfs_log_changed_extents(trans, root, inode, dst_path, | 4711 | ret = btrfs_log_changed_extents(trans, root, inode, dst_path, |
| 4704 | &logged_list, ctx); | 4712 | &logged_list, ctx, start, end); |
| 4705 | if (ret) { | 4713 | if (ret) { |
| 4706 | err = ret; | 4714 | err = ret; |
| 4707 | goto out_unlock; | 4715 | goto out_unlock; |
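The collection step this adds depends on a plain interval-overlap test against the logged range; a stand-alone illustration under hypothetical names (btrfs_get_logged_extents() itself does more than this):

        /* An ordered extent is collected only if it intersects the
         * [log_start, log_end] range currently being logged. */
        static int ordered_in_log_range(u64 oe_start, u64 oe_end,
                                        u64 log_start, u64 log_end)
        {
                return oe_start <= log_end && log_start <= oe_end;
        }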
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 89d008dc08e2..fe5efada9d68 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h | |||
| @@ -42,6 +42,10 @@ int drm_atomic_helper_commit(struct drm_device *dev, | |||
| 42 | struct drm_atomic_state *state, | 42 | struct drm_atomic_state *state, |
| 43 | bool async); | 43 | bool async); |
| 44 | 44 | ||
| 45 | bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, | ||
| 46 | struct drm_atomic_state *old_state, | ||
| 47 | struct drm_crtc *crtc); | ||
| 48 | |||
| 45 | void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, | 49 | void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, |
| 46 | struct drm_atomic_state *old_state); | 50 | struct drm_atomic_state *old_state); |
| 47 | 51 | ||
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h index bda5ec0b4b4d..fccf7f44139d 100644 --- a/include/linux/cleancache.h +++ b/include/linux/cleancache.h | |||
| @@ -37,7 +37,7 @@ struct cleancache_ops { | |||
| 37 | void (*invalidate_fs)(int); | 37 | void (*invalidate_fs)(int); |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | extern int cleancache_register_ops(struct cleancache_ops *ops); | 40 | extern int cleancache_register_ops(const struct cleancache_ops *ops); |
| 41 | extern void __cleancache_init_fs(struct super_block *); | 41 | extern void __cleancache_init_fs(struct super_block *); |
| 42 | extern void __cleancache_init_shared_fs(struct super_block *); | 42 | extern void __cleancache_init_shared_fs(struct super_block *); |
| 43 | extern int __cleancache_get_page(struct page *); | 43 | extern int __cleancache_get_page(struct page *); |
| @@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *); | |||
| 48 | 48 | ||
| 49 | #ifdef CONFIG_CLEANCACHE | 49 | #ifdef CONFIG_CLEANCACHE |
| 50 | #define cleancache_enabled (1) | 50 | #define cleancache_enabled (1) |
| 51 | static inline bool cleancache_fs_enabled(struct page *page) | ||
| 52 | { | ||
| 53 | return page->mapping->host->i_sb->cleancache_poolid >= 0; | ||
| 54 | } | ||
| 55 | static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) | 51 | static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) |
| 56 | { | 52 | { |
| 57 | return mapping->host->i_sb->cleancache_poolid >= 0; | 53 | return mapping->host->i_sb->cleancache_poolid >= 0; |
| 58 | } | 54 | } |
| 55 | static inline bool cleancache_fs_enabled(struct page *page) | ||
| 56 | { | ||
| 57 | return cleancache_fs_enabled_mapping(page->mapping); | ||
| 58 | } | ||
| 59 | #else | 59 | #else |
| 60 | #define cleancache_enabled (0) | 60 | #define cleancache_enabled (0) |
| 61 | #define cleancache_fs_enabled(_page) (0) | 61 | #define cleancache_fs_enabled(_page) (0) |
| @@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb) | |||
| 89 | 89 | ||
| 90 | static inline int cleancache_get_page(struct page *page) | 90 | static inline int cleancache_get_page(struct page *page) |
| 91 | { | 91 | { |
| 92 | int ret = -1; | ||
| 93 | |||
| 94 | if (cleancache_enabled && cleancache_fs_enabled(page)) | 92 | if (cleancache_enabled && cleancache_fs_enabled(page)) |
| 95 | ret = __cleancache_get_page(page); | 93 | return __cleancache_get_page(page); |
| 96 | return ret; | 94 | return -1; |
| 97 | } | 95 | } |
| 98 | 96 | ||
| 99 | static inline void cleancache_put_page(struct page *page) | 97 | static inline void cleancache_put_page(struct page *page) |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 0639dcc98195..81de7123959d 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -165,7 +165,6 @@ struct ftrace_ops { | |||
| 165 | ftrace_func_t saved_func; | 165 | ftrace_func_t saved_func; |
| 166 | int __percpu *disabled; | 166 | int __percpu *disabled; |
| 167 | #ifdef CONFIG_DYNAMIC_FTRACE | 167 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 168 | int nr_trampolines; | ||
| 169 | struct ftrace_ops_hash local_hash; | 168 | struct ftrace_ops_hash local_hash; |
| 170 | struct ftrace_ops_hash *func_hash; | 169 | struct ftrace_ops_hash *func_hash; |
| 171 | struct ftrace_ops_hash old_hash; | 170 | struct ftrace_ops_hash old_hash; |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index f28dff313b07..a5c539fa5d2b 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -133,8 +133,9 @@ struct iommu_dm_region { | |||
| 133 | 133 | ||
| 134 | /** | 134 | /** |
| 135 | * struct iommu_ops - iommu ops and capabilities | 135 | * struct iommu_ops - iommu ops and capabilities |
| 136 | * @domain_init: init iommu domain | 136 | * @capable: check capability |
| 137 | * @domain_destroy: destroy iommu domain | 137 | * @domain_alloc: allocate iommu domain |
| 138 | * @domain_free: free iommu domain | ||
| 138 | * @attach_dev: attach device to an iommu domain | 139 | * @attach_dev: attach device to an iommu domain |
| 139 | * @detach_dev: detach device from an iommu domain | 140 | * @detach_dev: detach device from an iommu domain |
| 140 | * @map: map a physically contiguous memory region to an iommu domain | 141 | * @map: map a physically contiguous memory region to an iommu domain |
| @@ -144,8 +145,15 @@ struct iommu_dm_region { | |||
| 144 | * @iova_to_phys: translate iova to physical address | 145 | * @iova_to_phys: translate iova to physical address |
| 145 | * @add_device: add device to iommu grouping | 146 | * @add_device: add device to iommu grouping |
| 146 | * @remove_device: remove device from iommu grouping | 147 | * @remove_device: remove device from iommu grouping |
| 148 | * @device_group: find iommu group for a particular device | ||
| 147 | * @domain_get_attr: Query domain attributes | 149 | * @domain_get_attr: Query domain attributes |
| 148 | * @domain_set_attr: Change domain attributes | 150 | * @domain_set_attr: Change domain attributes |
| 151 | * @get_dm_regions: Request list of direct mapping requirements for a device | ||
| 152 | * @put_dm_regions: Free list of direct mapping requirements for a device | ||
| 153 | * @domain_window_enable: Configure and enable a particular window for a domain | ||
| 154 | * @domain_window_disable: Disable a particular window for a domain | ||
| 155 | * @domain_set_windows: Set the number of windows for a domain | ||
| 156 | * @domain_get_windows: Return the number of windows for a domain | ||
| 149 | * @of_xlate: add OF master IDs to iommu grouping | 157 | * @of_xlate: add OF master IDs to iommu grouping |
| 150 | * @pgsize_bitmap: bitmap of supported page sizes | 158 | * @pgsize_bitmap: bitmap of supported page sizes |
| 151 | * @priv: per-instance data private to the iommu driver | 159 | * @priv: per-instance data private to the iommu driver |
| @@ -182,9 +190,9 @@ struct iommu_ops { | |||
| 182 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, | 190 | int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr, |
| 183 | phys_addr_t paddr, u64 size, int prot); | 191 | phys_addr_t paddr, u64 size, int prot); |
| 184 | void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); | 192 | void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr); |
| 185 | /* Set the numer of window per domain */ | 193 | /* Set the number of windows per domain */ |
| 186 | int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); | 194 | int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); |
| 187 | /* Get the numer of window per domain */ | 195 | /* Get the number of windows per domain */ |
| 188 | u32 (*domain_get_windows)(struct iommu_domain *domain); | 196 | u32 (*domain_get_windows)(struct iommu_domain *domain); |
| 189 | 197 | ||
| 190 | #ifdef CONFIG_OF_IOMMU | 198 | #ifdef CONFIG_OF_IOMMU |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index e7a018eaf3a2..017fced60242 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
| @@ -1,10 +1,13 @@ | |||
| 1 | #ifndef __LINUX_SWIOTLB_H | 1 | #ifndef __LINUX_SWIOTLB_H |
| 2 | #define __LINUX_SWIOTLB_H | 2 | #define __LINUX_SWIOTLB_H |
| 3 | 3 | ||
| 4 | #include <linux/dma-direction.h> | ||
| 5 | #include <linux/init.h> | ||
| 4 | #include <linux/types.h> | 6 | #include <linux/types.h> |
| 5 | 7 | ||
| 6 | struct device; | 8 | struct device; |
| 7 | struct dma_attrs; | 9 | struct dma_attrs; |
| 10 | struct page; | ||
| 8 | struct scatterlist; | 11 | struct scatterlist; |
| 9 | 12 | ||
| 10 | extern int swiotlb_force; | 13 | extern int swiotlb_force; |
diff --git a/include/trace/events/fence.h b/include/trace/events/fence.h index 98feb1b82896..d6dfa05ba322 100644 --- a/include/trace/events/fence.h +++ b/include/trace/events/fence.h | |||
| @@ -17,7 +17,7 @@ TRACE_EVENT(fence_annotate_wait_on, | |||
| 17 | 17 | ||
| 18 | TP_STRUCT__entry( | 18 | TP_STRUCT__entry( |
| 19 | __string(driver, fence->ops->get_driver_name(fence)) | 19 | __string(driver, fence->ops->get_driver_name(fence)) |
| 20 | __string(timeline, fence->ops->get_driver_name(fence)) | 20 | __string(timeline, fence->ops->get_timeline_name(fence)) |
| 21 | __field(unsigned int, context) | 21 | __field(unsigned int, context) |
| 22 | __field(unsigned int, seqno) | 22 | __field(unsigned int, seqno) |
| 23 | 23 | ||
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 4cc989ad6851..f95e1c43c3fb 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h | |||
| @@ -48,6 +48,8 @@ struct drm_etnaviv_timespec { | |||
| 48 | #define ETNAVIV_PARAM_GPU_FEATURES_2 0x05 | 48 | #define ETNAVIV_PARAM_GPU_FEATURES_2 0x05 |
| 49 | #define ETNAVIV_PARAM_GPU_FEATURES_3 0x06 | 49 | #define ETNAVIV_PARAM_GPU_FEATURES_3 0x06 |
| 50 | #define ETNAVIV_PARAM_GPU_FEATURES_4 0x07 | 50 | #define ETNAVIV_PARAM_GPU_FEATURES_4 0x07 |
| 51 | #define ETNAVIV_PARAM_GPU_FEATURES_5 0x08 | ||
| 52 | #define ETNAVIV_PARAM_GPU_FEATURES_6 0x09 | ||
| 51 | 53 | ||
| 52 | #define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10 | 54 | #define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10 |
| 53 | #define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11 | 55 | #define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11 |
| @@ -59,6 +61,7 @@ struct drm_etnaviv_timespec { | |||
| 59 | #define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17 | 61 | #define ETNAVIV_PARAM_GPU_BUFFER_SIZE 0x17 |
| 60 | #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18 | 62 | #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18 |
| 61 | #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 | 63 | #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 |
| 64 | #define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a | ||
| 62 | 65 | ||
| 63 | #define ETNA_MAX_PIPES 4 | 66 | #define ETNA_MAX_PIPES 4 |
| 64 | 67 | ||
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 02e8dfaa1ce2..68d3ebc12601 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig | |||
| @@ -235,7 +235,7 @@ config PM_TRACE_RTC | |||
| 235 | 235 | ||
| 236 | config APM_EMULATION | 236 | config APM_EMULATION |
| 237 | tristate "Advanced Power Management Emulation" | 237 | tristate "Advanced Power Management Emulation" |
| 238 | depends on PM && SYS_SUPPORTS_APM_EMULATION | 238 | depends on SYS_SUPPORTS_APM_EMULATION |
| 239 | help | 239 | help |
| 240 | APM is a BIOS specification for saving power using several different | 240 | APM is a BIOS specification for saving power using several different |
| 241 | techniques. This is mostly useful for battery powered laptops with | 241 | techniques. This is mostly useful for battery powered laptops with |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index de0e786b2667..544a7133cbd1 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
| @@ -162,7 +162,7 @@ static void cpuidle_idle_call(void) | |||
| 162 | */ | 162 | */ |
| 163 | if (idle_should_freeze()) { | 163 | if (idle_should_freeze()) { |
| 164 | entered_state = cpuidle_enter_freeze(drv, dev); | 164 | entered_state = cpuidle_enter_freeze(drv, dev); |
| 165 | if (entered_state >= 0) { | 165 | if (entered_state > 0) { |
| 166 | local_irq_enable(); | 166 | local_irq_enable(); |
| 167 | goto exit_idle; | 167 | goto exit_idle; |
| 168 | } | 168 | } |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 580ac2d4024f..15a1795bbba1 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void) | |||
| 316 | put_seccomp_filter(thread); | 316 | put_seccomp_filter(thread); |
| 317 | smp_store_release(&thread->seccomp.filter, | 317 | smp_store_release(&thread->seccomp.filter, |
| 318 | caller->seccomp.filter); | 318 | caller->seccomp.filter); |
| 319 | |||
| 320 | /* | ||
| 321 | * Don't let an unprivileged task work around | ||
| 322 | * the no_new_privs restriction by creating | ||
| 323 | * a thread that sets it up, enters seccomp, | ||
| 324 | * then dies. | ||
| 325 | */ | ||
| 326 | if (task_no_new_privs(caller)) | ||
| 327 | task_set_no_new_privs(thread); | ||
| 328 | |||
| 319 | /* | 329 | /* |
| 320 | * Opt the other thread into seccomp if needed. | 330 | * Opt the other thread into seccomp if needed. |
| 321 | * As threads are considered to be trust-realm | 331 | * As threads are considered to be trust-realm |
| 322 | * equivalent (see ptrace_may_access), it is safe to | 332 | * equivalent (see ptrace_may_access), it is safe to |
| 323 | * allow one thread to transition the other. | 333 | * allow one thread to transition the other. |
| 324 | */ | 334 | */ |
| 325 | if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) { | 335 | if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) |
| 326 | /* | ||
| 327 | * Don't let an unprivileged task work around | ||
| 328 | * the no_new_privs restriction by creating | ||
| 329 | * a thread that sets it up, enters seccomp, | ||
| 330 | * then dies. | ||
| 331 | */ | ||
| 332 | if (task_no_new_privs(caller)) | ||
| 333 | task_set_no_new_privs(thread); | ||
| 334 | |||
| 335 | seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); | 336 | seccomp_assign_mode(thread, SECCOMP_MODE_FILTER); |
| 336 | } | ||
| 337 | } | 337 | } |
| 338 | } | 338 | } |
| 339 | 339 | ||
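A minimal userspace sketch (illustrative, not part of the patch) of the path the moved block covers: one thread sets no_new_privs and installs an allow-all filter with SECCOMP_FILTER_FLAG_TSYNC, and with the hunk above the sibling threads now pick up no_new_privs even if they already had a filter installed.

        #include <linux/filter.h>
        #include <linux/seccomp.h>
        #include <sys/prctl.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static int install_tsync_filter(void)
        {
                struct sock_filter allow =
                        BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
                struct sock_fprog prog = { .len = 1, .filter = &allow };

                /* Opt into no_new_privs, then sync the filter to all threads. */
                if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
                        return -1;
                return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
                               SECCOMP_FILTER_FLAG_TSYNC, &prog);
        }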
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 87fb9801bd9e..d9293402ee68 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -1751,7 +1751,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr, | |||
| 1751 | { | 1751 | { |
| 1752 | __buffer_unlock_commit(buffer, event); | 1752 | __buffer_unlock_commit(buffer, event); |
| 1753 | 1753 | ||
| 1754 | ftrace_trace_stack(tr, buffer, flags, 6, pc, regs); | 1754 | ftrace_trace_stack(tr, buffer, flags, 0, pc, regs); |
| 1755 | ftrace_trace_userstack(buffer, flags, pc); | 1755 | ftrace_trace_userstack(buffer, flags, pc); |
| 1756 | } | 1756 | } |
| 1757 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs); | 1757 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs); |
diff --git a/mm/cleancache.c b/mm/cleancache.c index 8fc50811119b..ba5d8f3e6d68 100644 --- a/mm/cleancache.c +++ b/mm/cleancache.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | * cleancache_ops is set by cleancache_register_ops to contain the pointers | 22 | * cleancache_ops is set by cleancache_register_ops to contain the pointers |
| 23 | * to the cleancache "backend" implementation functions. | 23 | * to the cleancache "backend" implementation functions. |
| 24 | */ | 24 | */ |
| 25 | static struct cleancache_ops *cleancache_ops __read_mostly; | 25 | static const struct cleancache_ops *cleancache_ops __read_mostly; |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * Counters available via /sys/kernel/debug/cleancache (if debugfs is | 28 | * Counters available via /sys/kernel/debug/cleancache (if debugfs is |
| @@ -49,7 +49,7 @@ static void cleancache_register_ops_sb(struct super_block *sb, void *unused) | |||
| 49 | /* | 49 | /* |
| 50 | * Register operations for cleancache. Returns 0 on success. | 50 | * Register operations for cleancache. Returns 0 on success. |
| 51 | */ | 51 | */ |
| 52 | int cleancache_register_ops(struct cleancache_ops *ops) | 52 | int cleancache_register_ops(const struct cleancache_ops *ops) |
| 53 | { | 53 | { |
| 54 | if (cmpxchg(&cleancache_ops, NULL, ops)) | 54 | if (cmpxchg(&cleancache_ops, NULL, ops)) |
| 55 | return -EBUSY; | 55 | return -EBUSY; |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index e080746e1a6b..48958d3cec9e 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
| @@ -594,7 +594,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname) | |||
| 594 | if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || | 594 | if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 || |
| 595 | strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || | 595 | strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 || |
| 596 | strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || | 596 | strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 || |
| 597 | strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0) | 597 | strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 || |
| 598 | strcmp(symname, ".TOC.") == 0) | ||
| 598 | return 1; | 599 | return 1; |
| 599 | /* Do not ignore this symbol */ | 600 | /* Do not ignore this symbol */ |
| 600 | return 0; | 601 | return 0; |
diff --git a/security/keys/key.c b/security/keys/key.c index 07a87311055c..09ef276c4bdc 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
| @@ -430,7 +430,8 @@ static int __key_instantiate_and_link(struct key *key, | |||
| 430 | 430 | ||
| 431 | /* and link it into the destination keyring */ | 431 | /* and link it into the destination keyring */ |
| 432 | if (keyring) { | 432 | if (keyring) { |
| 433 | set_bit(KEY_FLAG_KEEP, &key->flags); | 433 | if (test_bit(KEY_FLAG_KEEP, &keyring->flags)) |
| 434 | set_bit(KEY_FLAG_KEEP, &key->flags); | ||
| 434 | 435 | ||
| 435 | __key_link(key, _edit); | 436 | __key_link(key, _edit); |
| 436 | } | 437 | } |
diff --git a/sound/core/Kconfig b/sound/core/Kconfig index e3e949126a56..a2a1e24becc6 100644 --- a/sound/core/Kconfig +++ b/sound/core/Kconfig | |||
| @@ -97,11 +97,11 @@ config SND_PCM_TIMER | |||
| 97 | bool "PCM timer interface" if EXPERT | 97 | bool "PCM timer interface" if EXPERT |
| 98 | default y | 98 | default y |
| 99 | help | 99 | help |
| 100 | If you disable this option, pcm timer will be inavailable, so | 100 | If you disable this option, pcm timer will be unavailable, so |
| 101 | those stubs used pcm timer (e.g. dmix, dsnoop & co) may work | 101 | those stubs that use pcm timer (e.g. dmix, dsnoop & co) may work |
| 102 | incorrectlly. | 102 | incorrectlly. |
| 103 | 103 | ||
| 104 | For some embedded device, we may disable it to reduce memory | 104 | For some embedded devices, we may disable it to reduce memory |
| 105 | footprint, about 20KB on x86_64 platform. | 105 | footprint, about 20KB on x86_64 platform. |
| 106 | 106 | ||
| 107 | config SND_SEQUENCER_OSS | 107 | config SND_SEQUENCER_OSS |
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 18b8dc45bb8f..7fac3cae8abd 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
| @@ -46,6 +46,13 @@ | |||
| 46 | #include <sound/compress_offload.h> | 46 | #include <sound/compress_offload.h> |
| 47 | #include <sound/compress_driver.h> | 47 | #include <sound/compress_driver.h> |
| 48 | 48 | ||
| 49 | /* struct snd_compr_codec_caps overflows the ioctl bit size for some | ||
| 50 | * architectures, so we need to disable the relevant ioctls. | ||
| 51 | */ | ||
| 52 | #if _IOC_SIZEBITS < 14 | ||
| 53 | #define COMPR_CODEC_CAPS_OVERFLOW | ||
| 54 | #endif | ||
| 55 | |||
| 49 | /* TODO: | 56 | /* TODO: |
| 50 | * - add substream support for multiple devices in case of | 57 | * - add substream support for multiple devices in case of |
| 51 | * SND_DYNAMIC_MINORS is not used | 58 | * SND_DYNAMIC_MINORS is not used |
| @@ -440,6 +447,7 @@ out: | |||
| 440 | return retval; | 447 | return retval; |
| 441 | } | 448 | } |
| 442 | 449 | ||
| 450 | #ifndef COMPR_CODEC_CAPS_OVERFLOW | ||
| 443 | static int | 451 | static int |
| 444 | snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) | 452 | snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg) |
| 445 | { | 453 | { |
| @@ -463,6 +471,7 @@ out: | |||
| 463 | kfree(caps); | 471 | kfree(caps); |
| 464 | return retval; | 472 | return retval; |
| 465 | } | 473 | } |
| 474 | #endif /* !COMPR_CODEC_CAPS_OVERFLOW */ | ||
| 466 | 475 | ||
| 467 | /* revisit this with snd_pcm_preallocate_xxx */ | 476 | /* revisit this with snd_pcm_preallocate_xxx */ |
| 468 | static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, | 477 | static int snd_compr_allocate_buffer(struct snd_compr_stream *stream, |
| @@ -801,9 +810,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg) | |||
| 801 | case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): | 810 | case _IOC_NR(SNDRV_COMPRESS_GET_CAPS): |
| 802 | retval = snd_compr_get_caps(stream, arg); | 811 | retval = snd_compr_get_caps(stream, arg); |
| 803 | break; | 812 | break; |
| 813 | #ifndef COMPR_CODEC_CAPS_OVERFLOW | ||
| 804 | case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): | 814 | case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS): |
| 805 | retval = snd_compr_get_codec_caps(stream, arg); | 815 | retval = snd_compr_get_codec_caps(stream, arg); |
| 806 | break; | 816 | break; |
| 817 | #endif | ||
| 807 | case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): | 818 | case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS): |
| 808 | retval = snd_compr_set_params(stream, arg); | 819 | retval = snd_compr_set_params(stream, arg); |
| 809 | break; | 820 | break; |
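The overflow mentioned in the new comment comes from how ioctl numbers are encoded: _IOWR() stores sizeof(argument type) in an _IOC_SIZEBITS-wide field, so the largest argument a command number can describe is (1 << _IOC_SIZEBITS) - 1 bytes. A hypothetical compile-time guard for that constraint (illustrative only, not something the patch adds):

        #include <linux/bug.h>
        #include <linux/ioctl.h>

        /* Fail the build if an ioctl argument type cannot be encoded in the
         * size field of the command number on this architecture. */
        #define CHECK_IOCTL_ARG_SIZE(type) \
                BUILD_BUG_ON(sizeof(type) > ((1UL << _IOC_SIZEBITS) - 1))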
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index b1221b29728e..6779e82b46dd 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c | |||
| @@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level) | |||
| 202 | 202 | ||
| 203 | dp->index = i; | 203 | dp->index = i; |
| 204 | if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) { | 204 | if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) { |
| 205 | pr_err("ALSA: seq_oss: too many applications\n"); | 205 | pr_debug("ALSA: seq_oss: too many applications\n"); |
| 206 | rc = -ENOMEM; | 206 | rc = -ENOMEM; |
| 207 | goto _error; | 207 | goto _error; |
| 208 | } | 208 | } |
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index 0f3b38184fe5..b16dbef04174 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c | |||
| @@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp) | |||
| 308 | struct seq_oss_synth *rec; | 308 | struct seq_oss_synth *rec; |
| 309 | struct seq_oss_synthinfo *info; | 309 | struct seq_oss_synthinfo *info; |
| 310 | 310 | ||
| 311 | if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) | 311 | if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS)) |
| 312 | return; | 312 | return; |
| 313 | for (i = 0; i < dp->max_synthdev; i++) { | 313 | for (i = 0; i < dp->max_synthdev; i++) { |
| 314 | info = &dp->synths[i]; | 314 | info = &dp->synths[i]; |
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index 75b74850c005..bde33308f0d6 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c | |||
| @@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver."); | |||
| 87 | module_param(fake_buffer, bool, 0444); | 87 | module_param(fake_buffer, bool, 0444); |
| 88 | MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); | 88 | MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); |
| 89 | #ifdef CONFIG_HIGH_RES_TIMERS | 89 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 90 | module_param(hrtimer, bool, 0644); | 90 | module_param(hrtimer, bool, 0444); |
| 91 | MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); | 91 | MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); |
| 92 | #endif | 92 | #endif |
| 93 | 93 | ||
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 926e5dcbb66a..5022c9b97ddf 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c | |||
| @@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = { | |||
| 47 | [6] = 0x07, | 47 | [6] = 0x07, |
| 48 | }; | 48 | }; |
| 49 | 49 | ||
| 50 | static unsigned int | 50 | static int |
| 51 | get_formation_index(unsigned int rate) | 51 | get_formation_index(unsigned int rate, unsigned int *index) |
| 52 | { | 52 | { |
| 53 | unsigned int i; | 53 | unsigned int i; |
| 54 | 54 | ||
| 55 | for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) { | 55 | for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) { |
| 56 | if (snd_bebob_rate_table[i] == rate) | 56 | if (snd_bebob_rate_table[i] == rate) { |
| 57 | return i; | 57 | *index = i; |
| 58 | return 0; | ||
| 59 | } | ||
| 58 | } | 60 | } |
| 59 | return -EINVAL; | 61 | return -EINVAL; |
| 60 | } | 62 | } |
| @@ -425,7 +427,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate) | |||
| 425 | goto end; | 427 | goto end; |
| 426 | 428 | ||
| 427 | /* confirm params for both streams */ | 429 | /* confirm params for both streams */ |
| 428 | index = get_formation_index(rate); | 430 | err = get_formation_index(rate, &index); |
| 431 | if (err < 0) | ||
| 432 | goto end; | ||
| 429 | pcm_channels = bebob->tx_stream_formations[index].pcm; | 433 | pcm_channels = bebob->tx_stream_formations[index].pcm; |
| 430 | midi_channels = bebob->tx_stream_formations[index].midi; | 434 | midi_channels = bebob->tx_stream_formations[index].midi; |
| 431 | err = amdtp_am824_set_parameters(&bebob->tx_stream, rate, | 435 | err = amdtp_am824_set_parameters(&bebob->tx_stream, rate, |
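The conversion above follows the usual C pattern of returning a status code and handing the result back through an out-parameter, so a negative errno can never be misread as a valid table index. A generic, stand-alone sketch of the same pattern (hypothetical names):

        #include <errno.h>
        #include <stddef.h>

        /* Return 0 and store the position in *index on success,
         * -EINVAL if value is not in the table. */
        static int lookup_index(const unsigned int *table, size_t n,
                                unsigned int value, unsigned int *index)
        {
                size_t i;

                for (i = 0; i < n; i++) {
                        if (table[i] == value) {
                                *index = i;
                                return 0;
                        }
                }
                return -EINVAL;
        }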
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig index 0216475fc759..37adcc6cbe6b 100644 --- a/sound/isa/Kconfig +++ b/sound/isa/Kconfig | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | config SND_WSS_LIB | 3 | config SND_WSS_LIB |
| 4 | tristate | 4 | tristate |
| 5 | select SND_PCM | 5 | select SND_PCM |
| 6 | select SND_TIMER | ||
| 6 | 7 | ||
| 7 | config SND_SB_COMMON | 8 | config SND_SB_COMMON |
| 8 | tristate | 9 | tristate |
| @@ -42,6 +43,7 @@ config SND_AD1816A | |||
| 42 | select SND_OPL3_LIB | 43 | select SND_OPL3_LIB |
| 43 | select SND_MPU401_UART | 44 | select SND_MPU401_UART |
| 44 | select SND_PCM | 45 | select SND_PCM |
| 46 | select SND_TIMER | ||
| 45 | help | 47 | help |
| 46 | Say Y here to include support for Analog Devices SoundPort | 48 | Say Y here to include support for Analog Devices SoundPort |
| 47 | AD1816A or compatible sound chips. | 49 | AD1816A or compatible sound chips. |
| @@ -209,6 +211,7 @@ config SND_GUSCLASSIC | |||
| 209 | tristate "Gravis UltraSound Classic" | 211 | tristate "Gravis UltraSound Classic" |
| 210 | select SND_RAWMIDI | 212 | select SND_RAWMIDI |
| 211 | select SND_PCM | 213 | select SND_PCM |
| 214 | select SND_TIMER | ||
| 212 | help | 215 | help |
| 213 | Say Y here to include support for Gravis UltraSound Classic | 216 | Say Y here to include support for Gravis UltraSound Classic |
| 214 | soundcards. | 217 | soundcards. |
| @@ -221,6 +224,7 @@ config SND_GUSEXTREME | |||
| 221 | select SND_OPL3_LIB | 224 | select SND_OPL3_LIB |
| 222 | select SND_MPU401_UART | 225 | select SND_MPU401_UART |
| 223 | select SND_PCM | 226 | select SND_PCM |
| 227 | select SND_TIMER | ||
| 224 | help | 228 | help |
| 225 | Say Y here to include support for Gravis UltraSound Extreme | 229 | Say Y here to include support for Gravis UltraSound Extreme |
| 226 | soundcards. | 230 | soundcards. |
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig index 656ce39bddbc..8f6594a7d37f 100644 --- a/sound/pci/Kconfig +++ b/sound/pci/Kconfig | |||
| @@ -155,6 +155,7 @@ config SND_AZT3328 | |||
| 155 | select SND_PCM | 155 | select SND_PCM |
| 156 | select SND_RAWMIDI | 156 | select SND_RAWMIDI |
| 157 | select SND_AC97_CODEC | 157 | select SND_AC97_CODEC |
| 158 | select SND_TIMER | ||
| 158 | depends on ZONE_DMA | 159 | depends on ZONE_DMA |
| 159 | help | 160 | help |
| 160 | Say Y here to include support for Aztech AZF3328 (PCI168) | 161 | Say Y here to include support for Aztech AZF3328 (PCI168) |
| @@ -463,6 +464,7 @@ config SND_EMU10K1 | |||
| 463 | select SND_HWDEP | 464 | select SND_HWDEP |
| 464 | select SND_RAWMIDI | 465 | select SND_RAWMIDI |
| 465 | select SND_AC97_CODEC | 466 | select SND_AC97_CODEC |
| 467 | select SND_TIMER | ||
| 466 | depends on ZONE_DMA | 468 | depends on ZONE_DMA |
| 467 | help | 469 | help |
| 468 | Say Y to include support for Sound Blaster PCI 512, Live!, | 470 | Say Y to include support for Sound Blaster PCI 512, Live!, |
| @@ -889,6 +891,7 @@ config SND_YMFPCI | |||
| 889 | select SND_OPL3_LIB | 891 | select SND_OPL3_LIB |
| 890 | select SND_MPU401_UART | 892 | select SND_MPU401_UART |
| 891 | select SND_AC97_CODEC | 893 | select SND_AC97_CODEC |
| 894 | select SND_TIMER | ||
| 892 | help | 895 | help |
| 893 | Say Y here to include support for Yamaha PCI audio chips - | 896 | Say Y here to include support for Yamaha PCI audio chips - |
| 894 | YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754. | 897 | YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754. |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 256e6cda218f..4045dca3d699 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -90,6 +90,8 @@ enum { | |||
| 90 | #define NVIDIA_HDA_ENABLE_COHBIT 0x01 | 90 | #define NVIDIA_HDA_ENABLE_COHBIT 0x01 |
| 91 | 91 | ||
| 92 | /* Defines for Intel SCH HDA snoop control */ | 92 | /* Defines for Intel SCH HDA snoop control */ |
| 93 | #define INTEL_HDA_CGCTL 0x48 | ||
| 94 | #define INTEL_HDA_CGCTL_MISCBDCGE (0x1 << 6) | ||
| 93 | #define INTEL_SCH_HDA_DEVC 0x78 | 95 | #define INTEL_SCH_HDA_DEVC 0x78 |
| 94 | #define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11) | 96 | #define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11) |
| 95 | 97 | ||
| @@ -534,10 +536,21 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset) | |||
| 534 | { | 536 | { |
| 535 | struct hdac_bus *bus = azx_bus(chip); | 537 | struct hdac_bus *bus = azx_bus(chip); |
| 536 | struct pci_dev *pci = chip->pci; | 538 | struct pci_dev *pci = chip->pci; |
| 539 | u32 val; | ||
| 537 | 540 | ||
| 538 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) | 541 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) |
| 539 | snd_hdac_set_codec_wakeup(bus, true); | 542 | snd_hdac_set_codec_wakeup(bus, true); |
| 543 | if (IS_BROXTON(pci)) { | ||
| 544 | pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); | ||
| 545 | val = val & ~INTEL_HDA_CGCTL_MISCBDCGE; | ||
| 546 | pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); | ||
| 547 | } | ||
| 540 | azx_init_chip(chip, full_reset); | 548 | azx_init_chip(chip, full_reset); |
| 549 | if (IS_BROXTON(pci)) { | ||
| 550 | pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val); | ||
| 551 | val = val | INTEL_HDA_CGCTL_MISCBDCGE; | ||
| 552 | pci_write_config_dword(pci, INTEL_HDA_CGCTL, val); | ||
| 553 | } | ||
| 541 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) | 554 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) |
| 542 | snd_hdac_set_codec_wakeup(bus, false); | 555 | snd_hdac_set_codec_wakeup(bus, false); |
| 543 | 556 | ||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 426a29a1c19b..1f52b55d77c9 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -3653,6 +3653,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi), | |||
| 3653 | HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), | 3653 | HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi), |
| 3654 | HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), | 3654 | HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi), |
| 3655 | HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), | 3655 | HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi), |
| 3656 | HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi), | ||
| 3656 | HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), | 3657 | HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch), |
| 3657 | HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), | 3658 | HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), |
| 3658 | HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), | 3659 | HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), |
diff --git a/sound/sparc/Kconfig b/sound/sparc/Kconfig index d75deba5617d..dfcd38647606 100644 --- a/sound/sparc/Kconfig +++ b/sound/sparc/Kconfig | |||
| @@ -22,6 +22,7 @@ config SND_SUN_AMD7930 | |||
| 22 | config SND_SUN_CS4231 | 22 | config SND_SUN_CS4231 |
| 23 | tristate "Sun CS4231" | 23 | tristate "Sun CS4231" |
| 24 | select SND_PCM | 24 | select SND_PCM |
| 25 | select SND_TIMER | ||
| 25 | help | 26 | help |
| 26 | Say Y here to include support for CS4231 sound device on Sun. | 27 | Say Y here to include support for CS4231 sound device on Sun. |
| 27 | 28 | ||
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 23ea6d800c4c..a75d9ce7d77a 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -1205,8 +1205,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev) | |||
| 1205 | * "Playback Design" products need a 50ms delay after setting the | 1205 | * "Playback Design" products need a 50ms delay after setting the |
| 1206 | * USB interface. | 1206 | * USB interface. |
| 1207 | */ | 1207 | */ |
| 1208 | if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) | 1208 | switch (le16_to_cpu(dev->descriptor.idVendor)) { |
| 1209 | case 0x23ba: /* Playback Design */ | ||
| 1210 | case 0x0644: /* TEAC Corp. */ | ||
| 1209 | mdelay(50); | 1211 | mdelay(50); |
| 1212 | break; | ||
| 1213 | } | ||
| 1210 | } | 1214 | } |
| 1211 | 1215 | ||
| 1212 | void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, | 1216 | void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, |
| @@ -1221,6 +1225,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, | |||
| 1221 | (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) | 1225 | (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) |
| 1222 | mdelay(20); | 1226 | mdelay(20); |
| 1223 | 1227 | ||
| 1228 | /* | ||
| 1229 | * "TEAC Corp." products need a 20ms delay after each | ||
| 1230 | * class compliant request | ||
| 1231 | */ | ||
| 1232 | if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) && | ||
| 1233 | (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) | ||
| 1234 | mdelay(20); | ||
| 1235 | |||
| 1224 | /* Marantz/Denon devices with USB DAC functionality need a delay | 1236 | /* Marantz/Denon devices with USB DAC functionality need a delay |
| 1225 | * after each class compliant request | 1237 | * after each class compliant request |
| 1226 | */ | 1238 | */ |
diff --git a/tools/virtio/asm/barrier.h b/tools/virtio/asm/barrier.h index 26b7926bda88..ba34f9e96efd 100644 --- a/tools/virtio/asm/barrier.h +++ b/tools/virtio/asm/barrier.h | |||
| @@ -1,15 +1,19 @@ | |||
| 1 | #if defined(__i386__) || defined(__x86_64__) | 1 | #if defined(__i386__) || defined(__x86_64__) |
| 2 | #define barrier() asm volatile("" ::: "memory") | 2 | #define barrier() asm volatile("" ::: "memory") |
| 3 | #define mb() __sync_synchronize() | 3 | #define virt_mb() __sync_synchronize() |
| 4 | 4 | #define virt_rmb() barrier() | |
| 5 | #define smp_mb() mb() | 5 | #define virt_wmb() barrier() |
| 6 | # define dma_rmb() barrier() | 6 | /* Atomic store should be enough, but gcc generates worse code in that case. */ |
| 7 | # define dma_wmb() barrier() | 7 | #define virt_store_mb(var, value) do { \ |
| 8 | # define smp_rmb() barrier() | 8 | typeof(var) virt_store_mb_value = (value); \ |
| 9 | # define smp_wmb() barrier() | 9 | __atomic_exchange(&(var), &virt_store_mb_value, &virt_store_mb_value, \ |
| 10 | __ATOMIC_SEQ_CST); \ | ||
| 11 | barrier(); \ | ||
| 12 | } while (0); | ||
| 10 | /* Weak barriers should be used. If not - it's a bug */ | 13 | /* Weak barriers should be used. If not - it's a bug */ |
| 11 | # define rmb() abort() | 14 | # define mb() abort() |
| 12 | # define wmb() abort() | 15 | # define rmb() abort() |
| 16 | # define wmb() abort() | ||
| 13 | #else | 17 | #else |
| 14 | #error Please fill in barrier macros | 18 | #error Please fill in barrier macros |
| 15 | #endif | 19 | #endif |
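virt_store_mb() is intended to behave like a WRITE_ONCE() of the value followed by virt_mb(); a small usage sketch for a hypothetical ring producer using the macro defined above (the flag constant is only an example):

        /* Publish a new available index with a full barrier, then check
         * whether the other side asked not to be notified. */
        static inline int publish_and_need_notify(unsigned short *avail_idx,
                                                  unsigned short new_idx,
                                                  const unsigned short *flags)
        {
                virt_store_mb(*avail_idx, new_idx);
                return !(*flags & 0x1); /* e.g. VRING_USED_F_NO_NOTIFY */
        }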
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h new file mode 100644 index 000000000000..845960e1cbf2 --- /dev/null +++ b/tools/virtio/linux/compiler.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | #ifndef LINUX_COMPILER_H | ||
| 2 | #define LINUX_COMPILER_H | ||
| 3 | |||
| 4 | #define WRITE_ONCE(var, val) \ | ||
| 5 | (*((volatile typeof(val) *)(&(var))) = (val)) | ||
| 6 | |||
| 7 | #define READ_ONCE(var) (*((volatile typeof(var) *)(&(var)))) | ||
| 8 | |||
| 9 | #endif | ||
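A short usage sketch of the two macros (illustrative, not part of the patch): they force single volatile accesses so the compiler cannot tear, fuse, or cache a shared variable in a register.

        static int stop_flag;

        static void producer(void)
        {
                WRITE_ONCE(stop_flag, 1);
        }

        static void consumer(void)
        {
                while (!READ_ONCE(stop_flag))
                        ;       /* spin until the producer stores 1 */
        }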
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 4db7d5691ba7..033849948215 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | #include <assert.h> | 8 | #include <assert.h> |
| 9 | #include <stdarg.h> | 9 | #include <stdarg.h> |
| 10 | 10 | ||
| 11 | #include <linux/compiler.h> | ||
| 11 | #include <linux/types.h> | 12 | #include <linux/types.h> |
| 12 | #include <linux/printk.h> | 13 | #include <linux/printk.h> |
| 13 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile new file mode 100644 index 000000000000..feaa64ac4630 --- /dev/null +++ b/tools/virtio/ringtest/Makefile | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | all: | ||
| 2 | |||
| 3 | all: ring virtio_ring_0_9 virtio_ring_poll | ||
| 4 | |||
| 5 | CFLAGS += -Wall | ||
| 6 | CFLAGS += -pthread -O2 -ggdb | ||
| 7 | LDFLAGS += -pthread -O2 -ggdb | ||
| 8 | |||
| 9 | main.o: main.c main.h | ||
| 10 | ring.o: ring.c main.h | ||
| 11 | virtio_ring_0_9.o: virtio_ring_0_9.c main.h | ||
| 12 | virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h | ||
| 13 | ring: ring.o main.o | ||
| 14 | virtio_ring_0_9: virtio_ring_0_9.o main.o | ||
| 15 | virtio_ring_poll: virtio_ring_poll.o main.o | ||
| 16 | clean: | ||
| 17 | -rm main.o | ||
| 18 | -rm ring.o ring | ||
| 19 | -rm virtio_ring_0_9.o virtio_ring_0_9 | ||
| 20 | -rm virtio_ring_poll.o virtio_ring_poll | ||
| 21 | |||
| 22 | .PHONY: all clean | ||
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README new file mode 100644 index 000000000000..34e94c46104f --- /dev/null +++ b/tools/virtio/ringtest/README | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | Partial implementation of various ring layouts, useful to tune virtio design. | ||
| 2 | Uses shared memory heavily. | ||
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c new file mode 100644 index 000000000000..3a5ff438bd62 --- /dev/null +++ b/tools/virtio/ringtest/main.c | |||
| @@ -0,0 +1,366 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Red Hat, Inc. | ||
| 3 | * Author: Michael S. Tsirkin <mst@redhat.com> | ||
| 4 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 5 | * | ||
| 6 | * Command line processing and common functions for ring benchmarking. | ||
| 7 | */ | ||
| 8 | #define _GNU_SOURCE | ||
| 9 | #include <getopt.h> | ||
| 10 | #include <pthread.h> | ||
| 11 | #include <assert.h> | ||
| 12 | #include <sched.h> | ||
| 13 | #include "main.h" | ||
| 14 | #include <sys/eventfd.h> | ||
| 15 | #include <stdlib.h> | ||
| 16 | #include <stdio.h> | ||
| 17 | #include <unistd.h> | ||
| 18 | #include <limits.h> | ||
| 19 | |||
| 20 | int runcycles = 10000000; | ||
| 21 | int max_outstanding = INT_MAX; | ||
| 22 | int batch = 1; | ||
| 23 | |||
| 24 | bool do_sleep = false; | ||
| 25 | bool do_relax = false; | ||
| 26 | bool do_exit = true; | ||
| 27 | |||
| 28 | unsigned ring_size = 256; | ||
| 29 | |||
| 30 | static int kickfd = -1; | ||
| 31 | static int callfd = -1; | ||
| 32 | |||
| 33 | void notify(int fd) | ||
| 34 | { | ||
| 35 | unsigned long long v = 1; | ||
| 36 | int r; | ||
| 37 | |||
| 38 | vmexit(); | ||
| 39 | r = write(fd, &v, sizeof v); | ||
| 40 | assert(r == sizeof v); | ||
| 41 | vmentry(); | ||
| 42 | } | ||
| 43 | |||
| 44 | void wait_for_notify(int fd) | ||
| 45 | { | ||
| 46 | unsigned long long v = 1; | ||
| 47 | int r; | ||
| 48 | |||
| 49 | vmexit(); | ||
| 50 | r = read(fd, &v, sizeof v); | ||
| 51 | assert(r == sizeof v); | ||
| 52 | vmentry(); | ||
| 53 | } | ||
| 54 | |||
| 55 | void kick(void) | ||
| 56 | { | ||
| 57 | notify(kickfd); | ||
| 58 | } | ||
| 59 | |||
| 60 | void wait_for_kick(void) | ||
| 61 | { | ||
| 62 | wait_for_notify(kickfd); | ||
| 63 | } | ||
| 64 | |||
| 65 | void call(void) | ||
| 66 | { | ||
| 67 | notify(callfd); | ||
| 68 | } | ||
| 69 | |||
| 70 | void wait_for_call(void) | ||
| 71 | { | ||
| 72 | wait_for_notify(callfd); | ||
| 73 | } | ||
| 74 | |||
| 75 | void set_affinity(const char *arg) | ||
| 76 | { | ||
| 77 | cpu_set_t cpuset; | ||
| 78 | int ret; | ||
| 79 | pthread_t self; | ||
| 80 | long int cpu; | ||
| 81 | char *endptr; | ||
| 82 | |||
| 83 | if (!arg) | ||
| 84 | return; | ||
| 85 | |||
| 86 | cpu = strtol(arg, &endptr, 0); | ||
| 87 | assert(!*endptr); | ||
| 88 | |||
| 89 | assert(cpu >= 0 && cpu < CPU_SETSIZE); | ||
| 90 | |||
| 91 | self = pthread_self(); | ||
| 92 | CPU_ZERO(&cpuset); | ||
| 93 | CPU_SET(cpu, &cpuset); | ||
| 94 | |||
| 95 | ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset); | ||
| 96 | assert(!ret); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void run_guest(void) | ||
| 100 | { | ||
| 101 | int completed_before; | ||
| 102 | int completed = 0; | ||
| 103 | int started = 0; | ||
| 104 | int bufs = runcycles; | ||
| 105 | int spurious = 0; | ||
| 106 | int r; | ||
| 107 | unsigned len; | ||
| 108 | void *buf; | ||
| 109 | int tokick = batch; | ||
| 110 | |||
| 111 | for (;;) { | ||
| 112 | if (do_sleep) | ||
| 113 | disable_call(); | ||
| 114 | completed_before = completed; | ||
| 115 | do { | ||
| 116 | if (started < bufs && | ||
| 117 | started - completed < max_outstanding) { | ||
| 118 | r = add_inbuf(0, NULL, "Hello, world!"); | ||
| 119 | if (__builtin_expect(r == 0, true)) { | ||
| 120 | ++started; | ||
| 121 | if (!--tokick) { | ||
| 122 | tokick = batch; | ||
| 123 | if (do_sleep) | ||
| 124 | kick_available(); | ||
| 125 | } | ||
| 126 | |||
| 127 | } | ||
| 128 | } else | ||
| 129 | r = -1; | ||
| 130 | |||
| 131 | /* Flush out completed bufs if any */ | ||
| 132 | if (get_buf(&len, &buf)) { | ||
| 133 | ++completed; | ||
| 134 | if (__builtin_expect(completed == bufs, false)) | ||
| 135 | return; | ||
| 136 | r = 0; | ||
| 137 | } | ||
| 138 | } while (r == 0); | ||
| 139 | if (completed == completed_before) | ||
| 140 | ++spurious; | ||
| 141 | assert(completed <= bufs); | ||
| 142 | assert(started <= bufs); | ||
| 143 | if (do_sleep) { | ||
| 144 | if (enable_call()) | ||
| 145 | wait_for_call(); | ||
| 146 | } else { | ||
| 147 | poll_used(); | ||
| 148 | } | ||
| 149 | } | ||
| 150 | } | ||
| 151 | |||
| 152 | static void run_host(void) | ||
| 153 | { | ||
| 154 | int completed_before; | ||
| 155 | int completed = 0; | ||
| 156 | int spurious = 0; | ||
| 157 | int bufs = runcycles; | ||
| 158 | unsigned len; | ||
| 159 | void *buf; | ||
| 160 | |||
| 161 | for (;;) { | ||
| 162 | if (do_sleep) { | ||
| 163 | if (enable_kick()) | ||
| 164 | wait_for_kick(); | ||
| 165 | } else { | ||
| 166 | poll_avail(); | ||
| 167 | } | ||
| 168 | if (do_sleep) | ||
| 169 | disable_kick(); | ||
| 170 | completed_before = completed; | ||
| 171 | while (__builtin_expect(use_buf(&len, &buf), true)) { | ||
| 172 | if (do_sleep) | ||
| 173 | call_used(); | ||
| 174 | ++completed; | ||
| 175 | if (__builtin_expect(completed == bufs, false)) | ||
| 176 | return; | ||
| 177 | } | ||
| 178 | if (completed == completed_before) | ||
| 179 | ++spurious; | ||
| 180 | assert(completed <= bufs); | ||
| 181 | if (completed == bufs) | ||
| 182 | break; | ||
| 183 | } | ||
| 184 | } | ||
| 185 | |||
| 186 | void *start_guest(void *arg) | ||
| 187 | { | ||
| 188 | set_affinity(arg); | ||
| 189 | run_guest(); | ||
| 190 | pthread_exit(NULL); | ||
| 191 | } | ||
| 192 | |||
| 193 | void *start_host(void *arg) | ||
| 194 | { | ||
| 195 | set_affinity(arg); | ||
| 196 | run_host(); | ||
| 197 | pthread_exit(NULL); | ||
| 198 | } | ||
| 199 | |||
| 200 | static const char optstring[] = ""; | ||
| 201 | static const struct option longopts[] = { | ||
| 202 | { | ||
| 203 | .name = "help", | ||
| 204 | .has_arg = no_argument, | ||
| 205 | .val = 'h', | ||
| 206 | }, | ||
| 207 | { | ||
| 208 | .name = "host-affinity", | ||
| 209 | .has_arg = required_argument, | ||
| 210 | .val = 'H', | ||
| 211 | }, | ||
| 212 | { | ||
| 213 | .name = "guest-affinity", | ||
| 214 | .has_arg = required_argument, | ||
| 215 | .val = 'G', | ||
| 216 | }, | ||
| 217 | { | ||
| 218 | .name = "ring-size", | ||
| 219 | .has_arg = required_argument, | ||
| 220 | .val = 'R', | ||
| 221 | }, | ||
| 222 | { | ||
| 223 | .name = "run-cycles", | ||
| 224 | .has_arg = required_argument, | ||
| 225 | .val = 'C', | ||
| 226 | }, | ||
| 227 | { | ||
| 228 | .name = "outstanding", | ||
| 229 | .has_arg = required_argument, | ||
| 230 | .val = 'o', | ||
| 231 | }, | ||
| 232 | { | ||
| 233 | .name = "batch", | ||
| 234 | .has_arg = required_argument, | ||
| 235 | .val = 'b', | ||
| 236 | }, | ||
| 237 | { | ||
| 238 | .name = "sleep", | ||
| 239 | .has_arg = no_argument, | ||
| 240 | .val = 's', | ||
| 241 | }, | ||
| 242 | { | ||
| 243 | .name = "relax", | ||
| 244 | .has_arg = no_argument, | ||
| 245 | .val = 'x', | ||
| 246 | }, | ||
| 247 | { | ||
| 248 | .name = "exit", | ||
| 249 | .has_arg = no_argument, | ||
| 250 | .val = 'e', | ||
| 251 | }, | ||
| 252 | { | ||
| 253 | } | ||
| 254 | }; | ||
| 255 | |||
| 256 | static void help(void) | ||
| 257 | { | ||
| 258 | fprintf(stderr, "Usage: <test> [--help]" | ||
| 259 | " [--host-affinity H]" | ||
| 260 | " [--guest-affinity G]" | ||
| 261 | " [--ring-size R (default: %d)]" | ||
| 262 | " [--run-cycles C (default: %d)]" | ||
| 263 | " [--batch b]" | ||
| 264 | " [--outstanding o]" | ||
| 265 | " [--sleep]" | ||
| 266 | " [--relax]" | ||
| 267 | " [--exit]" | ||
| 268 | "\n", | ||
| 269 | ring_size, | ||
| 270 | runcycles); | ||
| 271 | } | ||
| 272 | |||
| 273 | int main(int argc, char **argv) | ||
| 274 | { | ||
| 275 | int ret; | ||
| 276 | pthread_t host, guest; | ||
| 277 | void *tret; | ||
| 278 | char *host_arg = NULL; | ||
| 279 | char *guest_arg = NULL; | ||
| 280 | char *endptr; | ||
| 281 | long int c; | ||
| 282 | |||
| 283 | kickfd = eventfd(0, 0); | ||
| 284 | assert(kickfd >= 0); | ||
| 285 | callfd = eventfd(0, 0); | ||
| 286 | assert(callfd >= 0); | ||
| 287 | |||
| 288 | for (;;) { | ||
| 289 | int o = getopt_long(argc, argv, optstring, longopts, NULL); | ||
| 290 | switch (o) { | ||
| 291 | case -1: | ||
| 292 | goto done; | ||
| 293 | case '?': | ||
| 294 | help(); | ||
| 295 | exit(2); | ||
| 296 | case 'H': | ||
| 297 | host_arg = optarg; | ||
| 298 | break; | ||
| 299 | case 'G': | ||
| 300 | guest_arg = optarg; | ||
| 301 | break; | ||
| 302 | case 'R': | ||
| 303 | ring_size = strtol(optarg, &endptr, 0); | ||
| 304 | assert(ring_size && !(ring_size & (ring_size - 1))); | ||
| 305 | assert(!*endptr); | ||
| 306 | break; | ||
| 307 | case 'C': | ||
| 308 | c = strtol(optarg, &endptr, 0); | ||
| 309 | assert(!*endptr); | ||
| 310 | assert(c > 0 && c < INT_MAX); | ||
| 311 | runcycles = c; | ||
| 312 | break; | ||
| 313 | case 'o': | ||
| 314 | c = strtol(optarg, &endptr, 0); | ||
| 315 | assert(!*endptr); | ||
| 316 | assert(c > 0 && c < INT_MAX); | ||
| 317 | max_outstanding = c; | ||
| 318 | break; | ||
| 319 | case 'b': | ||
| 320 | c = strtol(optarg, &endptr, 0); | ||
| 321 | assert(!*endptr); | ||
| 322 | assert(c > 0 && c < INT_MAX); | ||
| 323 | batch = c; | ||
| 324 | break; | ||
| 325 | case 's': | ||
| 326 | do_sleep = true; | ||
| 327 | break; | ||
| 328 | case 'x': | ||
| 329 | do_relax = true; | ||
| 330 | break; | ||
| 331 | case 'e': | ||
| 332 | do_exit = true; | ||
| 333 | break; | ||
| 334 | default: | ||
| 335 | help(); | ||
| 336 | exit(4); | ||
| 337 | break; | ||
| 338 | } | ||
| 339 | } | ||
| 340 | |||
| 341 | /* does nothing here, used to make sure all smp APIs compile */ | ||
| 342 | smp_acquire(); | ||
| 343 | smp_release(); | ||
| 344 | smp_mb(); | ||
| 345 | done: | ||
| 346 | |||
| 347 | if (batch > max_outstanding) | ||
| 348 | batch = max_outstanding; | ||
| 349 | |||
| 350 | if (optind < argc) { | ||
| 351 | help(); | ||
| 352 | exit(4); | ||
| 353 | } | ||
| 354 | alloc_ring(); | ||
| 355 | |||
| 356 | ret = pthread_create(&host, NULL, start_host, host_arg); | ||
| 357 | assert(!ret); | ||
| 358 | ret = pthread_create(&guest, NULL, start_guest, guest_arg); | ||
| 359 | assert(!ret); | ||
| 360 | |||
| 361 | ret = pthread_join(guest, &tret); | ||
| 362 | assert(!ret); | ||
| 363 | ret = pthread_join(host, &tret); | ||
| 364 | assert(!ret); | ||
| 365 | return 0; | ||
| 366 | } | ||
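
main() above creates two eventfds, kickfd and callfd, and main.h below declares the kick()/wait_for_kick() and call()/wait_for_call() pairs that run_guest() and run_host() rely on; their definitions sit earlier in main.c, outside this hunk. As a rough sketch of how such an eventfd-backed notification pair typically looks (the *_sketch names and the assert-based error handling are assumptions, not the commit's code):

	#include <stdint.h>
	#include <unistd.h>
	#include <assert.h>
	#include <sys/eventfd.h>

	static int kickfd;		/* eventfd(0, 0), as created in main() above */

	static void kick_sketch(void)
	{
		uint64_t v = 1;

		/* producer side: bump the eventfd counter to wake the peer */
		assert(write(kickfd, &v, sizeof(v)) == sizeof(v));
	}

	static void wait_for_kick_sketch(void)
	{
		uint64_t v;

		/* consumer side: block until the counter is non-zero, then reset it */
		assert(read(kickfd, &v, sizeof(v)) == sizeof(v));
	}

The call()/wait_for_call() pair would do the same over callfd, in the opposite direction.
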
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h new file mode 100644 index 000000000000..16917acb0ade --- /dev/null +++ b/tools/virtio/ringtest/main.h | |||
| @@ -0,0 +1,119 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Red Hat, Inc. | ||
| 3 | * Author: Michael S. Tsirkin <mst@redhat.com> | ||
| 4 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 5 | * | ||
| 6 | * Common macros and functions for ring benchmarking. | ||
| 7 | */ | ||
| 8 | #ifndef MAIN_H | ||
| 9 | #define MAIN_H | ||
| 10 | |||
| 11 | #include <stdbool.h> | ||
| 12 | |||
| 13 | extern bool do_exit; | ||
| 14 | |||
| 15 | #if defined(__x86_64__) || defined(__i386__) | ||
| 16 | #include "x86intrin.h" | ||
| 17 | |||
| 18 | static inline void wait_cycles(unsigned long long cycles) | ||
| 19 | { | ||
| 20 | unsigned long long t; | ||
| 21 | |||
| 22 | t = __rdtsc(); | ||
| 23 | while (__rdtsc() - t < cycles) {} | ||
| 24 | } | ||
| 25 | |||
| 26 | #define VMEXIT_CYCLES 500 | ||
| 27 | #define VMENTRY_CYCLES 500 | ||
| 28 | |||
| 29 | #else | ||
| 30 | static inline void wait_cycles(unsigned long long cycles) | ||
| 31 | { | ||
| 32 | _Exit(5); | ||
| 33 | } | ||
| 34 | #define VMEXIT_CYCLES 0 | ||
| 35 | #define VMENTRY_CYCLES 0 | ||
| 36 | #endif | ||
| 37 | |||
| 38 | static inline void vmexit(void) | ||
| 39 | { | ||
| 40 | if (!do_exit) | ||
| 41 | return; | ||
| 42 | |||
| 43 | wait_cycles(VMEXIT_CYCLES); | ||
| 44 | } | ||
| 45 | static inline void vmentry(void) | ||
| 46 | { | ||
| 47 | if (!do_exit) | ||
| 48 | return; | ||
| 49 | |||
| 50 | wait_cycles(VMENTRY_CYCLES); | ||
| 51 | } | ||
| 52 | |||
| 53 | /* implemented by ring */ | ||
| 54 | void alloc_ring(void); | ||
| 55 | /* guest side */ | ||
| 56 | int add_inbuf(unsigned, void *, void *); | ||
| 57 | void *get_buf(unsigned *, void **); | ||
| 58 | void disable_call(); | ||
| 59 | bool enable_call(); | ||
| 60 | void kick_available(); | ||
| 61 | void poll_used(); | ||
| 62 | /* host side */ | ||
| 63 | void disable_kick(); | ||
| 64 | bool enable_kick(); | ||
| 65 | bool use_buf(unsigned *, void **); | ||
| 66 | void call_used(); | ||
| 67 | void poll_avail(); | ||
| 68 | |||
| 69 | /* implemented by main */ | ||
| 70 | extern bool do_sleep; | ||
| 71 | void kick(void); | ||
| 72 | void wait_for_kick(void); | ||
| 73 | void call(void); | ||
| 74 | void wait_for_call(void); | ||
| 75 | |||
| 76 | extern unsigned ring_size; | ||
| 77 | |||
| 78 | /* Compiler barrier - similar to what Linux uses */ | ||
| 79 | #define barrier() asm volatile("" ::: "memory") | ||
| 80 | |||
| 81 | /* Is there a portable way to do this? */ | ||
| 82 | #if defined(__x86_64__) || defined(__i386__) | ||
| 83 | #define cpu_relax() asm ("rep; nop" ::: "memory") | ||
| 84 | #else | ||
| 85 | #define cpu_relax() assert(0) | ||
| 86 | #endif | ||
| 87 | |||
| 88 | extern bool do_relax; | ||
| 89 | |||
| 90 | static inline void busy_wait(void) | ||
| 91 | { | ||
| 92 | if (do_relax) | ||
| 93 | cpu_relax(); | ||
| 94 | else | ||
| 95 | /* prevent compiler from removing busy loops */ | ||
| 96 | barrier(); | ||
| 97 | } | ||
| 98 | |||
| 99 | /* | ||
| 100 | * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized | ||
| 101 | * with other __ATOMIC_SEQ_CST calls. | ||
| 102 | */ | ||
| 103 | #define smp_mb() __sync_synchronize() | ||
| 104 | |||
| 105 | /* | ||
| 106 | * This abuses the atomic builtins for thread fences, and | ||
| 107 | * adds a compiler barrier. | ||
| 108 | */ | ||
| 109 | #define smp_release() do { \ | ||
| 110 | barrier(); \ | ||
| 111 | __atomic_thread_fence(__ATOMIC_RELEASE); \ | ||
| 112 | } while (0) | ||
| 113 | |||
| 114 | #define smp_acquire() do { \ | ||
| 115 | __atomic_thread_fence(__ATOMIC_ACQUIRE); \ | ||
| 116 | barrier(); \ | ||
| 117 | } while (0) | ||
| 118 | |||
| 119 | #endif | ||
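
The smp_release()/smp_acquire() macros above are what the ring implementations below mark as paired barriers ("Barrier A"/"Barrier B" and so on). A minimal usage sketch, not part of the commit, of the intended publish/consume pattern built only from the helpers declared in this header:

	/* sketch only - assumes main.h is included for smp_release()/smp_acquire()/busy_wait() */
	static int payload;
	static volatile int ready;

	static void producer(void)
	{
		payload = 42;		/* 1. write the data */
		smp_release();		/* 2. order the data before the flag */
		ready = 1;		/* 3. publish the flag */
	}

	static int consumer(void)
	{
		while (!ready)		/* 1. wait for the flag */
			busy_wait();
		smp_acquire();		/* 2. order the flag before the data */
		return payload;		/* 3. now guaranteed to observe 42 */
	}

add_inbuf()/get_buf() in ring.c below follow the same shape, with the DESC_HW flag playing the role of "ready".
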
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c new file mode 100644 index 000000000000..c25c8d248b6b --- /dev/null +++ b/tools/virtio/ringtest/ring.c | |||
| @@ -0,0 +1,272 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Red Hat, Inc. | ||
| 3 | * Author: Michael S. Tsirkin <mst@redhat.com> | ||
| 4 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 5 | * | ||
| 6 | * Simple descriptor-based ring. virtio 0.9 compatible event index is used for | ||
| 7 | * signalling, unconditionally. | ||
| 8 | */ | ||
| 9 | #define _GNU_SOURCE | ||
| 10 | #include "main.h" | ||
| 11 | #include <stdlib.h> | ||
| 12 | #include <stdio.h> | ||
| 13 | #include <string.h> | ||
| 14 | |||
| 15 | /* Next - Where next entry will be written. | ||
| 16 | * Prev - "Next" value when event triggered previously. | ||
| 17 | * Event - Peer requested event after writing this entry. | ||
| 18 | */ | ||
| 19 | static inline bool need_event(unsigned short event, | ||
| 20 | unsigned short next, | ||
| 21 | unsigned short prev) | ||
| 22 | { | ||
| 23 | return (unsigned short)(next - event - 1) < (unsigned short)(next - prev); | ||
| 24 | } | ||
| 25 | |||
| 26 | /* Design: | ||
| 27 | * Guest adds descriptors with unique index values and DESC_HW in flags. | ||
| 28 | * Host overwrites used descriptors with correct len, index, and DESC_HW clear. | ||
| 29 | * Flags are always set last. | ||
| 30 | */ | ||
| 31 | #define DESC_HW 0x1 | ||
| 32 | |||
| 33 | struct desc { | ||
| 34 | unsigned short flags; | ||
| 35 | unsigned short index; | ||
| 36 | unsigned len; | ||
| 37 | unsigned long long addr; | ||
| 38 | }; | ||
| 39 | |||
| 40 | /* how much padding is needed to avoid false cache sharing */ | ||
| 41 | #define HOST_GUEST_PADDING 0x80 | ||
| 42 | |||
| 43 | /* Mostly read */ | ||
| 44 | struct event { | ||
| 45 | unsigned short kick_index; | ||
| 46 | unsigned char reserved0[HOST_GUEST_PADDING - 2]; | ||
| 47 | unsigned short call_index; | ||
| 48 | unsigned char reserved1[HOST_GUEST_PADDING - 2]; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct data { | ||
| 52 | void *buf; /* descriptor is writeable, we can't get buf from there */ | ||
| 53 | void *data; | ||
| 54 | } *data; | ||
| 55 | |||
| 56 | struct desc *ring; | ||
| 57 | struct event *event; | ||
| 58 | |||
| 59 | struct guest { | ||
| 60 | unsigned avail_idx; | ||
| 61 | unsigned last_used_idx; | ||
| 62 | unsigned num_free; | ||
| 63 | unsigned kicked_avail_idx; | ||
| 64 | unsigned char reserved[HOST_GUEST_PADDING - 12]; | ||
| 65 | } guest; | ||
| 66 | |||
| 67 | struct host { | ||
| 68 | /* we do not need to track last avail index | ||
| 69 | * unless we have more than one in flight. | ||
| 70 | */ | ||
| 71 | unsigned used_idx; | ||
| 72 | unsigned called_used_idx; | ||
| 73 | unsigned char reserved[HOST_GUEST_PADDING - 4]; | ||
| 74 | } host; | ||
| 75 | |||
| 76 | /* implemented by ring */ | ||
| 77 | void alloc_ring(void) | ||
| 78 | { | ||
| 79 | int ret; | ||
| 80 | int i; | ||
| 81 | |||
| 82 | ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring); | ||
| 83 | if (ret) { | ||
| 84 | perror("Unable to allocate ring buffer.\n"); | ||
| 85 | exit(3); | ||
| 86 | } | ||
| 87 | event = malloc(sizeof *event); | ||
| 88 | if (!event) { | ||
| 89 | perror("Unable to allocate event buffer.\n"); | ||
| 90 | exit(3); | ||
| 91 | } | ||
| 92 | memset(event, 0, sizeof *event); | ||
| 93 | guest.avail_idx = 0; | ||
| 94 | guest.kicked_avail_idx = -1; | ||
| 95 | guest.last_used_idx = 0; | ||
| 96 | host.used_idx = 0; | ||
| 97 | host.called_used_idx = -1; | ||
| 98 | for (i = 0; i < ring_size; ++i) { | ||
| 99 | struct desc desc = { | ||
| 100 | .index = i, | ||
| 101 | }; | ||
| 102 | ring[i] = desc; | ||
| 103 | } | ||
| 104 | guest.num_free = ring_size; | ||
| 105 | data = malloc(ring_size * sizeof *data); | ||
| 106 | if (!data) { | ||
| 107 | perror("Unable to allocate data buffer.\n"); | ||
| 108 | exit(3); | ||
| 109 | } | ||
| 110 | memset(data, 0, ring_size * sizeof *data); | ||
| 111 | } | ||
| 112 | |||
| 113 | /* guest side */ | ||
| 114 | int add_inbuf(unsigned len, void *buf, void *datap) | ||
| 115 | { | ||
| 116 | unsigned head, index; | ||
| 117 | |||
| 118 | if (!guest.num_free) | ||
| 119 | return -1; | ||
| 120 | |||
| 121 | guest.num_free--; | ||
| 122 | head = (ring_size - 1) & (guest.avail_idx++); | ||
| 123 | |||
| 124 | /* Start with a write. Under MESI-style cache coherence this helps | ||
| 125 | * avoid the shared state with a consumer that is polling this descriptor. | ||
| 126 | */ | ||
| 127 | ring[head].addr = (unsigned long)(void*)buf; | ||
| 128 | ring[head].len = len; | ||
| 129 | /* read below might bypass write above. That is OK because it's just an | ||
| 130 | * optimization. If this happens, we will get the cache line in a | ||
| 131 | * shared state which is unfortunate, but probably not worth it to | ||
| 132 | * add an explicit full barrier to avoid this. | ||
| 133 | */ | ||
| 134 | barrier(); | ||
| 135 | index = ring[head].index; | ||
| 136 | data[index].buf = buf; | ||
| 137 | data[index].data = datap; | ||
| 138 | /* Barrier A (for pairing) */ | ||
| 139 | smp_release(); | ||
| 140 | ring[head].flags = DESC_HW; | ||
| 141 | |||
| 142 | return 0; | ||
| 143 | } | ||
| 144 | |||
| 145 | void *get_buf(unsigned *lenp, void **bufp) | ||
| 146 | { | ||
| 147 | unsigned head = (ring_size - 1) & guest.last_used_idx; | ||
| 148 | unsigned index; | ||
| 149 | void *datap; | ||
| 150 | |||
| 151 | if (ring[head].flags & DESC_HW) | ||
| 152 | return NULL; | ||
| 153 | /* Barrier B (for pairing) */ | ||
| 154 | smp_acquire(); | ||
| 155 | *lenp = ring[head].len; | ||
| 156 | index = ring[head].index & (ring_size - 1); | ||
| 157 | datap = data[index].data; | ||
| 158 | *bufp = data[index].buf; | ||
| 159 | data[index].buf = NULL; | ||
| 160 | data[index].data = NULL; | ||
| 161 | guest.num_free++; | ||
| 162 | guest.last_used_idx++; | ||
| 163 | return datap; | ||
| 164 | } | ||
| 165 | |||
| 166 | void poll_used(void) | ||
| 167 | { | ||
| 168 | unsigned head = (ring_size - 1) & guest.last_used_idx; | ||
| 169 | |||
| 170 | while (ring[head].flags & DESC_HW) | ||
| 171 | busy_wait(); | ||
| 172 | } | ||
| 173 | |||
| 174 | void disable_call() | ||
| 175 | { | ||
| 176 | /* Doing nothing to disable calls might cause | ||
| 177 | * extra interrupts, but reduces the number of cache misses. | ||
| 178 | */ | ||
| 179 | } | ||
| 180 | |||
| 181 | bool enable_call() | ||
| 182 | { | ||
| 183 | unsigned head = (ring_size - 1) & guest.last_used_idx; | ||
| 184 | |||
| 185 | event->call_index = guest.last_used_idx; | ||
| 186 | /* Flush call index write */ | ||
| 187 | /* Barrier D (for pairing) */ | ||
| 188 | smp_mb(); | ||
| 189 | return ring[head].flags & DESC_HW; | ||
| 190 | } | ||
| 191 | |||
| 192 | void kick_available(void) | ||
| 193 | { | ||
| 194 | /* Flush in previous flags write */ | ||
| 195 | /* Barrier C (for pairing) */ | ||
| 196 | smp_mb(); | ||
| 197 | if (!need_event(event->kick_index, | ||
| 198 | guest.avail_idx, | ||
| 199 | guest.kicked_avail_idx)) | ||
| 200 | return; | ||
| 201 | |||
| 202 | guest.kicked_avail_idx = guest.avail_idx; | ||
| 203 | kick(); | ||
| 204 | } | ||
| 205 | |||
| 206 | /* host side */ | ||
| 207 | void disable_kick() | ||
| 208 | { | ||
| 209 | /* Doing nothing to disable kicks might cause | ||
| 210 | * extra interrupts, but reduces the number of cache misses. | ||
| 211 | */ | ||
| 212 | } | ||
| 213 | |||
| 214 | bool enable_kick() | ||
| 215 | { | ||
| 216 | unsigned head = (ring_size - 1) & host.used_idx; | ||
| 217 | |||
| 218 | event->kick_index = host.used_idx; | ||
| 219 | /* Barrier C (for pairing) */ | ||
| 220 | smp_mb(); | ||
| 221 | return !(ring[head].flags & DESC_HW); | ||
| 222 | } | ||
| 223 | |||
| 224 | void poll_avail(void) | ||
| 225 | { | ||
| 226 | unsigned head = (ring_size - 1) & host.used_idx; | ||
| 227 | |||
| 228 | while (!(ring[head].flags & DESC_HW)) | ||
| 229 | busy_wait(); | ||
| 230 | } | ||
| 231 | |||
| 232 | bool use_buf(unsigned *lenp, void **bufp) | ||
| 233 | { | ||
| 234 | unsigned head = (ring_size - 1) & host.used_idx; | ||
| 235 | |||
| 236 | if (!(ring[head].flags & DESC_HW)) | ||
| 237 | return false; | ||
| 238 | |||
| 239 | /* make sure length read below is not speculated */ | ||
| 240 | /* Barrier A (for pairing) */ | ||
| 241 | smp_acquire(); | ||
| 242 | |||
| 243 | /* simple in-order completion: we don't need | ||
| 244 | * to touch index at all. This also means we | ||
| 245 | * can just modify the descriptor in-place. | ||
| 246 | */ | ||
| 247 | ring[head].len--; | ||
| 248 | /* Make sure len is valid before flags. | ||
| 249 | * Note: an alternative is to write len and flags in a single access - | ||
| 250 | * possible on 64 bit architectures - but wmb is free on Intel anyway, | ||
| 251 | * so there is no way to test whether that would be a gain. | ||
| 252 | */ | ||
| 253 | /* Barrier B (for pairing) */ | ||
| 254 | smp_release(); | ||
| 255 | ring[head].flags = 0; | ||
| 256 | host.used_idx++; | ||
| 257 | return true; | ||
| 258 | } | ||
| 259 | |||
| 260 | void call_used(void) | ||
| 261 | { | ||
| 262 | /* Flush in previous flags write */ | ||
| 263 | /* Barrier D (for pairing) */ | ||
| 264 | smp_mb(); | ||
| 265 | if (!need_event(event->call_index, | ||
| 266 | host.used_idx, | ||
| 267 | host.called_used_idx)) | ||
| 268 | return; | ||
| 269 | |||
| 270 | host.called_used_idx = host.used_idx; | ||
| 271 | call(); | ||
| 272 | } | ||
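
The need_event() helper at the top of ring.c decides whether a notification is required: it returns true when the peer's requested event index falls in the window [prev, next), i.e. when an entry at or past the index the peer asked about has been written since the last notification, with the unsigned short arithmetic making the test wrap-safe. A small standalone check of that behaviour (an illustration, not part of the commit):

	#include <assert.h>
	#include <stdbool.h>

	static bool need_event(unsigned short event, unsigned short next,
			       unsigned short prev)
	{
		return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
	}

	int main(void)
	{
		/* peer asked to be notified at index 5; we moved from 3 to 8: notify */
		assert(need_event(5, 8, 3));
		/* peer asked for index 10; we only moved from 3 to 8: stay quiet */
		assert(!need_event(10, 8, 3));
		/* wraparound: moved from 0xfffe to 0x0002, peer asked for 0xffff */
		assert(need_event(0xffff, 0x0002, 0xfffe));
		return 0;
	}
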
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh new file mode 100755 index 000000000000..52b0f71ffa8d --- /dev/null +++ b/tools/virtio/ringtest/run-on-all.sh | |||
| @@ -0,0 +1,24 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | |||
| 3 | # Use the last CPU for the host. Why not the first? | ||
| 4 | # Many devices use cpu0 by default, so it tends | ||
| 5 | # to be busier than the other CPUs. | ||
| 6 | HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1) | ||
| 7 | |||
| 8 | # Run the command once with each CPU as the guest CPU. | ||
| 9 | for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n); | ||
| 10 | do | ||
| 11 | # Don't run the guest and the host on the same CPU; | ||
| 12 | # that does work acceptably when --sleep signalling is used. | ||
| 13 | if | ||
| 14 | (echo "$@" | grep -e "--sleep" > /dev/null) || \ | ||
| 15 | test $HOST_AFFINITY '!=' $cpu | ||
| 16 | then | ||
| 17 | echo "GUEST AFFINITY $cpu" | ||
| 18 | "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu | ||
| 19 | fi | ||
| 20 | done | ||
| 21 | echo "NO GUEST AFFINITY" | ||
| 22 | "$@" --host-affinity $HOST_AFFINITY | ||
| 23 | echo "NO AFFINITY" | ||
| 24 | "$@" | ||
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c new file mode 100644 index 000000000000..47c9a1a18d36 --- /dev/null +++ b/tools/virtio/ringtest/virtio_ring_0_9.c | |||
| @@ -0,0 +1,316 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Red Hat, Inc. | ||
| 3 | * Author: Michael S. Tsirkin <mst@redhat.com> | ||
| 4 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
| 5 | * | ||
| 6 | * Partial implementation of virtio 0.9. event index is used for signalling, | ||
| 7 | * unconditionally. Design roughly follows linux kernel implementation in order | ||
| 8 | * to be able to judge its performance. | ||
| 9 | */ | ||
| 10 | #define _GNU_SOURCE | ||
| 11 | #include "main.h" | ||
| 12 | #include <stdlib.h> | ||
| 13 | #include <stdio.h> | ||
| 14 | #include <assert.h> | ||
| 15 | #include <string.h> | ||
| 16 | #include <linux/virtio_ring.h> | ||
| 17 | |||
| 18 | struct data { | ||
| 19 | void *data; | ||
| 20 | } *data; | ||
| 21 | |||
| 22 | struct vring ring; | ||
| 23 | |||
| 24 | /* enabling the below activates experimental ring polling code | ||
| 25 | * (which skips index reads on consumer in favor of looking at | ||
| 26 | * high bits of ring id ^ 0x8000). | ||
| 27 | */ | ||
| 28 | /* #ifdef RING_POLL */ | ||
| 29 | |||
| 30 | /* how much padding is needed to avoid false cache sharing */ | ||
| 31 | #define HOST_GUEST_PADDING 0x80 | ||
| 32 | |||
| 33 | struct guest { | ||
| 34 | unsigned short avail_idx; | ||
| 35 | unsigned short last_used_idx; | ||
| 36 | unsigned short num_free; | ||
| 37 | unsigned short kicked_avail_idx; | ||
| 38 | unsigned short free_head; | ||
| 39 | unsigned char reserved[HOST_GUEST_PADDING - 10]; | ||
| 40 | } guest; | ||
| 41 | |||
| 42 | struct host { | ||
| 43 | /* we do not need to track last avail index | ||
| 44 | * unless we have more than one in flight. | ||
| 45 | */ | ||
| 46 | unsigned short used_idx; | ||
| 47 | unsigned short called_used_idx; | ||
| 48 | unsigned char reserved[HOST_GUEST_PADDING - 4]; | ||
| 49 | } host; | ||
| 50 | |||
| 51 | /* implemented by ring */ | ||
| 52 | void alloc_ring(void) | ||
| 53 | { | ||
| 54 | int ret; | ||
| 55 | int i; | ||
| 56 | void *p; | ||
| 57 | |||
| 58 | ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000)); | ||
| 59 | if (ret) { | ||
| 60 | perror("Unable to allocate ring buffer.\n"); | ||
| 61 | exit(3); | ||
| 62 | } | ||
| 63 | memset(p, 0, vring_size(ring_size, 0x1000)); | ||
| 64 | vring_init(&ring, ring_size, p, 0x1000); | ||
| 65 | |||
| 66 | guest.avail_idx = 0; | ||
| 67 | guest.kicked_avail_idx = -1; | ||
| 68 | guest.last_used_idx = 0; | ||
| 69 | /* Put everything in free lists. */ | ||
| 70 | guest.free_head = 0; | ||
| 71 | for (i = 0; i < ring_size - 1; i++) | ||
| 72 | ring.desc[i].next = i + 1; | ||
| 73 | host.used_idx = 0; | ||
| 74 | host.called_used_idx = -1; | ||
| 75 | guest.num_free = ring_size; | ||
| 76 | data = malloc(ring_size * sizeof *data); | ||
| 77 | if (!data) { | ||
| 78 | perror("Unable to allocate data buffer.\n"); | ||
| 79 | exit(3); | ||
| 80 | } | ||
| 81 | memset(data, 0, ring_size * sizeof *data); | ||
| 82 | } | ||
| 83 | |||
| 84 | /* guest side */ | ||
| 85 | int add_inbuf(unsigned len, void *buf, void *datap) | ||
| 86 | { | ||
| 87 | unsigned head, avail; | ||
| 88 | struct vring_desc *desc; | ||
| 89 | |||
| 90 | if (!guest.num_free) | ||
| 91 | return -1; | ||
| 92 | |||
| 93 | head = guest.free_head; | ||
| 94 | guest.num_free--; | ||
| 95 | |||
| 96 | desc = ring.desc; | ||
| 97 | desc[head].flags = VRING_DESC_F_NEXT; | ||
| 98 | desc[head].addr = (unsigned long)(void *)buf; | ||
| 99 | desc[head].len = len; | ||
| 100 | /* We do it like this to simulate the way | ||
| 101 | * we'd have to flip it if we had multiple | ||
| 102 | * descriptors. | ||
| 103 | */ | ||
| 104 | desc[head].flags &= ~VRING_DESC_F_NEXT; | ||
| 105 | guest.free_head = desc[head].next; | ||
| 106 | |||
| 107 | data[head].data = datap; | ||
| 108 | |||
| 109 | #ifdef RING_POLL | ||
| 110 | /* Barrier A (for pairing) */ | ||
| 111 | smp_release(); | ||
| 112 | avail = guest.avail_idx++; | ||
| 113 | ring.avail->ring[avail & (ring_size - 1)] = | ||
| 114 | (head | (avail & ~(ring_size - 1))) ^ 0x8000; | ||
| 115 | #else | ||
| 116 | avail = (ring_size - 1) & (guest.avail_idx++); | ||
| 117 | ring.avail->ring[avail] = head; | ||
| 118 | /* Barrier A (for pairing) */ | ||
| 119 | smp_release(); | ||
| 120 | #endif | ||
| 121 | ring.avail->idx = guest.avail_idx; | ||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | void *get_buf(unsigned *lenp, void **bufp) | ||
| 126 | { | ||
| 127 | unsigned head; | ||
| 128 | unsigned index; | ||
| 129 | void *datap; | ||
| 130 | |||
| 131 | #ifdef RING_POLL | ||
| 132 | head = (ring_size - 1) & guest.last_used_idx; | ||
| 133 | index = ring.used->ring[head].id; | ||
| 134 | if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1)) | ||
| 135 | return NULL; | ||
| 136 | /* Barrier B (for pairing) */ | ||
| 137 | smp_acquire(); | ||
| 138 | index &= ring_size - 1; | ||
| 139 | #else | ||
| 140 | if (ring.used->idx == guest.last_used_idx) | ||
| 141 | return NULL; | ||
| 142 | /* Barrier B (for pairing) */ | ||
| 143 | smp_acquire(); | ||
| 144 | head = (ring_size - 1) & guest.last_used_idx; | ||
| 145 | index = ring.used->ring[head].id; | ||
| 146 | #endif | ||
| 147 | *lenp = ring.used->ring[head].len; | ||
| 148 | datap = data[index].data; | ||
| 149 | *bufp = (void*)(unsigned long)ring.desc[index].addr; | ||
| 150 | data[index].data = NULL; | ||
| 151 | ring.desc[index].next = guest.free_head; | ||
| 152 | guest.free_head = index; | ||
| 153 | guest.num_free++; | ||
| 154 | guest.last_used_idx++; | ||
| 155 | return datap; | ||
| 156 | } | ||
| 157 | |||
| 158 | void poll_used(void) | ||
| 159 | { | ||
| 160 | #ifdef RING_POLL | ||
| 161 | unsigned head = (ring_size - 1) & guest.last_used_idx; | ||
| 162 | |||
| 163 | for (;;) { | ||
| 164 | unsigned index = ring.used->ring[head].id; | ||
| 165 | |||
| 166 | if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1)) | ||
| 167 | busy_wait(); | ||
| 168 | else | ||
| 169 | break; | ||
| 170 | } | ||
| 171 | #else | ||
| 172 | unsigned head = guest.last_used_idx; | ||
| 173 | |||
| 174 | while (ring.used->idx == head) | ||
| 175 | busy_wait(); | ||
| 176 | #endif | ||
| 177 | } | ||
| 178 | |||
| 179 | void disable_call() | ||
| 180 | { | ||
| 181 | /* Doing nothing to disable calls might cause | ||
| 182 | * extra interrupts, but reduces the number of cache misses. | ||
| 183 | */ | ||
| 184 | } | ||
| 185 | |||
| 186 | bool enable_call() | ||
| 187 | { | ||
| 188 | unsigned short last_used_idx; | ||
| 189 | |||
| 190 | vring_used_event(&ring) = (last_used_idx = guest.last_used_idx); | ||
| 191 | /* Flush call index write */ | ||
| 192 | /* Barrier D (for pairing) */ | ||
| 193 | smp_mb(); | ||
| 194 | #ifdef RING_POLL | ||
| 195 | { | ||
| 196 | unsigned short head = last_used_idx & (ring_size - 1); | ||
| 197 | unsigned index = ring.used->ring[head].id; | ||
| 198 | |||
| 199 | return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1); | ||
| 200 | } | ||
| 201 | #else | ||
| 202 | return ring.used->idx == last_used_idx; | ||
| 203 | #endif | ||
| 204 | } | ||
| 205 | |||
| 206 | void kick_available(void) | ||
| 207 | { | ||
| 208 | /* Flush in previous flags write */ | ||
| 209 | /* Barrier C (for pairing) */ | ||
| 210 | smp_mb(); | ||
| 211 | if (!vring_need_event(vring_avail_event(&ring), | ||
| 212 | guest.avail_idx, | ||
| 213 | guest.kicked_avail_idx)) | ||
| 214 | return; | ||
| 215 | |||
| 216 | guest.kicked_avail_idx = guest.avail_idx; | ||
| 217 | kick(); | ||
| 218 | } | ||
| 219 | |||
| 220 | /* host side */ | ||
| 221 | void disable_kick() | ||
| 222 | { | ||
| 223 | /* Doing nothing to disable kicks might cause | ||
| 224 | * extra interrupts, but reduces the number of cache misses. | ||
| 225 | */ | ||
| 226 | } | ||
| 227 | |||
| 228 | bool enable_kick() | ||
| 229 | { | ||
| 230 | unsigned head = host.used_idx; | ||
| 231 | |||
| 232 | vring_avail_event(&ring) = head; | ||
| 233 | /* Barrier C (for pairing) */ | ||
| 234 | smp_mb(); | ||
| 235 | #ifdef RING_POLL | ||
| 236 | { | ||
| 237 | unsigned index = ring.avail->ring[head & (ring_size - 1)]; | ||
| 238 | |||
| 239 | return (index ^ head ^ 0x8000) & ~(ring_size - 1); | ||
| 240 | } | ||
| 241 | #else | ||
| 242 | return head == ring.avail->idx; | ||
| 243 | #endif | ||
| 244 | } | ||
| 245 | |||
| 246 | void poll_avail(void) | ||
| 247 | { | ||
| 248 | unsigned head = host.used_idx; | ||
| 249 | #ifdef RING_POLL | ||
| 250 | for (;;) { | ||
| 251 | unsigned index = ring.avail->ring[head & (ring_size - 1)]; | ||
| 252 | if ((index ^ head ^ 0x8000) & ~(ring_size - 1)) | ||
| 253 | busy_wait(); | ||
| 254 | else | ||
| 255 | break; | ||
| 256 | } | ||
| 257 | #else | ||
| 258 | while (ring.avail->idx == head) | ||
| 259 | busy_wait(); | ||
| 260 | #endif | ||
| 261 | } | ||
| 262 | |||
| 263 | bool use_buf(unsigned *lenp, void **bufp) | ||
| 264 | { | ||
| 265 | unsigned used_idx = host.used_idx; | ||
| 266 | struct vring_desc *desc; | ||
| 267 | unsigned head; | ||
| 268 | |||
| 269 | #ifdef RING_POLL | ||
| 270 | head = ring.avail->ring[used_idx & (ring_size - 1)]; | ||
| 271 | if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1)) | ||
| 272 | return false; | ||
| 273 | /* Barrier A (for pairing) */ | ||
| 274 | smp_acquire(); | ||
| 275 | |||
| 276 | used_idx &= ring_size - 1; | ||
| 277 | desc = &ring.desc[head & (ring_size - 1)]; | ||
| 278 | #else | ||
| 279 | if (used_idx == ring.avail->idx) | ||
| 280 | return false; | ||
| 281 | |||
| 282 | /* Barrier A (for pairing) */ | ||
| 283 | smp_acquire(); | ||
| 284 | |||
| 285 | used_idx &= ring_size - 1; | ||
| 286 | head = ring.avail->ring[used_idx]; | ||
| 287 | desc = &ring.desc[head]; | ||
| 288 | #endif | ||
| 289 | |||
| 290 | *lenp = desc->len; | ||
| 291 | *bufp = (void *)(unsigned long)desc->addr; | ||
| 292 | |||
| 293 | /* now update used ring */ | ||
| 294 | ring.used->ring[used_idx].id = head; | ||
| 295 | ring.used->ring[used_idx].len = desc->len - 1; | ||
| 296 | /* Barrier B (for pairing) */ | ||
| 297 | smp_release(); | ||
| 298 | host.used_idx++; | ||
| 299 | ring.used->idx = host.used_idx; | ||
| 300 | |||
| 301 | return true; | ||
| 302 | } | ||
| 303 | |||
| 304 | void call_used(void) | ||
| 305 | { | ||
| 306 | /* Flush in previous flags write */ | ||
| 307 | /* Barrier D (for pairing) */ | ||
| 308 | smp_mb(); | ||
| 309 | if (!vring_need_event(vring_used_event(&ring), | ||
| 310 | host.used_idx, | ||
| 311 | host.called_used_idx)) | ||
| 312 | return; | ||
| 313 | |||
| 314 | host.called_used_idx = host.used_idx; | ||
| 315 | call(); | ||
| 316 | } | ||
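
The RING_POLL variant above lets the consumer skip reading the producer's index entirely: each avail ring entry stores the descriptor id in the low bits and the upper bits of avail_idx (the lap counter) in the high bits, XORed with 0x8000 so that a zero-initialized, never-written slot is not mistaken for a valid entry on the first lap. A small standalone demonstration of the encoding and the validity check (an illustration, not part of the commit):

	#include <assert.h>
	#include <stdio.h>

	#define RING_SIZE 256			/* power of two, below 0x8000 */

	int main(void)
	{
		unsigned short avail_idx = 0x0103;	/* producer on a later lap, slot 3 */
		unsigned short used_idx  = 0x0103;	/* consumer at the matching index */
		unsigned short stale_idx = 0x0003;	/* consumer one full lap behind */
		unsigned head = 7;			/* descriptor id being posted */
		unsigned short entry;

		/* producer: descriptor id in the low bits, lap counter in the
		 * high bits, 0x8000 flipped
		 */
		entry = (head | (avail_idx & ~(RING_SIZE - 1))) ^ 0x8000;

		/* consumer at the matching index accepts the entry... */
		assert(!((entry ^ used_idx ^ 0x8000) & ~(RING_SIZE - 1)));
		/* ...while a different lap, or a zero-initialized slot, is rejected */
		assert((entry ^ stale_idx ^ 0x8000) & ~(RING_SIZE - 1));
		assert((0 ^ used_idx ^ 0x8000) & ~(RING_SIZE - 1));

		printf("entry 0x%04x -> descriptor id %u\n",
		       (unsigned)entry, (unsigned)(entry & (RING_SIZE - 1)));
		return 0;
	}
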
diff --git a/tools/virtio/ringtest/virtio_ring_poll.c b/tools/virtio/ringtest/virtio_ring_poll.c new file mode 100644 index 000000000000..84fc2c557aaa --- /dev/null +++ b/tools/virtio/ringtest/virtio_ring_poll.c | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | #define RING_POLL 1 | ||
| 2 | #include "virtio_ring_0_9.c" | ||
