 45 files changed, 748 insertions(+), 409 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/pmu.txt b/Documentation/devicetree/bindings/arm/pmu.txt
index 6e54a9d88b7a..3b5f5d1088c6 100644
--- a/Documentation/devicetree/bindings/arm/pmu.txt
+++ b/Documentation/devicetree/bindings/arm/pmu.txt
@@ -26,6 +26,13 @@ Required properties:
 
 Optional properties:
 
+- interrupt-affinity : Valid only when using SPIs, specifies a list of phandles
+                       to CPU nodes corresponding directly to the affinity of
+                       the SPIs listed in the interrupts property.
+
+                       This property should be present when there is more than
+                       a single SPI.
+
 - qcom,no-pc-write : Indicates that this PMU doesn't support the 0xc and 0xd
                      events.
 
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 4cf48c3aca13..405aa1883307 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -269,6 +269,16 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return false;
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+					pgd_t *hyp_pgd,
+					pgd_t *merged_hyp_pgd,
+					unsigned long hyp_idmap_start) { }
+
 #endif	/* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7a301be9ac67..8b60fde5ce48 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -11,7 +11,7 @@
 #ifdef CONFIG_ARM_KERNMEM_PERMS
 #include <asm/pgtable.h>
 #endif
 
 #define PROC_INFO							\
 	. = ALIGN(4);							\
 	VMLINUX_SYMBOL(__proc_info_begin) = .;				\
@@ -23,7 +23,7 @@
 	VMLINUX_SYMBOL(__idmap_text_start) = .;				\
 	*(.idmap.text)							\
 	VMLINUX_SYMBOL(__idmap_text_end) = .;				\
-	. = ALIGN(32);							\
+	. = ALIGN(PAGE_SIZE);						\
 	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\
 	*(.hyp.idmap.text)						\
 	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
@@ -343,8 +343,11 @@ SECTIONS
  */
 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
 /*
- * The HYP init code can't be more than a page long.
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
  * The above comment applies as well.
  */
-ASSERT(((__hyp_idmap_text_end - __hyp_idmap_text_start) <= PAGE_SIZE), "HYP init code too big")
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+	"HYP init code too big or misaligned")
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 15b050d46fc9..1d5accbd3dcf 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -35,9 +35,9 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 
 static pgd_t *boot_hyp_pgd;
 static pgd_t *hyp_pgd;
+static pgd_t *merged_hyp_pgd;
 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
 
-static void *init_bounce_page;
 static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
@@ -405,9 +405,6 @@ void free_boot_hyp_pgd(void)
 	if (hyp_pgd)
 		unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-	free_page((unsigned long)init_bounce_page);
-	init_bounce_page = NULL;
-
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
 
@@ -438,6 +435,11 @@ void free_hyp_pgds(void)
 		free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
 		hyp_pgd = NULL;
 	}
+	if (merged_hyp_pgd) {
+		clear_page(merged_hyp_pgd);
+		free_page((unsigned long)merged_hyp_pgd);
+		merged_hyp_pgd = NULL;
+	}
 
 	mutex_unlock(&kvm_hyp_pgd_mutex);
 }
@@ -1622,12 +1624,18 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 
 phys_addr_t kvm_mmu_get_httbr(void)
 {
-	return virt_to_phys(hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(hyp_pgd);
 }
 
 phys_addr_t kvm_mmu_get_boot_httbr(void)
 {
-	return virt_to_phys(boot_hyp_pgd);
+	if (__kvm_cpu_uses_extended_idmap())
+		return virt_to_phys(merged_hyp_pgd);
+	else
+		return virt_to_phys(boot_hyp_pgd);
 }
 
 phys_addr_t kvm_get_idmap_vector(void)
@@ -1643,39 +1651,11 @@ int kvm_mmu_init(void)
 	hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
 	hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
 
-	if ((hyp_idmap_start ^ hyp_idmap_end) & PAGE_MASK) {
-		/*
-		 * Our init code is crossing a page boundary. Allocate
-		 * a bounce page, copy the code over and use that.
-		 */
-		size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
-		phys_addr_t phys_base;
-
-		init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
-		if (!init_bounce_page) {
-			kvm_err("Couldn't allocate HYP init bounce page\n");
-			err = -ENOMEM;
-			goto out;
-		}
-
-		memcpy(init_bounce_page, __hyp_idmap_text_start, len);
-		/*
-		 * Warning: the code we just copied to the bounce page
-		 * must be flushed to the point of coherency.
-		 * Otherwise, the data may be sitting in L2, and HYP
-		 * mode won't be able to observe it as it runs with
-		 * caches off at that point.
-		 */
-		kvm_flush_dcache_to_poc(init_bounce_page, len);
-
-		phys_base = kvm_virt_to_phys(init_bounce_page);
-		hyp_idmap_vector += phys_base - hyp_idmap_start;
-		hyp_idmap_start = phys_base;
-		hyp_idmap_end = phys_base + len;
-
-		kvm_info("Using HYP init bounce page @%lx\n",
-			 (unsigned long)phys_base);
-	}
+	/*
+	 * We rely on the linker script to ensure at build time that the HYP
+	 * init code does not cross a page boundary.
+	 */
+	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
 
 	hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
 	boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
@@ -1698,6 +1678,17 @@ int kvm_mmu_init(void)
 		goto out;
 	}
 
+	if (__kvm_cpu_uses_extended_idmap()) {
+		merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!merged_hyp_pgd) {
+			kvm_err("Failed to allocate extra HYP pgd\n");
+			goto out;
+		}
+		__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
+				    hyp_idmap_start);
+		return 0;
+	}
+
 	/* Map the very same page at the trampoline VA */
 	err = __create_hyp_mappings(boot_hyp_pgd,
 				    TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
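
Note the off-by-one fix hidden in the new BUG_ON: the removed bounce-page test XORed hyp_idmap_start with the exclusive end, so a range ending exactly on a page boundary was wrongly treated as crossing one. The new check XORs against hyp_idmap_end - 1, the last byte actually occupied. A minimal demonstration (standalone C, hypothetical addresses, 4 KiB pages assumed):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_MASK (~4095UL)

    int main(void)
    {
            /* range that ends exactly on a page boundary (end is exclusive) */
            uint64_t start = 0x40001f00, end = 0x40002000;

            /* old check (start ^ end): false positive, flags a crossing */
            assert((start ^ end) & PAGE_MASK);
            /* new check (start ^ (end - 1)): correctly sees a single page */
            assert(!((start ^ (end - 1)) & PAGE_MASK));
            return 0;
    }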
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 34f487d5d84e..b8d96f1554af 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -368,6 +368,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_845719
+	bool "Cortex-A53: 845719: a load might read incorrect data"
+	depends on COMPAT
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 845719 on Cortex-A53 parts up to r0p4.
+
+	  When running a compat (AArch32) userspace on an affected Cortex-A53
+	  part, a load at EL0 from a virtual address that matches the bottom 32
+	  bits of the virtual address used by a recent load at (AArch64) EL1
+	  might return incorrect data.
+
+	  The workaround is to write the contextidr_el1 register on exception
+	  return to a 32-bit task.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 endmenu
 
 
@@ -455,8 +476,8 @@ config SCHED_SMT
 	  places. If unsure say N here.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-64)"
-	range 2 64
+	int "Maximum number of CPUs (2-4096)"
+	range 2 4096
 	depends on SMP
 	# These have to remain sorted largest to smallest
 	default "64"
@@ -470,6 +491,10 @@ config HOTPLUG_CPU
 
 source kernel/Kconfig.preempt
 
+config UP_LATE_INIT
+	def_bool y
+	depends on !SMP
+
 config HZ
 	int
 	default 100
@@ -670,7 +695,7 @@ source "fs/Kconfig.binfmt"
 
 config COMPAT
 	bool "Kernel support for 32-bit EL0"
-	depends on !ARM64_64K_PAGES
+	depends on !ARM64_64K_PAGES || EXPERT
 	select COMPAT_BINFMT_ELF
 	select HAVE_UID16
 	select OLD_SIGSUSPEND3
@@ -681,6 +706,10 @@ config COMPAT
 	  the user helper functions, VFP support and the ptrace interface are
 	  handled appropriately by the kernel.
 
+	  If you also enabled CONFIG_ARM64_64K_PAGES, please be aware that you
+	  will only be able to execute AArch32 binaries that were compiled with
+	  64k aligned segments.
+
 	  If you want to execute 32-bit userspace applications, say Y.
 
 config SYSVIPC_COMPAT
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 69ceedc982a5..4d2a925998f9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
 core-$(CONFIG_XEN) += arch/arm64/xen/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
 libs-y		:= arch/arm64/lib/ $(libs-y)
-libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
+core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # Default target when executing plain make
 KBUILD_IMAGE	:= Image.gz
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index af6a452b1aac..4e03d8dd23f6 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -31,8 +31,12 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_FSL_LS2085A=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
@@ -62,6 +66,7 @@ CONFIG_BPF_JIT=y
 # CONFIG_WIRELESS is not set
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+# CONFIG_TEGRA_AHB is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -81,6 +86,7 @@ CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
 CONFIG_NET_XGENE=y
+CONFIG_SKY2=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
@@ -100,6 +106,8 @@ CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_PL061=y
 CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
@@ -112,10 +120,10 @@ CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_ISP1760=y
 CONFIG_USB_ULPI=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
@@ -125,6 +133,7 @@ CONFIG_MMC_SPI=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
 CONFIG_RTC_DRV_XGENE=y
+CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
@@ -143,8 +152,10 @@ CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_HUGETLBFS=y
+CONFIG_EFIVAR_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
 CONFIG_ROOT_NFS=y
 CONFIG_9P_FS=y
 CONFIG_NLS_CODEPAGE_437=y
@@ -159,7 +170,6 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
-CONFIG_KEYS=y
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 432e4841cd81..a2a7fbcacc14 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -101,19 +101,19 @@ ENTRY(ce_aes_ccm_final)
 0:	mov	v4.16b, v3.16b
 1:	ld1	{v5.2d}, [x2], #16		/* load next round key */
 	aese	v0.16b, v4.16b
-	aese	v1.16b, v4.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
 2:	ld1	{v3.2d}, [x2], #16		/* load next round key */
 	aese	v0.16b, v5.16b
-	aese	v1.16b, v5.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
 3:	ld1	{v4.2d}, [x2], #16		/* load next round key */
 	subs	w3, w3, #3
 	aese	v0.16b, v3.16b
-	aese	v1.16b, v3.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v3.16b
 	aesmc	v1.16b, v1.16b
 	bpl	1b
 	aese	v0.16b, v4.16b
@@ -146,19 +146,19 @@ ENDPROC(ce_aes_ccm_final)
 	ld1	{v5.2d}, [x10], #16		/* load 2nd round key */
 2:	/* inner loop: 3 rounds, 2x interleaved */
 	aese	v0.16b, v4.16b
-	aese	v1.16b, v4.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
 3:	ld1	{v3.2d}, [x10], #16		/* load next round key */
 	aese	v0.16b, v5.16b
-	aese	v1.16b, v5.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
 4:	ld1	{v4.2d}, [x10], #16		/* load next round key */
 	subs	w7, w7, #3
 	aese	v0.16b, v3.16b
-	aese	v1.16b, v3.16b
 	aesmc	v0.16b, v0.16b
+	aese	v1.16b, v3.16b
 	aesmc	v1.16b, v1.16b
 	ld1	{v5.2d}, [x10], #16		/* load next round key */
 	bpl	2b
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 685a18f731eb..78f3cfe92c08 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -45,18 +45,14 @@
 
 	.macro		do_enc_Nx, de, mc, k, i0, i1, i2, i3
 	aes\de		\i0\().16b, \k\().16b
-	.ifnb		\i1
-	aes\de		\i1\().16b, \k\().16b
-	.ifnb		\i3
-	aes\de		\i2\().16b, \k\().16b
-	aes\de		\i3\().16b, \k\().16b
-	.endif
-	.endif
 	aes\mc		\i0\().16b, \i0\().16b
 	.ifnb		\i1
+	aes\de		\i1\().16b, \k\().16b
 	aes\mc		\i1\().16b, \i1\().16b
 	.ifnb		\i3
+	aes\de		\i2\().16b, \k\().16b
 	aes\mc		\i2\().16b, \i2\().16b
+	aes\de		\i3\().16b, \k\().16b
 	aes\mc		\i3\().16b, \i3\().16b
 	.endif
 	.endif
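
Both crypto changes apply the same scheduling rule: keep each aese next to the aesmc that consumes its output, because some ARMv8 cores (Cortex-A57 class) can fuse such adjacent pairs into a single operation. The same pairing expressed with NEON crypto intrinsics, as a hedged sketch (build with -march=armv8-a+crypto; the function name is illustrative):

    #include <arm_neon.h>

    /* one AES round over two blocks; each aese stays glued to its aesmc */
    static void aes_round_2x(uint8x16_t *b0, uint8x16_t *b1, uint8x16_t rk)
    {
            *b0 = vaesmcq_u8(vaeseq_u8(*b0, rk));   /* aese+aesmc, block 0 */
            *b1 = vaesmcq_u8(vaeseq_u8(*b1, rk));   /* aese+aesmc, block 1 */
    }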
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 750bac4e637e..144b64ad96c3 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -159,4 +159,52 @@ lr	.req	x30		// link register
 	orr	\rd, \lbits, \hbits, lsl #32
 	.endm
 
+/*
+ * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
+ * <symbol> is within the range +/- 4 GB of the PC.
+ */
+	/*
+	 * @dst: destination register (64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: optional scratch register to be used if <dst> == sp, which
+	 *       is not allowed in an adrp instruction
+	 */
+	.macro	adr_l, dst, sym, tmp=
+	.ifb	\tmp
+	adrp	\dst, \sym
+	add	\dst, \dst, :lo12:\sym
+	.else
+	adrp	\tmp, \sym
+	add	\dst, \tmp, :lo12:\sym
+	.endif
+	.endm
+
+	/*
+	 * @dst: destination register (32 or 64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
+	 *       32-bit wide register, in which case it cannot be used to hold
+	 *       the address
+	 */
+	.macro	ldr_l, dst, sym, tmp=
+	.ifb	\tmp
+	adrp	\dst, \sym
+	ldr	\dst, [\dst, :lo12:\sym]
+	.else
+	adrp	\tmp, \sym
+	ldr	\dst, [\tmp, :lo12:\sym]
+	.endif
+	.endm
+
+	/*
+	 * @src: source register (32 or 64 bit wide)
+	 * @sym: name of the symbol
+	 * @tmp: mandatory 64-bit scratch register to calculate the address
+	 *       while <src> needs to be preserved.
+	 */
+	.macro	str_l, src, sym, tmp
+	adrp	\tmp, \sym
+	str	\src, [\tmp, :lo12:\sym]
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b6c16d5f622f..82cb9f98ba1a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,11 +23,24 @@
 
 #define ARM64_WORKAROUND_CLEAN_CACHE		0
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE	1
+#define ARM64_WORKAROUND_845719			2
 
-#define ARM64_NCAPS				2
+#define ARM64_NCAPS				3
 
 #ifndef __ASSEMBLY__
 
+struct arm64_cpu_capabilities {
+	const char *desc;
+	u16 capability;
+	bool (*matches)(const struct arm64_cpu_capabilities *);
+	union {
+		struct {	/* To be used for erratum handling only */
+			u32 midr_model;
+			u32 midr_range_min, midr_range_max;
+		};
+	};
+};
+
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
 static inline bool cpu_have_feature(unsigned int num)
@@ -51,7 +64,10 @@ static inline void cpus_set_cap(unsigned int num)
 	__set_bit(num, cpu_hwcaps);
 }
 
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+			    const char *info);
 void check_local_cpu_errata(void);
+void check_local_cpu_features(void);
 bool cpu_supports_mixed_endian_el0(void);
 bool system_supports_mixed_endian_el0(void);
 
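
With the struct now shared between errata and features, an entry is just a desc string, a capability bit, and a matches() callback; the MIDR fields in the union only matter for erratum-style matching. A hypothetical table entry, to show the shape (the names and the always-true callback are made up for illustration):

    /* illustrative only: assumes the arm64_cpu_capabilities struct above */
    static bool match_everything(const struct arm64_cpu_capabilities *cap)
    {
            return true;    /* a real callback checks MIDR or ID registers */
    }

    static const struct arm64_cpu_capabilities demo_caps[] = {
            {
                    .desc = "example capability",
                    .capability = 0,        /* bit index into cpu_hwcaps */
                    .matches = match_everything,
            },
            { },    /* tables end with an entry whose desc is NULL */
    };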
diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h
deleted file mode 100644
index e3bd983d3661..000000000000
--- a/arch/arm64/include/asm/cputable.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * arch/arm64/include/asm/cputable.h
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_CPUTABLE_H
-#define __ASM_CPUTABLE_H
-
-struct cpu_info {
-	unsigned int	cpu_id_val;
-	unsigned int	cpu_id_mask;
-	const char	*cpu_name;
-	unsigned long	(*cpu_setup)(void);
-};
-
-extern struct cpu_info *lookup_processor_type(unsigned int);
-
-#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 6932bb57dba0..9437e3dc5833 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -97,7 +97,7 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
-		return 0;
+		return false;
 
 	return addr + size - 1 <= *dev->dma_mask;
 }
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index defa0ff98250..926495686554 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -33,6 +33,7 @@
 enum fixed_addresses {
 	FIX_HOLE,
 	FIX_EARLYCON_MEM_BASE,
+	FIX_TEXT_POKE0,
 	__end_of_permanent_fixed_addresses,
 
 	/*
@@ -49,7 +50,6 @@ enum fixed_addresses {
 
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-	FIX_TEXT_POKE0,
 	__end_of_fixed_addresses
 };
 
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index d2f49423c5dc..f81b328d9cf4 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -285,6 +285,7 @@ bool aarch64_insn_is_nop(u32 insn);
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
 u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 				  u32 insn, u64 imm);
 u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 36250705dc4c..61505676d085 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -68,6 +68,8 @@
 #include <asm/pgalloc.h>
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
 
 #define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
 
@@ -269,5 +271,36 @@ static inline void __kvm_flush_dcache_pud(pud_t pud)
 void kvm_set_way_flush(struct kvm_vcpu *vcpu);
 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
+static inline bool __kvm_cpu_uses_extended_idmap(void)
+{
+	return __cpu_uses_extended_idmap();
+}
+
+static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
+				       pgd_t *hyp_pgd,
+				       pgd_t *merged_hyp_pgd,
+				       unsigned long hyp_idmap_start)
+{
+	int idmap_idx;
+
+	/*
+	 * Use the first entry to access the HYP mappings. It is
+	 * guaranteed to be free, otherwise we wouldn't use an
+	 * extended idmap.
+	 */
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
+	merged_hyp_pgd[0] = __pgd(__pa(hyp_pgd) | PMD_TYPE_TABLE);
+
+	/*
+	 * Create another extended level entry that points to the boot HYP map,
+	 * which contains an ID mapping of the HYP init code. We essentially
+	 * merge the boot and runtime HYP maps by doing so, but they don't
+	 * overlap anyway, so this is fine.
+	 */
+	idmap_idx = hyp_idmap_start >> VA_BITS;
+	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
+	merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
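
__kvm_extend_hypmap() works because each entry of the extra translation level covers 2^VA_BITS bytes: the slot for the ID map is simply the physical address shifted right by VA_BITS, and slot 0 (low addresses, where the HYP mappings live) is guaranteed free. A toy calculation, under assumed values (39-bit VAs, made-up physical address):

    #include <stdint.h>
    #include <stdio.h>

    #define VA_BITS 39      /* assumption: 39-bit VA kernel configuration */

    int main(void)
    {
            /* hypothetical HYP init code located above 2^39 in PA space */
            uint64_t hyp_idmap_start = 0x8080000000ULL;

            /* each extended-level entry maps 1 << VA_BITS bytes */
            unsigned idmap_idx = (unsigned)(hyp_idmap_start >> VA_BITS);

            printf("ID map entry: merged_hyp_pgd[%u]\n", idmap_idx); /* 1 */
            return 0;
    }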
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 101a42bde728..8ec41e5f56f0 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void)
 	: "r" (ttbr));
 }
 
+/*
+ * TCR.T0SZ value to use when the ID map is active. Usually equals
+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
+ * physical memory, in which case it will be smaller.
+ */
+extern u64 idmap_t0sz;
+
+static inline bool __cpu_uses_extended_idmap(void)
+{
+	return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) &&
+		unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)));
+}
+
+static inline void __cpu_set_tcr_t0sz(u64 t0sz)
+{
+	unsigned long tcr;
+
+	if (__cpu_uses_extended_idmap())
+		asm volatile (
+		"	mrs	%0, tcr_el1	;"
+		"	bfi	%0, %1, %2, %3	;"
+		"	msr	tcr_el1, %0	;"
+		"	isb"
+		: "=&r" (tcr)
+		: "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+/*
+ * Set TCR.T0SZ to the value appropriate for activating the identity map.
+ */
+static inline void cpu_set_idmap_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(idmap_t0sz);
+}
+
+/*
+ * Set TCR.T0SZ to its default value (based on VA_BITS)
+ */
+static inline void cpu_set_default_tcr_t0sz(void)
+{
+	__cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS));
+}
+
 static inline void switch_new_context(struct mm_struct *mm)
 {
 	unsigned long flags;
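
T0SZ encodes the input address size as 64 minus the number of VA bits, so enlarging the ID map's reach means writing a smaller T0SZ into TCR_EL1. A standalone check of the field arithmetic that the bfi in __cpu_set_tcr_t0sz performs (constants mirror the pgtable-hwdef.h additions below):

    #include <assert.h>
    #include <stdint.h>

    #define TCR_T0SZ_OFFSET 0
    #define TCR_TxSZ_WIDTH  6
    #define TCR_T0SZ(x)     ((64ULL - (x)) << TCR_T0SZ_OFFSET)

    int main(void)
    {
            assert(TCR_T0SZ(39) == 25);     /* default for 39 VA bits */
            assert(TCR_T0SZ(48) == 16);     /* extended idmap reach   */

            /* emulate: bfi tcr, t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH */
            uint64_t tcr = ~0ULL;
            tcr &= ~(((1ULL << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET);
            tcr |= TCR_T0SZ(48) << TCR_T0SZ_OFFSET;
            assert((tcr & 0x3f) == 16);
            return 0;
    }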
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 8fc8fa280e92..7d9c7e4a424b 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -33,7 +33,9 @@
  * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
  * map the kernel. With the 64K page configuration, swapper and idmap need to
  * map to pte level. The swapper also maps the FDT (see __create_page_tables
- * for more information).
+ * for more information). Note that the number of ID map translation levels
+ * could be increased on the fly if system RAM is out of reach for the default
+ * VA range, so 3 pages are reserved in all cases.
  */
 #ifdef CONFIG_ARM64_64K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -42,7 +44,7 @@
 #endif
 
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
-#define IDMAP_DIR_SIZE		(SWAPPER_DIR_SIZE)
+#define IDMAP_DIR_SIZE		(3 * PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 80f3d241cff8..59bfae75dc98 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -143,7 +143,12 @@
 /*
  * TCR flags.
  */
-#define TCR_TxSZ(x)		(((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
+#define TCR_T0SZ_OFFSET		0
+#define TCR_T1SZ_OFFSET		16
+#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
+#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
+#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
+#define TCR_TxSZ_WIDTH		6
 #define TCR_IRGN_NC		((UL(0) << 8) | (UL(0) << 24))
 #define TCR_IRGN_WBWA		((UL(1) << 8) | (UL(1) << 24))
 #define TCR_IRGN_WT		((UL(2) << 8) | (UL(2) << 24))
diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h
index e6f087806aaf..b7710a59672c 100644
--- a/arch/arm64/include/asm/pmu.h
+++ b/arch/arm64/include/asm/pmu.h
@@ -44,6 +44,7 @@ struct pmu_hw_events {
 struct arm_pmu {
 	struct pmu		pmu;
 	cpumask_t		active_irqs;
+	int			*irq_affinity;
 	const char		*name;
 	irqreturn_t		(*handle_irq)(int irq_num, void *dev);
 	void			(*enable)(struct hw_perf_event *evt, int idx);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 941c375616e2..220633b791b8 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -45,15 +45,6 @@ do {							\
 	cpu_do_switch_mm(virt_to_phys(pgd),mm);		\
 } while (0)
 
-#define cpu_get_pgd()					\
-({							\
-	unsigned long pg;				\
-	asm("mrs	%0, ttbr0_el1\n"		\
-	    : "=r" (pg));				\
-	pg &= ~0xffff000000003ffful;			\
-	(pgd_t *)phys_to_virt(pg);			\
-})
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 20e9591a60cf..d2c37a1df0eb 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -127,7 +127,11 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define cpu_relax()			barrier()
+static inline void cpu_relax(void)
+{
+	asm volatile("yield" ::: "memory");
+}
+
 #define cpu_relax_lowlatency()		cpu_relax()
 
 /* Thread switching */
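
cpu_relax() now emits a real yield hint instead of a plain compiler barrier, which lets an SMT sibling or a hypervisor take the pipeline while a CPU spins. Typical usage, as a hedged sketch (AArch64-only because of the inline yield; the flag is a stand-in for real shared state):

    #include <stdatomic.h>

    static inline void cpu_relax(void)
    {
            asm volatile("yield" ::: "memory");     /* AArch64 hint */
    }

    /* busy-wait politely until another CPU publishes *flag */
    static void wait_for_flag(atomic_int *flag)
    {
            while (!atomic_load_explicit(flag, memory_order_acquire))
                    cpu_relax();
    }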
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 59e282311b58..8dcd61e32176 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
 extern u64 __cpu_logical_map[NR_CPUS];
 #define cpu_logical_map(cpu)    __cpu_logical_map[cpu]
 
+void __init do_post_cpus_up_work(void);
+
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 27224426e0bf..cef934a90f17 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -406,7 +406,7 @@ __SYSCALL(__NR_vfork, sys_vfork)
 #define __NR_ugetrlimit 191	/* SuS compliant getrlimit */
 __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)		/* SuS compliant getrlimit */
 #define __NR_mmap2 192
-__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+__SYSCALL(__NR_mmap2, compat_sys_mmap2_wrapper)
 #define __NR_truncate64 193
 __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
 #define __NR_ftruncate64 194
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ee07eee80c2..b12e15b80516 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -12,12 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg
 CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
-arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
+arm64-obj-y		:= debug-monitors.o entry.o irq.o fpsimd.o		\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
 			   hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o	\
 			   return_address.o cpuinfo.o cpu_errata.o		\
-			   alternative.o cacheinfo.o
+			   cpufeature.o alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)	+= sys32.o kuser32.o signal32.o		\
 				   sys_compat.o entry32.o		\
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index ad7821d64a1d..21033bba9390 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,6 +24,7 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/insn.h>
 #include <linux/stop_machine.h>
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -33,6 +34,48 @@ struct alt_region {
 	struct alt_instr	*end;
 };
 
+/*
+ * Decode the imm field of a b/bl instruction, and return the byte
+ * offset as a signed value (so it can be used when computing a new
+ * branch target).
+ */
+static s32 get_branch_offset(u32 insn)
+{
+	s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
+
+	/* sign-extend the immediate before turning it into a byte offset */
+	return (imm << 6) >> 4;
+}
+
+static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
+{
+	u32 insn;
+
+	aarch64_insn_read(altinsnptr, &insn);
+
+	/* Stop the world on instructions we don't support... */
+	BUG_ON(aarch64_insn_is_cbz(insn));
+	BUG_ON(aarch64_insn_is_cbnz(insn));
+	BUG_ON(aarch64_insn_is_bcond(insn));
+	/* ... and there is probably more. */
+
+	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
+		enum aarch64_insn_branch_type type;
+		unsigned long target;
+
+		if (aarch64_insn_is_b(insn))
+			type = AARCH64_INSN_BRANCH_NOLINK;
+		else
+			type = AARCH64_INSN_BRANCH_LINK;
+
+		target = (unsigned long)altinsnptr + get_branch_offset(insn);
+		insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
+						   target, type);
+	}
+
+	return insn;
+}
+
 static int __apply_alternatives(void *alt_region)
 {
 	struct alt_instr *alt;
@@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region)
 	u8 *origptr, *replptr;
 
 	for (alt = region->begin; alt < region->end; alt++) {
+		u32 insn;
+		int i;
+
 		if (!cpus_have_cap(alt->cpufeature))
 			continue;
 
-		BUG_ON(alt->alt_len > alt->orig_len);
+		BUG_ON(alt->alt_len != alt->orig_len);
 
 		pr_info_once("patching kernel code\n");
 
 		origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
 		replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-		memcpy(origptr, replptr, alt->alt_len);
+
+		for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
+			insn = get_alt_insn(origptr + i, replptr + i);
+			aarch64_insn_write(origptr + i, insn);
+		}
+
 		flush_icache_range((uintptr_t)origptr,
 				   (uintptr_t)(origptr + alt->alt_len));
 	}
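
get_branch_offset() leans on two facts: the b/bl immediate is a signed 26-bit word offset sitting in bits [25:0], and arithmetic shifts both sign-extend and scale. Shifting left by 6 parks bit 25 in the sign position; shifting right by 4 then yields the offset in words times 4, i.e. bytes. A standalone check of that arithmetic (assuming the decoded immediate occupies bits [25:0]):

    #include <assert.h>
    #include <stdint.h>

    /* (imm << 6) >> 4: sign-extend a 26-bit word offset into a byte offset */
    static int32_t branch_byte_offset(uint32_t imm26)
    {
            return ((int32_t)(imm26 << 6)) >> 4;
    }

    int main(void)
    {
            assert(branch_byte_offset(1) == 4);                  /* +1 insn */
            assert(branch_byte_offset(0x3ffffff) == -4);         /* -1 insn */
            assert(branch_byte_offset(0x2000000) == -(1 << 27)); /* most negative */
            return 0;
    }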
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 56cadd3606bf..da675cc5dfae 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,7 +24,6 @@
 #include <linux/kvm_host.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
-#include <asm/cputable.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/vdso_datapage.h>
@@ -70,9 +69,6 @@ int main(void)
   BLANK();
   DEFINE(PAGE_SZ,		PAGE_SIZE);
   BLANK();
-  DEFINE(CPU_INFO_SZ,		sizeof(struct cpu_info));
-  DEFINE(CPU_INFO_SETUP,	offsetof(struct cpu_info, cpu_setup));
-  BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index fa62637e63a8..6ffd91438560 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -16,8 +16,6 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#define pr_fmt(fmt) "alternatives: " fmt
-
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -26,27 +24,11 @@
 #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 
-/*
- * Add a struct or another datatype to the union below if you need
- * different means to detect an affected CPU.
- */
-struct arm64_cpu_capabilities {
-	const char *desc;
-	u16 capability;
-	bool (*is_affected)(struct arm64_cpu_capabilities *);
-	union {
-		struct {
-			u32 midr_model;
-			u32 midr_range_min, midr_range_max;
-		};
-	};
-};
-
 #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
 			MIDR_ARCHITECTURE_MASK)
 
 static bool __maybe_unused
-is_affected_midr_range(struct arm64_cpu_capabilities *entry)
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
 {
 	u32 midr = read_cpuid_id();
 
@@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry)
 }
 
 #define MIDR_RANGE(model, min, max) \
-	.is_affected = is_affected_midr_range, \
+	.matches = is_affected_midr_range, \
 	.midr_model = model, \
 	.midr_range_min = min, \
 	.midr_range_max = max
 
-struct arm64_cpu_capabilities arm64_errata[] = {
+const struct arm64_cpu_capabilities arm64_errata[] = {
 #if	defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
 	defined(CONFIG_ARM64_ERRATUM_824069)
@@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
 	/* Cortex-A57 r0p0 - r1p2 */
 		.desc = "ARM erratum 832075",
 		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
-		MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+	{
+	/* Cortex-A53 r0p[01234] */
+		.desc = "ARM erratum 845719",
+		.capability = ARM64_WORKAROUND_845719,
+		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
 	},
 #endif
 	{
@@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = {
 
 void check_local_cpu_errata(void)
 {
-	struct arm64_cpu_capabilities *cpus = arm64_errata;
-	int i;
-
-	for (i = 0; cpus[i].desc; i++) {
-		if (!cpus[i].is_affected(&cpus[i]))
-			continue;
-
-		if (!cpus_have_cap(cpus[i].capability))
-			pr_info("enabling workaround for %s\n", cpus[i].desc);
-		cpus_set_cap(cpus[i].capability);
-	}
+	check_cpu_capabilities(arm64_errata, "enabling workaround for");
 }
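
The MIDR_RANGE fix is about field placement: in MIDR_EL1 the variant field sits at bit 20 and the revision at bit 0, so r1p2 must be encoded as (1 << MIDR_VARIANT_SHIFT) | 2; the old literal 0x12 never matched any r1pX part. A sketch of the range comparison (the shift value matches the architecture; the helper macro is illustrative):

    #include <assert.h>
    #include <stdint.h>

    #define MIDR_VARIANT_SHIFT  20
    #define MIDR_VARIANT_MASK   (0xfu << MIDR_VARIANT_SHIFT)
    #define MIDR_REVISION_MASK  0xfu

    /* pack an rNpM revision the way the capability table expects */
    #define MIDR_RXPX(var, rev) (((var) << MIDR_VARIANT_SHIFT) | (rev))

    static int in_range(uint32_t midr, uint32_t min, uint32_t max)
    {
            midr &= MIDR_VARIANT_MASK | MIDR_REVISION_MASK;
            return midr >= min && midr <= max;
    }

    int main(void)
    {
            /* erratum 832075 window: r0p0 .. r1p2 */
            assert(in_range(MIDR_RXPX(1, 2), 0x00, MIDR_RXPX(1, 2)));
            assert(!in_range(MIDR_RXPX(2, 0), 0x00, MIDR_RXPX(1, 2)));
            return 0;
    }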
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
new file mode 100644
index 000000000000..3d9967e43d89
--- /dev/null
+++ b/arch/arm64/kernel/cpufeature.c
@@ -0,0 +1,47 @@
+/*
+ * Contains CPU feature definitions
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "alternatives: " fmt
+
+#include <linux/types.h>
+#include <asm/cpu.h>
+#include <asm/cpufeature.h>
+
+static const struct arm64_cpu_capabilities arm64_features[] = {
+	{},
+};
+
+void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+			    const char *info)
+{
+	int i;
+
+	for (i = 0; caps[i].desc; i++) {
+		if (!caps[i].matches(&caps[i]))
+			continue;
+
+		if (!cpus_have_cap(caps[i].capability))
+			pr_info("%s %s\n", info, caps[i].desc);
+		cpus_set_cap(caps[i].capability);
+	}
+}
+
+void check_local_cpu_features(void)
+{
+	check_cpu_capabilities(arm64_features, "detected feature");
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 929855691dae..75d5a867e7fb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	cpuinfo_detect_icache_policy(info);
 
 	check_local_cpu_errata();
+	check_local_cpu_features();
 	update_cpu_features(info);
 }
 
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
deleted file mode 100644
index fd3993cb060f..000000000000
--- a/arch/arm64/kernel/cputable.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * arch/arm64/kernel/cputable.c
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-
-#include <asm/cputable.h>
-
-extern unsigned long __cpu_setup(void);
-
-struct cpu_info cpu_table[] = {
-	{
-		.cpu_id_val	= 0x000f0000,
-		.cpu_id_mask	= 0x000f0000,
-		.cpu_name	= "AArch64 Processor",
-		.cpu_setup	= __cpu_setup,
-	},
-	{ /* Empty */ },
-};
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cf21bb3bf752..959fe8733560 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -21,8 +21,10 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/linkage.h> | 22 | #include <linux/linkage.h> |
23 | 23 | ||
24 | #include <asm/alternative-asm.h> | ||
24 | #include <asm/assembler.h> | 25 | #include <asm/assembler.h> |
25 | #include <asm/asm-offsets.h> | 26 | #include <asm/asm-offsets.h> |
27 | #include <asm/cpufeature.h> | ||
26 | #include <asm/errno.h> | 28 | #include <asm/errno.h> |
27 | #include <asm/esr.h> | 29 | #include <asm/esr.h> |
28 | #include <asm/thread_info.h> | 30 | #include <asm/thread_info.h> |
@@ -120,6 +122,24 @@ | |||
120 | ct_user_enter | 122 | ct_user_enter |
121 | ldr x23, [sp, #S_SP] // load return stack pointer | 123 | ldr x23, [sp, #S_SP] // load return stack pointer |
122 | msr sp_el0, x23 | 124 | msr sp_el0, x23 |
125 | |||
126 | #ifdef CONFIG_ARM64_ERRATUM_845719 | ||
127 | alternative_insn \ | ||
128 | "nop", \ | ||
129 | "tbz x22, #4, 1f", \ | ||
130 | ARM64_WORKAROUND_845719 | ||
131 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
132 | alternative_insn \ | ||
133 | "nop; nop", \ | ||
134 | "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ | ||
135 | ARM64_WORKAROUND_845719 | ||
136 | #else | ||
137 | alternative_insn \ | ||
138 | "nop", \ | ||
139 | "msr contextidr_el1, xzr; 1:", \ | ||
140 | ARM64_WORKAROUND_845719 | ||
141 | #endif | ||
142 | #endif | ||
123 | .endif | 143 | .endif |
124 | msr elr_el1, x21 // set up the return data | 144 | msr elr_el1, x21 // set up the return data |
125 | msr spsr_el1, x22 | 145 | msr spsr_el1, x22 |
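
For erratum 845719, the sequence added above only takes effect on affected CPUs: the alternatives patcher rewrites the default nop(s) into the tbz/msr sequence when ARM64_WORKAROUND_845719 is set. A rough C rendering of what the patched instructions do on the return path follows; every name here is an invented stand-in, and the authoritative logic is the assembly above.

#define PSR_MODE32_BIT	0x10	/* SPSR bit 4: return is to AArch32 */

static volatile unsigned long contextidr_el1;	/* stand-in for the system register */

static void erratum_845719_fixup(unsigned long spsr)
{
	if (!(spsr & PSR_MODE32_BIT))	/* "tbz x22, #4, 1f": only 32-bit tasks */
		return;
#ifdef CONFIG_PID_IN_CONTEXTIDR
	contextidr_el1 = contextidr_el1;	/* mrs + msr: rewrite current value */
#else
	contextidr_el1 = 0;			/* "msr contextidr_el1, xzr" */
#endif
}

When CONFIG_PID_IN_CONTEXTIDR is set the register carries the task PID and must be preserved, hence the read-back and rewrite; otherwise writing zero is sufficient.
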
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S index 9a8f6ae2530e..bd9bfaa9269b 100644 --- a/arch/arm64/kernel/entry32.S +++ b/arch/arm64/kernel/entry32.S | |||
@@ -19,9 +19,12 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/linkage.h> | 21 | #include <linux/linkage.h> |
22 | #include <linux/const.h> | ||
22 | 23 | ||
23 | #include <asm/assembler.h> | 24 | #include <asm/assembler.h> |
24 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
26 | #include <asm/errno.h> | ||
27 | #include <asm/page.h> | ||
25 | 28 | ||
26 | /* | 29 | /* |
27 | * System call wrappers for the AArch32 compatibility layer. | 30 | * System call wrappers for the AArch32 compatibility layer. |
@@ -54,6 +57,21 @@ ENTRY(compat_sys_fstatfs64_wrapper) | |||
54 | ENDPROC(compat_sys_fstatfs64_wrapper) | 57 | ENDPROC(compat_sys_fstatfs64_wrapper) |
55 | 58 | ||
56 | /* | 59 | /* |
60 | * Note: off_4k (w5) is always in units of 4K. If we can't do the | ||
61 | * requested offset because it is not page-aligned, we return -EINVAL. | ||
62 | */ | ||
63 | ENTRY(compat_sys_mmap2_wrapper) | ||
64 | #if PAGE_SHIFT > 12 | ||
65 | tst w5, #~PAGE_MASK >> 12 | ||
66 | b.ne 1f | ||
67 | lsr w5, w5, #PAGE_SHIFT - 12 | ||
68 | #endif | ||
69 | b sys_mmap_pgoff | ||
70 | 1: mov x0, #-EINVAL | ||
71 | ret | ||
72 | ENDPROC(compat_sys_mmap2_wrapper) | ||
73 | |||
74 | /* | ||
57 | * Wrappers for AArch32 syscalls that either take 64-bit parameters | 75 | * Wrappers for AArch32 syscalls that either take 64-bit parameters |
58 | * in registers or that take 32-bit parameters which require sign | 76 | * in registers or that take 32-bit parameters which require sign |
59 | * extension. | 77 | * extension. |
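
compat mmap2 passes its file offset in fixed 4 KB units, while sys_mmap_pgoff() expects PAGE_SIZE units, so the wrapper above scales the value and rejects offsets that cannot be expressed. Below is a standalone model of the same arithmetic, with PAGE_SHIFT pinned to 16 to emulate a 64 KB-page kernel; the harness is hypothetical.

#include <assert.h>
#include <errno.h>

#define PAGE_SHIFT	16	/* hypothetical: CONFIG_ARM64_64K_PAGES */

static long mmap2_off4k_to_pgoff(unsigned long off_4k)
{
#if PAGE_SHIFT > 12
	if (off_4k & ((1UL << (PAGE_SHIFT - 12)) - 1))
		return -EINVAL;		/* offset not page-aligned */
	return off_4k >> (PAGE_SHIFT - 12);
#else
	return off_4k;			/* 4 KB pages: units already match */
#endif
}

int main(void)
{
	assert(mmap2_off4k_to_pgoff(32) == 2);		/* 32 * 4 KB = 2 * 64 KB */
	assert(mmap2_off4k_to_pgoff(3) == -EINVAL);	/* 12 KB: not 64 KB aligned */
	return 0;
}
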
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 07f930540f4a..19f915e8f6e0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -36,7 +36,7 @@ | |||
36 | #include <asm/page.h> | 36 | #include <asm/page.h> |
37 | #include <asm/virt.h> | 37 | #include <asm/virt.h> |
38 | 38 | ||
39 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) | 39 | #define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) |
40 | 40 | ||
41 | #if (TEXT_OFFSET & 0xfff) != 0 | 41 | #if (TEXT_OFFSET & 0xfff) != 0 |
42 | #error TEXT_OFFSET must be at least 4KB aligned | 42 | #error TEXT_OFFSET must be at least 4KB aligned |
@@ -46,13 +46,6 @@ | |||
46 | #error TEXT_OFFSET must be less than 2MB | 46 | #error TEXT_OFFSET must be less than 2MB |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | .macro pgtbl, ttb0, ttb1, virt_to_phys | ||
50 | ldr \ttb1, =swapper_pg_dir | ||
51 | ldr \ttb0, =idmap_pg_dir | ||
52 | add \ttb1, \ttb1, \virt_to_phys | ||
53 | add \ttb0, \ttb0, \virt_to_phys | ||
54 | .endm | ||
55 | |||
56 | #ifdef CONFIG_ARM64_64K_PAGES | 49 | #ifdef CONFIG_ARM64_64K_PAGES |
57 | #define BLOCK_SHIFT PAGE_SHIFT | 50 | #define BLOCK_SHIFT PAGE_SHIFT |
58 | #define BLOCK_SIZE PAGE_SIZE | 51 | #define BLOCK_SIZE PAGE_SIZE |
@@ -63,7 +56,7 @@ | |||
63 | #define TABLE_SHIFT PUD_SHIFT | 56 | #define TABLE_SHIFT PUD_SHIFT |
64 | #endif | 57 | #endif |
65 | 58 | ||
66 | #define KERNEL_START KERNEL_RAM_VADDR | 59 | #define KERNEL_START _text |
67 | #define KERNEL_END _end | 60 | #define KERNEL_END _end |
68 | 61 | ||
69 | /* | 62 | /* |
@@ -240,40 +233,43 @@ section_table: | |||
240 | #endif | 233 | #endif |
241 | 234 | ||
242 | ENTRY(stext) | 235 | ENTRY(stext) |
243 | mov x21, x0 // x21=FDT | 236 | bl preserve_boot_args |
244 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode | 237 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode |
245 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | 238 | adrp x24, __PHYS_OFFSET |
246 | bl set_cpu_boot_mode_flag | 239 | bl set_cpu_boot_mode_flag |
247 | mrs x22, midr_el1 // x22=cpuid | 240 | |
248 | mov x0, x22 | ||
249 | bl lookup_processor_type | ||
250 | mov x23, x0 // x23=current cpu_table | ||
251 | /* | ||
252 | * __error_p may end up out of range for cbz if text areas are | ||
253 | * aligned up to section sizes. | ||
254 | */ | ||
255 | cbnz x23, 1f // invalid processor (x23=0)? | ||
256 | b __error_p | ||
257 | 1: | ||
258 | bl __vet_fdt | 241 | bl __vet_fdt |
259 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 | 242 | bl __create_page_tables // x25=TTBR0, x26=TTBR1 |
260 | /* | 243 | /* |
261 | * The following calls CPU specific code in a position independent | 244 | * The following calls CPU setup code, see arch/arm64/mm/proc.S for |
262 | * manner. See arch/arm64/mm/proc.S for details. x23 = base of | 245 | * details. |
263 | * cpu_info structure selected by lookup_processor_type above. | ||
264 | * On return, the CPU will be ready for the MMU to be turned on and | 246 | * On return, the CPU will be ready for the MMU to be turned on and |
265 | * the TCR will have been set. | 247 | * the TCR will have been set. |
266 | */ | 248 | */ |
267 | ldr x27, __switch_data // address to jump to after | 249 | ldr x27, =__mmap_switched // address to jump to after |
268 | // MMU has been enabled | 250 | // MMU has been enabled |
269 | adrp lr, __enable_mmu // return (PIC) address | 251 | adr_l lr, __enable_mmu // return (PIC) address |
270 | add lr, lr, #:lo12:__enable_mmu | 252 | b __cpu_setup // initialise processor |
271 | ldr x12, [x23, #CPU_INFO_SETUP] | ||
272 | add x12, x12, x28 // __virt_to_phys | ||
273 | br x12 // initialise processor | ||
274 | ENDPROC(stext) | 253 | ENDPROC(stext) |
275 | 254 | ||
276 | /* | 255 | /* |
256 | * Preserve the arguments passed by the bootloader in x0 .. x3 | ||
257 | */ | ||
258 | preserve_boot_args: | ||
259 | mov x21, x0 // x21=FDT | ||
260 | |||
261 | adr_l x0, boot_args // record the contents of | ||
262 | stp x21, x1, [x0] // x0 .. x3 at kernel entry | ||
263 | stp x2, x3, [x0, #16] | ||
264 | |||
265 | dmb sy // needed before dc ivac with | ||
266 | // MMU off | ||
267 | |||
268 | add x1, x0, #0x20 // 4 x 8 bytes | ||
269 | b __inval_cache_range // tail call | ||
270 | ENDPROC(preserve_boot_args) | ||
271 | |||
272 | /* | ||
277 | * Determine validity of the x21 FDT pointer. | 273 | * Determine validity of the x21 FDT pointer. |
278 | * The dtb must be 8-byte aligned and live in the first 512M of memory. | 274 | * The dtb must be 8-byte aligned and live in the first 512M of memory. |
279 | */ | 275 | */ |
@@ -356,7 +352,8 @@ ENDPROC(__vet_fdt) | |||
356 | * - pgd entry for fixed mappings (TTBR1) | 352 | * - pgd entry for fixed mappings (TTBR1) |
357 | */ | 353 | */ |
358 | __create_page_tables: | 354 | __create_page_tables: |
359 | pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses | 355 | adrp x25, idmap_pg_dir |
356 | adrp x26, swapper_pg_dir | ||
360 | mov x27, lr | 357 | mov x27, lr |
361 | 358 | ||
362 | /* | 359 | /* |
@@ -385,12 +382,50 @@ __create_page_tables: | |||
385 | * Create the identity mapping. | 382 | * Create the identity mapping. |
386 | */ | 383 | */ |
387 | mov x0, x25 // idmap_pg_dir | 384 | mov x0, x25 // idmap_pg_dir |
388 | ldr x3, =KERNEL_START | 385 | adrp x3, KERNEL_START // __pa(KERNEL_START) |
389 | add x3, x3, x28 // __pa(KERNEL_START) | 386 | |
387 | #ifndef CONFIG_ARM64_VA_BITS_48 | ||
388 | #define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) | ||
389 | #define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) | ||
390 | |||
391 | /* | ||
392 | * If VA_BITS < 48, it may be too small to allow for an ID mapping to be | ||
393 | * created that covers system RAM if that is located sufficiently high | ||
394 | * in the physical address space. So for the ID map, use an extended | ||
395 | * virtual range in that case, by configuring an additional translation | ||
396 | * level. | ||
397 | * First, we have to verify our assumption that the current value of | ||
398 | * VA_BITS was chosen such that all translation levels are fully | ||
399 | * utilised, and that lowering T0SZ will always result in an additional | ||
400 | * translation level to be configured. | ||
401 | */ | ||
402 | #if VA_BITS != EXTRA_SHIFT | ||
403 | #error "Mismatch between VA_BITS and page size/number of translation levels" | ||
404 | #endif | ||
405 | |||
406 | /* | ||
407 | * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the | ||
408 | * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), | ||
409 | * this number conveniently equals the number of leading zeroes in | ||
410 | * the physical address of KERNEL_END. | ||
411 | */ | ||
412 | adrp x5, KERNEL_END | ||
413 | clz x5, x5 | ||
414 | cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? | ||
415 | b.ge 1f // .. then skip additional level | ||
416 | |||
417 | adr_l x6, idmap_t0sz | ||
418 | str x5, [x6] | ||
419 | dmb sy | ||
420 | dc ivac, x6 // Invalidate potentially stale cache line | ||
421 | |||
422 | create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 | ||
423 | 1: | ||
424 | #endif | ||
425 | |||
390 | create_pgd_entry x0, x3, x5, x6 | 426 | create_pgd_entry x0, x3, x5, x6 |
391 | ldr x6, =KERNEL_END | ||
392 | mov x5, x3 // __pa(KERNEL_START) | 427 | mov x5, x3 // __pa(KERNEL_START) |
393 | add x6, x6, x28 // __pa(KERNEL_END) | 428 | adr_l x6, KERNEL_END // __pa(KERNEL_END) |
394 | create_block_map x0, x7, x3, x5, x6 | 429 | create_block_map x0, x7, x3, x5, x6 |
395 | 430 | ||
396 | /* | 431 | /* |
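
A worked example of the T0SZ calculation above, under an assumed physical layout: with 4 KB pages and VA_BITS = 39, TCR_T0SZ(VA_BITS) is 25, while a kernel ending at PA 0x80_8000_0000 needs 40 bits of ID-map VA, i.e. T0SZ = 24, so the extra translation level is configured.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t kernel_end_pa = 0x8080000000ULL;	/* hypothetical: RAM above 512 GB */
	int va_bits = 39;				/* e.g. 4 KB pages, 3 levels */
	int t0sz_default = 64 - va_bits;		/* TCR_T0SZ(VA_BITS) = 25 */
	int t0sz_idmap = __builtin_clzll(kernel_end_pa); /* leading zeroes = 24 */

	if (t0sz_idmap < t0sz_default)
		printf("lower T0SZ to %d: ID map needs an extra translation level\n",
		       t0sz_idmap);
	return 0;
}

Note the dmb plus dc ivac after the store to idmap_t0sz in the hunk above: the store happens with the MMU off, so the potentially stale cache line must be invalidated before cached observers read the variable.
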
@@ -399,7 +434,7 @@ __create_page_tables: | |||
399 | mov x0, x26 // swapper_pg_dir | 434 | mov x0, x26 // swapper_pg_dir |
400 | mov x5, #PAGE_OFFSET | 435 | mov x5, #PAGE_OFFSET |
401 | create_pgd_entry x0, x5, x3, x6 | 436 | create_pgd_entry x0, x5, x3, x6 |
402 | ldr x6, =KERNEL_END | 437 | ldr x6, =KERNEL_END // __va(KERNEL_END) |
403 | mov x3, x24 // phys offset | 438 | mov x3, x24 // phys offset |
404 | create_block_map x0, x7, x3, x5, x6 | 439 | create_block_map x0, x7, x3, x5, x6 |
405 | 440 | ||
@@ -426,6 +461,7 @@ __create_page_tables: | |||
426 | */ | 461 | */ |
427 | mov x0, x25 | 462 | mov x0, x25 |
428 | add x1, x26, #SWAPPER_DIR_SIZE | 463 | add x1, x26, #SWAPPER_DIR_SIZE |
464 | dmb sy | ||
429 | bl __inval_cache_range | 465 | bl __inval_cache_range |
430 | 466 | ||
431 | mov lr, x27 | 467 | mov lr, x27 |
@@ -433,37 +469,22 @@ __create_page_tables: | |||
433 | ENDPROC(__create_page_tables) | 469 | ENDPROC(__create_page_tables) |
434 | .ltorg | 470 | .ltorg |
435 | 471 | ||
436 | .align 3 | ||
437 | .type __switch_data, %object | ||
438 | __switch_data: | ||
439 | .quad __mmap_switched | ||
440 | .quad __bss_start // x6 | ||
441 | .quad __bss_stop // x7 | ||
442 | .quad processor_id // x4 | ||
443 | .quad __fdt_pointer // x5 | ||
444 | .quad memstart_addr // x6 | ||
445 | .quad init_thread_union + THREAD_START_SP // sp | ||
446 | |||
447 | /* | 472 | /* |
448 | * The following fragment of code is executed with the MMU on in MMU mode, and | 473 | * The following fragment of code is executed with the MMU enabled. |
449 | * uses absolute addresses; this is not position independent. | ||
450 | */ | 474 | */ |
475 | .set initial_sp, init_thread_union + THREAD_START_SP | ||
451 | __mmap_switched: | 476 | __mmap_switched: |
452 | adr x3, __switch_data + 8 | 477 | adr_l x6, __bss_start |
478 | adr_l x7, __bss_stop | ||
453 | 479 | ||
454 | ldp x6, x7, [x3], #16 | ||
455 | 1: cmp x6, x7 | 480 | 1: cmp x6, x7 |
456 | b.hs 2f | 481 | b.hs 2f |
457 | str xzr, [x6], #8 // Clear BSS | 482 | str xzr, [x6], #8 // Clear BSS |
458 | b 1b | 483 | b 1b |
459 | 2: | 484 | 2: |
460 | ldp x4, x5, [x3], #16 | 485 | adr_l sp, initial_sp, x4 |
461 | ldr x6, [x3], #8 | 486 | str_l x21, __fdt_pointer, x5 // Save FDT pointer |
462 | ldr x16, [x3] | 487 | str_l x24, memstart_addr, x6 // Save PHYS_OFFSET |
463 | mov sp, x16 | ||
464 | str x22, [x4] // Save processor ID | ||
465 | str x21, [x5] // Save FDT pointer | ||
466 | str x24, [x6] // Save PHYS_OFFSET | ||
467 | mov x29, #0 | 488 | mov x29, #0 |
468 | b start_kernel | 489 | b start_kernel |
469 | ENDPROC(__mmap_switched) | 490 | ENDPROC(__mmap_switched) |
@@ -566,8 +587,7 @@ ENDPROC(el2_setup) | |||
566 | * in x20. See arch/arm64/include/asm/virt.h for more info. | 587 | * in x20. See arch/arm64/include/asm/virt.h for more info. |
567 | */ | 588 | */ |
568 | ENTRY(set_cpu_boot_mode_flag) | 589 | ENTRY(set_cpu_boot_mode_flag) |
569 | ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode | 590 | adr_l x1, __boot_cpu_mode |
570 | add x1, x1, x28 | ||
571 | cmp w20, #BOOT_CPU_MODE_EL2 | 591 | cmp w20, #BOOT_CPU_MODE_EL2 |
572 | b.ne 1f | 592 | b.ne 1f |
573 | add x1, x1, #4 | 593 | add x1, x1, #4 |
@@ -588,29 +608,21 @@ ENDPROC(set_cpu_boot_mode_flag) | |||
588 | .align L1_CACHE_SHIFT | 608 | .align L1_CACHE_SHIFT |
589 | ENTRY(__boot_cpu_mode) | 609 | ENTRY(__boot_cpu_mode) |
590 | .long BOOT_CPU_MODE_EL2 | 610 | .long BOOT_CPU_MODE_EL2 |
591 | .long 0 | 611 | .long BOOT_CPU_MODE_EL1 |
592 | .popsection | 612 | .popsection |
593 | 613 | ||
594 | #ifdef CONFIG_SMP | 614 | #ifdef CONFIG_SMP |
595 | .align 3 | ||
596 | 1: .quad . | ||
597 | .quad secondary_holding_pen_release | ||
598 | |||
599 | /* | 615 | /* |
600 | * This provides a "holding pen" for platforms to hold all secondary | 616 | * This provides a "holding pen" for platforms to hold all secondary |
601 | * cores until we're ready for them to initialise. | 617 | * cores until we're ready for them to initialise. |
602 | */ | 618 | */ |
603 | ENTRY(secondary_holding_pen) | 619 | ENTRY(secondary_holding_pen) |
604 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode | 620 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode |
605 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | ||
606 | bl set_cpu_boot_mode_flag | 621 | bl set_cpu_boot_mode_flag |
607 | mrs x0, mpidr_el1 | 622 | mrs x0, mpidr_el1 |
608 | ldr x1, =MPIDR_HWID_BITMASK | 623 | ldr x1, =MPIDR_HWID_BITMASK |
609 | and x0, x0, x1 | 624 | and x0, x0, x1 |
610 | adr x1, 1b | 625 | adr_l x3, secondary_holding_pen_release |
611 | ldp x2, x3, [x1] | ||
612 | sub x1, x1, x2 | ||
613 | add x3, x3, x1 | ||
614 | pen: ldr x4, [x3] | 626 | pen: ldr x4, [x3] |
615 | cmp x4, x0 | 627 | cmp x4, x0 |
616 | b.eq secondary_startup | 628 | b.eq secondary_startup |
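
The adr_l rewrite above feeds the same holding-pen protocol as before; the wfe/branch tail of the loop is unchanged context outside this hunk. A C rendering, with all names as illustrative stand-ins:

#include <stdio.h>

static volatile unsigned long secondary_holding_pen_release = ~0UL; /* "invalid" */

static void wfe(void) { }	/* stands in for wait-for-event */

static void hold_in_pen(unsigned long mpidr)
{
	while (secondary_holding_pen_release != mpidr)
		wfe();		/* boot CPU issues sev after writing our MPIDR */
	/* falls through to secondary_startup in the real code */
}

int main(void)
{
	secondary_holding_pen_release = 0;	/* boot CPU releases MPIDR 0 */
	hold_in_pen(0);
	printf("released\n");
	return 0;
}
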
@@ -624,7 +636,6 @@ ENDPROC(secondary_holding_pen) | |||
624 | */ | 636 | */ |
625 | ENTRY(secondary_entry) | 637 | ENTRY(secondary_entry) |
626 | bl el2_setup // Drop to EL1 | 638 | bl el2_setup // Drop to EL1 |
627 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | ||
628 | bl set_cpu_boot_mode_flag | 639 | bl set_cpu_boot_mode_flag |
629 | b secondary_startup | 640 | b secondary_startup |
630 | ENDPROC(secondary_entry) | 641 | ENDPROC(secondary_entry) |
@@ -633,16 +644,9 @@ ENTRY(secondary_startup) | |||
633 | /* | 644 | /* |
634 | * Common entry point for secondary CPUs. | 645 | * Common entry point for secondary CPUs. |
635 | */ | 646 | */ |
636 | mrs x22, midr_el1 // x22=cpuid | 647 | adrp x25, idmap_pg_dir |
637 | mov x0, x22 | 648 | adrp x26, swapper_pg_dir |
638 | bl lookup_processor_type | 649 | bl __cpu_setup // initialise processor |
639 | mov x23, x0 // x23=current cpu_table | ||
640 | cbz x23, __error_p // invalid processor (x23=0)? | ||
641 | |||
642 | pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 | ||
643 | ldr x12, [x23, #CPU_INFO_SETUP] | ||
644 | add x12, x12, x28 // __virt_to_phys | ||
645 | blr x12 // initialise processor | ||
646 | 650 | ||
647 | ldr x21, =secondary_data | 651 | ldr x21, =secondary_data |
648 | ldr x27, =__secondary_switched // address to jump to after enabling the MMU | 652 | ldr x27, =__secondary_switched // address to jump to after enabling the MMU |
@@ -658,11 +662,12 @@ ENDPROC(__secondary_switched) | |||
658 | #endif /* CONFIG_SMP */ | 662 | #endif /* CONFIG_SMP */ |
659 | 663 | ||
660 | /* | 664 | /* |
661 | * Setup common bits before finally enabling the MMU. Essentially this is just | 665 | * Enable the MMU. |
662 | * loading the page table pointer and vector base registers. | ||
663 | * | 666 | * |
664 | * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on | 667 | * x0 = SCTLR_EL1 value for turning on the MMU. |
665 | * the MMU. | 668 | * x27 = *virtual* address to jump to upon completion |
669 | * | ||
670 | * other registers depend on the function called upon completion | ||
666 | */ | 671 | */ |
667 | __enable_mmu: | 672 | __enable_mmu: |
668 | ldr x5, =vectors | 673 | ldr x5, =vectors |
@@ -670,89 +675,7 @@ __enable_mmu: | |||
670 | msr ttbr0_el1, x25 // load TTBR0 | 675 | msr ttbr0_el1, x25 // load TTBR0 |
671 | msr ttbr1_el1, x26 // load TTBR1 | 676 | msr ttbr1_el1, x26 // load TTBR1 |
672 | isb | 677 | isb |
673 | b __turn_mmu_on | ||
674 | ENDPROC(__enable_mmu) | ||
675 | |||
676 | /* | ||
677 | * Enable the MMU. This completely changes the structure of the visible memory | ||
678 | * space. You will not be able to trace execution through this. | ||
679 | * | ||
680 | * x0 = system control register | ||
681 | * x27 = *virtual* address to jump to upon completion | ||
682 | * | ||
683 | * other registers depend on the function called upon completion | ||
684 | * | ||
685 | * We align the entire function to the smallest power of two larger than it to | ||
686 | * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET | ||
687 | * close to the end of a 512MB or 1GB block we might require an additional | ||
688 | * table to map the entire function. | ||
689 | */ | ||
690 | .align 4 | ||
691 | __turn_mmu_on: | ||
692 | msr sctlr_el1, x0 | 678 | msr sctlr_el1, x0 |
693 | isb | 679 | isb |
694 | br x27 | 680 | br x27 |
695 | ENDPROC(__turn_mmu_on) | 681 | ENDPROC(__enable_mmu) |
696 | |||
697 | /* | ||
698 | * Calculate the start of physical memory. | ||
699 | */ | ||
700 | __calc_phys_offset: | ||
701 | adr x0, 1f | ||
702 | ldp x1, x2, [x0] | ||
703 | sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET | ||
704 | add x24, x2, x28 // x24 = PHYS_OFFSET | ||
705 | ret | ||
706 | ENDPROC(__calc_phys_offset) | ||
707 | |||
708 | .align 3 | ||
709 | 1: .quad . | ||
710 | .quad PAGE_OFFSET | ||
711 | |||
712 | /* | ||
713 | * Exception handling. Something went wrong and we can't proceed. We ought to | ||
714 | * tell the user, but since we don't have any guarantee that we're even | ||
715 | * running on the right architecture, we do virtually nothing. | ||
716 | */ | ||
717 | __error_p: | ||
718 | ENDPROC(__error_p) | ||
719 | |||
720 | __error: | ||
721 | 1: nop | ||
722 | b 1b | ||
723 | ENDPROC(__error) | ||
724 | |||
725 | /* | ||
726 | * This function gets the processor ID in w0 and searches the cpu_table[] for | ||
727 | * a match. It returns a pointer to the struct cpu_info it found. The | ||
728 | * cpu_table[] must end with an empty (all zeros) structure. | ||
729 | * | ||
730 | * This routine can be called via C code and it needs to work with the MMU | ||
731 | * both disabled and enabled (the offset is calculated automatically). | ||
732 | */ | ||
733 | ENTRY(lookup_processor_type) | ||
734 | adr x1, __lookup_processor_type_data | ||
735 | ldp x2, x3, [x1] | ||
736 | sub x1, x1, x2 // get offset between VA and PA | ||
737 | add x3, x3, x1 // convert VA to PA | ||
738 | 1: | ||
739 | ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask | ||
740 | cbz w5, 2f // end of list? | ||
741 | and w6, w6, w0 | ||
742 | cmp w5, w6 | ||
743 | b.eq 3f | ||
744 | add x3, x3, #CPU_INFO_SZ | ||
745 | b 1b | ||
746 | 2: | ||
747 | mov x3, #0 // unknown processor | ||
748 | 3: | ||
749 | mov x0, x3 | ||
750 | ret | ||
751 | ENDPROC(lookup_processor_type) | ||
752 | |||
753 | .align 3 | ||
754 | .type __lookup_processor_type_data, %object | ||
755 | __lookup_processor_type_data: | ||
756 | .quad . | ||
757 | .quad cpu_table | ||
758 | .size __lookup_processor_type_data, . - __lookup_processor_type_data | ||
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index c8eca88f12e6..924902083e47 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -265,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) | |||
265 | return aarch64_insn_patch_text_sync(addrs, insns, cnt); | 265 | return aarch64_insn_patch_text_sync(addrs, insns, cnt); |
266 | } | 266 | } |
267 | 267 | ||
268 | u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | 268 | static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, |
269 | u32 insn, u64 imm) | 269 | u32 *maskp, int *shiftp) |
270 | { | 270 | { |
271 | u32 immlo, immhi, lomask, himask, mask; | 271 | u32 mask; |
272 | int shift; | 272 | int shift; |
273 | 273 | ||
274 | switch (type) { | 274 | switch (type) { |
275 | case AARCH64_INSN_IMM_ADR: | ||
276 | lomask = 0x3; | ||
277 | himask = 0x7ffff; | ||
278 | immlo = imm & lomask; | ||
279 | imm >>= 2; | ||
280 | immhi = imm & himask; | ||
281 | imm = (immlo << 24) | (immhi); | ||
282 | mask = (lomask << 24) | (himask); | ||
283 | shift = 5; | ||
284 | break; | ||
285 | case AARCH64_INSN_IMM_26: | 275 | case AARCH64_INSN_IMM_26: |
286 | mask = BIT(26) - 1; | 276 | mask = BIT(26) - 1; |
287 | shift = 0; | 277 | shift = 0; |
@@ -320,9 +310,68 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | |||
320 | shift = 16; | 310 | shift = 16; |
321 | break; | 311 | break; |
322 | default: | 312 | default: |
323 | pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", | 313 | return -EINVAL; |
324 | type); | 314 | } |
325 | return 0; | 315 | |
316 | *maskp = mask; | ||
317 | *shiftp = shift; | ||
318 | |||
319 | return 0; | ||
320 | } | ||
321 | |||
322 | #define ADR_IMM_HILOSPLIT 2 | ||
323 | #define ADR_IMM_SIZE SZ_2M | ||
324 | #define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) | ||
325 | #define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) | ||
326 | #define ADR_IMM_LOSHIFT 29 | ||
327 | #define ADR_IMM_HISHIFT 5 | ||
328 | |||
329 | u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) | ||
330 | { | ||
331 | u32 immlo, immhi, mask; | ||
332 | int shift; | ||
333 | |||
334 | switch (type) { | ||
335 | case AARCH64_INSN_IMM_ADR: | ||
336 | shift = 0; | ||
337 | immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; | ||
338 | immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; | ||
339 | insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; | ||
340 | mask = ADR_IMM_SIZE - 1; | ||
341 | break; | ||
342 | default: | ||
343 | if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { | ||
344 | pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n", | ||
345 | type); | ||
346 | return 0; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | return (insn >> shift) & mask; | ||
351 | } | ||
352 | |||
353 | u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | ||
354 | u32 insn, u64 imm) | ||
355 | { | ||
356 | u32 immlo, immhi, mask; | ||
357 | int shift; | ||
358 | |||
359 | switch (type) { | ||
360 | case AARCH64_INSN_IMM_ADR: | ||
361 | shift = 0; | ||
362 | immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; | ||
363 | imm >>= ADR_IMM_HILOSPLIT; | ||
364 | immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; | ||
365 | imm = immlo | immhi; | ||
366 | mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | | ||
367 | (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); | ||
368 | break; | ||
369 | default: | ||
370 | if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { | ||
371 | pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", | ||
372 | type); | ||
373 | return 0; | ||
374 | } | ||
326 | } | 375 | } |
327 | 376 | ||
328 | /* Update the immediate field. */ | 377 | /* Update the immediate field. */ |
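
The ADR immediate is the one non-contiguous case: 2 low bits live at bit 29 and 19 high bits at bit 5, which is why it gets its own branch rather than the shift/mask table in aarch64_get_imm_shift_mask(). Below is a standalone round-trip check of the packing, with the constants copied from the patch; the harness itself is illustrative.

#include <assert.h>
#include <stdint.h>

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		0x200000		/* SZ_2M */
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

static uint32_t adr_encode(uint32_t insn, uint64_t imm)
{
	uint32_t immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
	uint32_t immhi = ((imm >> ADR_IMM_HILOSPLIT) & ADR_IMM_HIMASK)
				<< ADR_IMM_HISHIFT;
	uint32_t mask = (ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT);

	return (insn & ~mask) | immlo | immhi;
}

static uint64_t adr_decode(uint32_t insn)
{
	uint32_t immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
	uint32_t immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;

	return ((uint64_t)immhi << ADR_IMM_HILOSPLIT) | immlo;
}

int main(void)
{
	for (uint64_t imm = 0; imm < ADR_IMM_SIZE; imm += 4093)	/* sample the range */
		assert(adr_decode(adr_encode(0x10000000 /* adr x0, . */, imm)) == imm);
	return 0;
}

Keeping ADR out of the generic helper leaves that helper a pure shift/mask lookup shared by the encode and decode paths.
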
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 25a5308744b1..195991dadc37 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -25,8 +25,10 @@ | |||
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/export.h> | 27 | #include <linux/export.h> |
28 | #include <linux/of.h> | ||
28 | #include <linux/perf_event.h> | 29 | #include <linux/perf_event.h> |
29 | #include <linux/platform_device.h> | 30 | #include <linux/platform_device.h> |
31 | #include <linux/slab.h> | ||
30 | #include <linux/spinlock.h> | 32 | #include <linux/spinlock.h> |
31 | #include <linux/uaccess.h> | 33 | #include <linux/uaccess.h> |
32 | 34 | ||
@@ -322,22 +324,31 @@ out: | |||
322 | } | 324 | } |
323 | 325 | ||
324 | static int | 326 | static int |
325 | validate_event(struct pmu_hw_events *hw_events, | 327 | validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, |
326 | struct perf_event *event) | 328 | struct perf_event *event) |
327 | { | 329 | { |
328 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 330 | struct arm_pmu *armpmu; |
329 | struct hw_perf_event fake_event = event->hw; | 331 | struct hw_perf_event fake_event = event->hw; |
330 | struct pmu *leader_pmu = event->group_leader->pmu; | 332 | struct pmu *leader_pmu = event->group_leader->pmu; |
331 | 333 | ||
332 | if (is_software_event(event)) | 334 | if (is_software_event(event)) |
333 | return 1; | 335 | return 1; |
334 | 336 | ||
337 | /* | ||
338 | * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The | ||
339 | * core perf code won't check that the pmu->ctx == leader->ctx | ||
340 | * until after pmu->event_init(event). | ||
341 | */ | ||
342 | if (event->pmu != pmu) | ||
343 | return 0; | ||
344 | |||
335 | if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) | 345 | if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) |
336 | return 1; | 346 | return 1; |
337 | 347 | ||
338 | if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) | 348 | if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) |
339 | return 1; | 349 | return 1; |
340 | 350 | ||
351 | armpmu = to_arm_pmu(event->pmu); | ||
341 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; | 352 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; |
342 | } | 353 | } |
343 | 354 | ||
@@ -355,15 +366,15 @@ validate_group(struct perf_event *event) | |||
355 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); | 366 | memset(fake_used_mask, 0, sizeof(fake_used_mask)); |
356 | fake_pmu.used_mask = fake_used_mask; | 367 | fake_pmu.used_mask = fake_used_mask; |
357 | 368 | ||
358 | if (!validate_event(&fake_pmu, leader)) | 369 | if (!validate_event(event->pmu, &fake_pmu, leader)) |
359 | return -EINVAL; | 370 | return -EINVAL; |
360 | 371 | ||
361 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | 372 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { |
362 | if (!validate_event(&fake_pmu, sibling)) | 373 | if (!validate_event(event->pmu, &fake_pmu, sibling)) |
363 | return -EINVAL; | 374 | return -EINVAL; |
364 | } | 375 | } |
365 | 376 | ||
366 | if (!validate_event(&fake_pmu, event)) | 377 | if (!validate_event(event->pmu, &fake_pmu, event)) |
367 | return -EINVAL; | 378 | return -EINVAL; |
368 | 379 | ||
369 | return 0; | 380 | return 0; |
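
Seen from userspace, the case the new event->pmu != pmu check rejects is a group whose leader and sibling sit on different hardware PMUs. A hedged sketch follows; the PMU type numbers are hypothetical and would really be read from /sys/bus/event_source/devices/<pmu>/type.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr cpu_ev, cci_ev;
	int leader;

	memset(&cpu_ev, 0, sizeof(cpu_ev));
	cpu_ev.size = sizeof(cpu_ev);
	cpu_ev.type = 8;		/* hypothetical: CPU PMU */

	memset(&cci_ev, 0, sizeof(cci_ev));
	cci_ev.size = sizeof(cci_ev);
	cci_ev.type = 9;		/* hypothetical: CCI PMU */

	leader = perf_event_open(&cpu_ev, 0, -1, -1, 0);
	if (perf_event_open(&cci_ev, 0, -1, leader, 0) < 0)
		printf("cross-PMU group rejected, as expected\n");
	return 0;
}
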
@@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu) | |||
396 | free_percpu_irq(irq, &cpu_hw_events); | 407 | free_percpu_irq(irq, &cpu_hw_events); |
397 | } else { | 408 | } else { |
398 | for (i = 0; i < irqs; ++i) { | 409 | for (i = 0; i < irqs; ++i) { |
399 | if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) | 410 | int cpu = i; |
411 | |||
412 | if (armpmu->irq_affinity) | ||
413 | cpu = armpmu->irq_affinity[i]; | ||
414 | |||
415 | if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) | ||
400 | continue; | 416 | continue; |
401 | irq = platform_get_irq(pmu_device, i); | 417 | irq = platform_get_irq(pmu_device, i); |
402 | if (irq > 0) | 418 | if (irq > 0) |
@@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
450 | on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); | 466 | on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); |
451 | } else { | 467 | } else { |
452 | for (i = 0; i < irqs; ++i) { | 468 | for (i = 0; i < irqs; ++i) { |
469 | int cpu = i; | ||
470 | |||
453 | err = 0; | 471 | err = 0; |
454 | irq = platform_get_irq(pmu_device, i); | 472 | irq = platform_get_irq(pmu_device, i); |
455 | if (irq <= 0) | 473 | if (irq <= 0) |
456 | continue; | 474 | continue; |
457 | 475 | ||
476 | if (armpmu->irq_affinity) | ||
477 | cpu = armpmu->irq_affinity[i]; | ||
478 | |||
458 | /* | 479 | /* |
459 | * If we have a single PMU interrupt that we can't shift, | 480 | * If we have a single PMU interrupt that we can't shift, |
460 | * assume that we're running on a uniprocessor machine and | 481 | * assume that we're running on a uniprocessor machine and |
461 | * continue. Otherwise, continue without this interrupt. | 482 | * continue. Otherwise, continue without this interrupt. |
462 | */ | 483 | */ |
463 | if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { | 484 | if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { |
464 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | 485 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", |
465 | irq, i); | 486 | irq, cpu); |
466 | continue; | 487 | continue; |
467 | } | 488 | } |
468 | 489 | ||
@@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
476 | return err; | 497 | return err; |
477 | } | 498 | } |
478 | 499 | ||
479 | cpumask_set_cpu(i, &armpmu->active_irqs); | 500 | cpumask_set_cpu(cpu, &armpmu->active_irqs); |
480 | } | 501 | } |
481 | } | 502 | } |
482 | 503 | ||
@@ -1289,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = { | |||
1289 | 1310 | ||
1290 | static int armpmu_device_probe(struct platform_device *pdev) | 1311 | static int armpmu_device_probe(struct platform_device *pdev) |
1291 | { | 1312 | { |
1313 | int i, *irqs; | ||
1314 | |||
1292 | if (!cpu_pmu) | 1315 | if (!cpu_pmu) |
1293 | return -ENODEV; | 1316 | return -ENODEV; |
1294 | 1317 | ||
1318 | irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); | ||
1319 | if (!irqs) | ||
1320 | return -ENOMEM; | ||
1321 | |||
1322 | for (i = 0; i < pdev->num_resources; ++i) { | ||
1323 | struct device_node *dn; | ||
1324 | int cpu; | ||
1325 | |||
1326 | dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", | ||
1327 | i); | ||
1328 | if (!dn) { | ||
1329 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", | ||
1330 | of_node_full_name(pdev->dev.of_node), i); | ||
1331 | break; | ||
1332 | } | ||
1333 | |||
1334 | for_each_possible_cpu(cpu) | ||
1335 | if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) | ||
1336 | break; | ||
1337 | |||
1338 | if (cpu >= nr_cpu_ids) { | ||
1339 | pr_warn("Failed to find logical CPU for %s\n", | ||
1340 | dn->name); | ||
1341 | of_node_put(dn); | ||
1342 | break; | ||
1343 | } | ||
1344 | of_node_put(dn); | ||
1345 | irqs[i] = cpu; | ||
1346 | } | ||
1347 | |||
1348 | if (i == pdev->num_resources) | ||
1349 | cpu_pmu->irq_affinity = irqs; | ||
1350 | else | ||
1351 | kfree(irqs); | ||
1352 | |||
1295 | cpu_pmu->plat_device = pdev; | 1353 | cpu_pmu->plat_device = pdev; |
1296 | return 0; | 1354 | return 0; |
1297 | } | 1355 | } |
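
The effect of the new irq_affinity array on the request/release paths above reduces to a small indirection: interrupt slot i maps to logical CPU irq_affinity[i] when the interrupt-affinity property was parsed, and to CPU i otherwise, which preserves the old behaviour. A minimal model:

#include <assert.h>
#include <stddef.h>

static int pmu_irq_to_cpu(const int *irq_affinity, int i)
{
	return irq_affinity ? irq_affinity[i] : i;	/* old behaviour: 1:1 */
}

int main(void)
{
	int affinity[] = { 2, 3 };	/* hypothetical: SPI 0 -> CPU2, SPI 1 -> CPU3 */

	assert(pmu_irq_to_cpu(affinity, 1) == 3);
	assert(pmu_irq_to_cpu(NULL, 1) == 1);
	return 0;
}
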
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index e8420f635bd4..51ef97274b52 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <asm/cpu.h> | 50 | #include <asm/cpu.h> |
51 | #include <asm/cputype.h> | 51 | #include <asm/cputype.h> |
52 | #include <asm/elf.h> | 52 | #include <asm/elf.h> |
53 | #include <asm/cputable.h> | ||
54 | #include <asm/cpufeature.h> | 53 | #include <asm/cpufeature.h> |
55 | #include <asm/cpu_ops.h> | 54 | #include <asm/cpu_ops.h> |
56 | #include <asm/sections.h> | 55 | #include <asm/sections.h> |
@@ -62,9 +61,7 @@ | |||
62 | #include <asm/memblock.h> | 61 | #include <asm/memblock.h> |
63 | #include <asm/psci.h> | 62 | #include <asm/psci.h> |
64 | #include <asm/efi.h> | 63 | #include <asm/efi.h> |
65 | 64 | #include <asm/virt.h> | |
66 | unsigned int processor_id; | ||
67 | EXPORT_SYMBOL(processor_id); | ||
68 | 65 | ||
69 | unsigned long elf_hwcap __read_mostly; | 66 | unsigned long elf_hwcap __read_mostly; |
70 | EXPORT_SYMBOL_GPL(elf_hwcap); | 67 | EXPORT_SYMBOL_GPL(elf_hwcap); |
@@ -83,7 +80,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; | |||
83 | 80 | ||
84 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | 81 | DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); |
85 | 82 | ||
86 | static const char *cpu_name; | ||
87 | phys_addr_t __fdt_pointer __initdata; | 83 | phys_addr_t __fdt_pointer __initdata; |
88 | 84 | ||
89 | /* | 85 | /* |
@@ -119,6 +115,11 @@ void __init early_print(const char *str, ...) | |||
119 | printk("%s", buf); | 115 | printk("%s", buf); |
120 | } | 116 | } |
121 | 117 | ||
118 | /* | ||
119 | * The recorded values of x0 .. x3 upon kernel entry. | ||
120 | */ | ||
121 | u64 __cacheline_aligned boot_args[4]; | ||
122 | |||
122 | void __init smp_setup_processor_id(void) | 123 | void __init smp_setup_processor_id(void) |
123 | { | 124 | { |
124 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; | 125 | u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; |
@@ -207,24 +208,38 @@ static void __init smp_build_mpidr_hash(void) | |||
207 | } | 208 | } |
208 | #endif | 209 | #endif |
209 | 210 | ||
211 | static void __init hyp_mode_check(void) | ||
212 | { | ||
213 | if (is_hyp_mode_available()) | ||
214 | pr_info("CPU: All CPU(s) started at EL2\n"); | ||
215 | else if (is_hyp_mode_mismatched()) | ||
216 | WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC, | ||
217 | "CPU: CPUs started in inconsistent modes"); | ||
218 | else | ||
219 | pr_info("CPU: All CPU(s) started at EL1\n"); | ||
220 | } | ||
221 | |||
222 | void __init do_post_cpus_up_work(void) | ||
223 | { | ||
224 | hyp_mode_check(); | ||
225 | apply_alternatives_all(); | ||
226 | } | ||
227 | |||
228 | #ifdef CONFIG_UP_LATE_INIT | ||
229 | void __init up_late_init(void) | ||
230 | { | ||
231 | do_post_cpus_up_work(); | ||
232 | } | ||
233 | #endif /* CONFIG_UP_LATE_INIT */ | ||
234 | |||
210 | static void __init setup_processor(void) | 235 | static void __init setup_processor(void) |
211 | { | 236 | { |
212 | struct cpu_info *cpu_info; | ||
213 | u64 features, block; | 237 | u64 features, block; |
214 | u32 cwg; | 238 | u32 cwg; |
215 | int cls; | 239 | int cls; |
216 | 240 | ||
217 | cpu_info = lookup_processor_type(read_cpuid_id()); | 241 | printk("CPU: AArch64 Processor [%08x] revision %d\n", |
218 | if (!cpu_info) { | 242 | read_cpuid_id(), read_cpuid_id() & 15); |
219 | printk("CPU configuration botched (ID %08x), unable to continue.\n", | ||
220 | read_cpuid_id()); | ||
221 | while (1); | ||
222 | } | ||
223 | |||
224 | cpu_name = cpu_info->cpu_name; | ||
225 | |||
226 | printk("CPU: %s [%08x] revision %d\n", | ||
227 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15); | ||
228 | 243 | ||
229 | sprintf(init_utsname()->machine, ELF_PLATFORM); | 244 | sprintf(init_utsname()->machine, ELF_PLATFORM); |
230 | elf_hwcap = 0; | 245 | elf_hwcap = 0; |
@@ -402,6 +417,12 @@ void __init setup_arch(char **cmdline_p) | |||
402 | conswitchp = &dummy_con; | 417 | conswitchp = &dummy_con; |
403 | #endif | 418 | #endif |
404 | #endif | 419 | #endif |
420 | if (boot_args[1] || boot_args[2] || boot_args[3]) { | ||
421 | pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n" | ||
422 | "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n" | ||
423 | "This indicates a broken bootloader or old kernel\n", | ||
424 | boot_args[1], boot_args[2], boot_args[3]); | ||
425 | } | ||
405 | } | 426 | } |
406 | 427 | ||
407 | static int __init arm64_device_init(void) | 428 | static int __init arm64_device_init(void) |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 328b8ce4b007..ffe8e1b814e0 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void) | |||
151 | */ | 151 | */ |
152 | cpu_set_reserved_ttbr0(); | 152 | cpu_set_reserved_ttbr0(); |
153 | flush_tlb_all(); | 153 | flush_tlb_all(); |
154 | cpu_set_default_tcr_t0sz(); | ||
154 | 155 | ||
155 | preempt_disable(); | 156 | preempt_disable(); |
156 | trace_hardirqs_off(); | 157 | trace_hardirqs_off(); |
@@ -309,7 +310,7 @@ void cpu_die(void) | |||
309 | void __init smp_cpus_done(unsigned int max_cpus) | 310 | void __init smp_cpus_done(unsigned int max_cpus) |
310 | { | 311 | { |
311 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); | 312 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); |
312 | apply_alternatives_all(); | 313 | do_post_cpus_up_work(); |
313 | } | 314 | } |
314 | 315 | ||
315 | void __init smp_prepare_boot_cpu(void) | 316 | void __init smp_prepare_boot_cpu(void) |
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 2d5ab3c90b82..a40b1343b819 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c | |||
@@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void); | |||
37 | asmlinkage long compat_sys_fadvise64_64_wrapper(void); | 37 | asmlinkage long compat_sys_fadvise64_64_wrapper(void); |
38 | asmlinkage long compat_sys_sync_file_range2_wrapper(void); | 38 | asmlinkage long compat_sys_sync_file_range2_wrapper(void); |
39 | asmlinkage long compat_sys_fallocate_wrapper(void); | 39 | asmlinkage long compat_sys_fallocate_wrapper(void); |
40 | asmlinkage long compat_sys_mmap2_wrapper(void); | ||
40 | 41 | ||
41 | #undef __SYSCALL | 42 | #undef __SYSCALL |
42 | #define __SYSCALL(nr, sym) [nr] = sym, | 43 | #define __SYSCALL(nr, sym) [nr] = sym, |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5d9d2dca530d..a2c29865c3fe 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -23,10 +23,14 @@ jiffies = jiffies_64; | |||
23 | 23 | ||
24 | #define HYPERVISOR_TEXT \ | 24 | #define HYPERVISOR_TEXT \ |
25 | /* \ | 25 | /* \ |
26 | * Force the alignment to be compatible with \ | 26 | * Align to 4 KB so that \ |
27 | * the vectors requirements \ | 27 | * a) the HYP vector table is at its minimum \ |
28 | * alignment of 2048 bytes \ | ||
29 | * b) the HYP init code will not cross a page \ | ||
30 | * boundary if its size does not exceed \ | ||
31 | * 4 KB (see related ASSERT() below) \ | ||
28 | */ \ | 32 | */ \ |
29 | . = ALIGN(2048); \ | 33 | . = ALIGN(SZ_4K); \ |
30 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ | 34 | VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ |
31 | *(.hyp.idmap.text) \ | 35 | *(.hyp.idmap.text) \ |
32 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ | 36 | VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ |
@@ -163,10 +167,11 @@ SECTIONS | |||
163 | } | 167 | } |
164 | 168 | ||
165 | /* | 169 | /* |
166 | * The HYP init code can't be more than a page long. | 170 | * The HYP init code can't be more than a page long, |
171 | * and should not cross a page boundary. | ||
167 | */ | 172 | */ |
168 | ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), | 173 | ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, |
169 | "HYP init code too big") | 174 | "HYP init code too big or misaligned") |
170 | 175 | ||
171 | /* | 176 | /* |
172 | * If padding is applied before .head.text, virt<->phys conversions will fail. | 177 | * If padding is applied before .head.text, virt<->phys conversions will fail. |
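
The reworked ASSERT rounds the start of the HYP idmap text down to a 4 KB boundary and requires the end to stay within 4 KB of that, so it now rejects page-straddling layouts as well as oversized ones. A quick standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

#define SZ_4K	0x1000ULL

static int hyp_init_fits_one_page(uint64_t start, uint64_t end)
{
	return end - (start & ~(SZ_4K - 1)) <= SZ_4K;
}

int main(void)
{
	assert(hyp_init_fits_one_page(0x1000, 0x1800));	 /* inside one page */
	assert(!hyp_init_fits_one_page(0x1800, 0x2100)); /* straddles 0x2000 */
	return 0;
}
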
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index c3191168a994..178ba2248a98 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <asm/assembler.h> | 20 | #include <asm/assembler.h> |
21 | #include <asm/kvm_arm.h> | 21 | #include <asm/kvm_arm.h> |
22 | #include <asm/kvm_mmu.h> | 22 | #include <asm/kvm_mmu.h> |
23 | #include <asm/pgtable-hwdef.h> | ||
23 | 24 | ||
24 | .text | 25 | .text |
25 | .pushsection .hyp.idmap.text, "ax" | 26 | .pushsection .hyp.idmap.text, "ax" |
@@ -65,6 +66,25 @@ __do_hyp_init: | |||
65 | and x4, x4, x5 | 66 | and x4, x4, x5 |
66 | ldr x5, =TCR_EL2_FLAGS | 67 | ldr x5, =TCR_EL2_FLAGS |
67 | orr x4, x4, x5 | 68 | orr x4, x4, x5 |
69 | |||
70 | #ifndef CONFIG_ARM64_VA_BITS_48 | ||
71 | /* | ||
72 | * If we are running with VA_BITS < 48, we may be running with an extra | ||
73 | * level of translation in the ID map. This is only the case if system | ||
74 | * RAM is out of range for the currently configured page size and number | ||
75 | * of translation levels, in which case we will also need the extra | ||
76 | * level for the HYP ID map, or we won't be able to enable the EL2 MMU. | ||
77 | * | ||
78 | * However, at EL2, there is only one TTBR register, and we can't switch | ||
79 | * between translation tables *and* update TCR_EL2.T0SZ at the same | ||
80 | * time. Bottom line: we need the extra level in *both* our translation | ||
81 | * tables. | ||
82 | * | ||
83 | * So use the same T0SZ value we use for the ID map. | ||
84 | */ | ||
85 | ldr_l x5, idmap_t0sz | ||
86 | bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH | ||
87 | #endif | ||
68 | msr tcr_el2, x4 | 88 | msr tcr_el2, x4 |
69 | 89 | ||
70 | ldr x4, =VTCR_EL2_FLAGS | 90 | ldr x4, =VTCR_EL2_FLAGS |
@@ -91,6 +111,10 @@ __do_hyp_init: | |||
91 | msr sctlr_el2, x4 | 111 | msr sctlr_el2, x4 |
92 | isb | 112 | isb |
93 | 113 | ||
114 | /* Skip the trampoline dance if we merged the boot and runtime PGDs */ | ||
115 | cmp x0, x1 | ||
116 | b.eq merged | ||
117 | |||
94 | /* MMU is now enabled. Get ready for the trampoline dance */ | 118 | /* MMU is now enabled. Get ready for the trampoline dance */ |
95 | ldr x4, =TRAMPOLINE_VA | 119 | ldr x4, =TRAMPOLINE_VA |
96 | adr x5, target | 120 | adr x5, target |
@@ -105,6 +129,7 @@ target: /* We're now in the trampoline code, switch page tables */ | |||
105 | tlbi alle2 | 129 | tlbi alle2 |
106 | dsb sy | 130 | dsb sy |
107 | 131 | ||
132 | merged: | ||
108 | /* Set the stack and new vectors */ | 133 | /* Set the stack and new vectors */ |
109 | kern_hyp_va x2 | 134 | kern_hyp_va x2 |
110 | mov sp, x2 | 135 | mov sp, x2 |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 79e01163a981..5b8b664422d3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -40,6 +40,8 @@ | |||
40 | 40 | ||
41 | #include "mm.h" | 41 | #include "mm.h" |
42 | 42 | ||
43 | u64 idmap_t0sz = TCR_T0SZ(VA_BITS); | ||
44 | |||
43 | /* | 45 | /* |
44 | * Empty_zero_page is a special page that is used for zero-initialized data | 46 | * Empty_zero_page is a special page that is used for zero-initialized data |
45 | * and COW. | 47 | * and COW. |
@@ -454,6 +456,7 @@ void __init paging_init(void) | |||
454 | */ | 456 | */ |
455 | cpu_set_reserved_ttbr0(); | 457 | cpu_set_reserved_ttbr0(); |
456 | flush_tlb_all(); | 458 | flush_tlb_all(); |
459 | cpu_set_default_tcr_t0sz(); | ||
457 | } | 460 | } |
458 | 461 | ||
459 | /* | 462 | /* |
@@ -461,8 +464,10 @@ void __init paging_init(void) | |||
461 | */ | 464 | */ |
462 | void setup_mm_for_reboot(void) | 465 | void setup_mm_for_reboot(void) |
463 | { | 466 | { |
464 | cpu_switch_mm(idmap_pg_dir, &init_mm); | 467 | cpu_set_reserved_ttbr0(); |
465 | flush_tlb_all(); | 468 | flush_tlb_all(); |
469 | cpu_set_idmap_tcr_t0sz(); | ||
470 | cpu_switch_mm(idmap_pg_dir, &init_mm); | ||
466 | } | 471 | } |
467 | 472 | ||
468 | /* | 473 | /* |
@@ -627,10 +632,7 @@ void __set_fixmap(enum fixed_addresses idx, | |||
627 | unsigned long addr = __fix_to_virt(idx); | 632 | unsigned long addr = __fix_to_virt(idx); |
628 | pte_t *pte; | 633 | pte_t *pte; |
629 | 634 | ||
630 | if (idx >= __end_of_fixed_addresses) { | 635 | BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); |
631 | BUG(); | ||
632 | return; | ||
633 | } | ||
634 | 636 | ||
635 | pte = fixmap_pte(addr); | 637 | pte = fixmap_pte(addr); |
636 | 638 | ||
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 1d3ec3ddd84b..e47ed1c5dce1 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c | |||
@@ -73,7 +73,6 @@ int set_memory_ro(unsigned long addr, int numpages) | |||
73 | __pgprot(PTE_RDONLY), | 73 | __pgprot(PTE_RDONLY), |
74 | __pgprot(PTE_WRITE)); | 74 | __pgprot(PTE_WRITE)); |
75 | } | 75 | } |
76 | EXPORT_SYMBOL_GPL(set_memory_ro); | ||
77 | 76 | ||
78 | int set_memory_rw(unsigned long addr, int numpages) | 77 | int set_memory_rw(unsigned long addr, int numpages) |
79 | { | 78 | { |
@@ -81,7 +80,6 @@ int set_memory_rw(unsigned long addr, int numpages) | |||
81 | __pgprot(PTE_WRITE), | 80 | __pgprot(PTE_WRITE), |
82 | __pgprot(PTE_RDONLY)); | 81 | __pgprot(PTE_RDONLY)); |
83 | } | 82 | } |
84 | EXPORT_SYMBOL_GPL(set_memory_rw); | ||
85 | 83 | ||
86 | int set_memory_nx(unsigned long addr, int numpages) | 84 | int set_memory_nx(unsigned long addr, int numpages) |
87 | { | 85 | { |
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S index 005d29e2977d..4c4d93c4bf65 100644 --- a/arch/arm64/mm/proc-macros.S +++ b/arch/arm64/mm/proc-macros.S | |||
@@ -52,3 +52,13 @@ | |||
52 | mov \reg, #4 // bytes per word | 52 | mov \reg, #4 // bytes per word |
53 | lsl \reg, \reg, \tmp // actual cache line size | 53 | lsl \reg, \reg, \tmp // actual cache line size |
54 | .endm | 54 | .endm |
55 | |||
56 | /* | ||
57 | * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map | ||
58 | */ | ||
59 | .macro tcr_set_idmap_t0sz, valreg, tmpreg | ||
60 | #ifndef CONFIG_ARM64_VA_BITS_48 | ||
61 | ldr_l \tmpreg, idmap_t0sz | ||
62 | bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH | ||
63 | #endif | ||
64 | .endm | ||
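
tcr_set_idmap_t0sz above is a single bfi: insert the 6-bit TxSZ field at offset 0 of the TCR image without disturbing the other fields. A userspace model of that update, with the field geometry as in <asm/pgtable-hwdef.h> and a hypothetical harness:

#include <assert.h>
#include <stdint.h>

#define TCR_T0SZ_OFFSET	0
#define TCR_TxSZ_WIDTH	6

static uint64_t bfi_t0sz(uint64_t tcr, uint64_t t0sz)
{
	uint64_t mask = ((1ULL << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET;

	return (tcr & ~mask) | ((t0sz << TCR_T0SZ_OFFSET) & mask);
}

int main(void)
{
	/* Replace T0SZ = 25 with T0SZ = 24, leaving the upper bits intact. */
	assert(bfi_t0sz(0xffffffffffffffc0ULL | 25, 24) ==
	       (0xffffffffffffffc0ULL | 24));
	return 0;
}
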
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 28eebfb6af76..cdd754e19b9b 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) | |||
156 | msr cpacr_el1, x6 | 156 | msr cpacr_el1, x6 |
157 | msr ttbr0_el1, x1 | 157 | msr ttbr0_el1, x1 |
158 | msr ttbr1_el1, x7 | 158 | msr ttbr1_el1, x7 |
159 | tcr_set_idmap_t0sz x8, x7 | ||
159 | msr tcr_el1, x8 | 160 | msr tcr_el1, x8 |
160 | msr vbar_el1, x9 | 161 | msr vbar_el1, x9 |
161 | msr mdscr_el1, x10 | 162 | msr mdscr_el1, x10 |
@@ -233,6 +234,8 @@ ENTRY(__cpu_setup) | |||
233 | */ | 234 | */ |
234 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ | 235 | ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ |
235 | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 | 236 | TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 |
237 | tcr_set_idmap_t0sz x10, x9 | ||
238 | |||
236 | /* | 239 | /* |
237 | * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in | 240 | * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in |
238 | * TCR_EL1. | 241 | * TCR_EL1. |