-rw-r--r--  arch/arm64/include/asm/dma-contiguous.h |  1
-rw-r--r--  arch/arm64/include/asm/pgtable.h        | 93
-rw-r--r--  arch/arm64/kernel/process.c             |  6
-rw-r--r--  arch/arm64/kernel/smp.c                 |  2
-rw-r--r--  arch/arm64/kernel/suspend.c             |  8
-rw-r--r--  arch/arm64/mm/cache.S                   |  2
-rw-r--r--  arch/arm64/mm/proc-macros.S             |  3
-rw-r--r--  arch/arm64/mm/proc.S                    |  2
8 files changed, 74 insertions, 43 deletions
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
index d6aacb61ff4a..14c4c0ca7f2a 100644
--- a/arch/arm64/include/asm/dma-contiguous.h
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -18,7 +18,6 @@
 #ifdef CONFIG_DMA_CMA
 
 #include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
 
 static inline void
 dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7f2b60affbb4..b524dcd17243 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -28,7 +28,7 @@
 #define PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !pte_present() */
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
-				/* bit 57 for PMD_SECT_SPLITTING */
+#define PTE_WRITE		(_AT(pteval_t, 1) << 57)
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -67,15 +67,15 @@ extern pgprot_t pgprot_default;
 
 #define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
 
-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
 
 #define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
@@ -83,13 +83,13 @@ extern pgprot_t pgprot_default;
 #define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
 
-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
 
 #endif /* __ASSEMBLY__ */
 
@@ -140,22 +140,53 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
 #define pte_young(pte)		(pte_val(pte) & PTE_AF)
 #define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(!(pte_val(pte) & PTE_RDONLY))
+#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= PTE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= PTE_DIRTY;
+	return pte;
+}
 
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= PTE_AF;
+	return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	pte_val(pte) |= PTE_SPECIAL;
+	return pte;
+}
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
@@ -170,8 +201,10 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	if (pte_valid_user(pte)) {
 		if (pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (!pte_dirty(pte))
-			pte = pte_wrprotect(pte);
+		if (pte_dirty(pte) && pte_write(pte))
+			pte_val(pte) &= ~PTE_RDONLY;
+		else
+			pte_val(pte) |= PTE_RDONLY;
 	}
 
 	set_pte(ptep, pte);
@@ -345,7 +378,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
-			      PTE_PROT_NONE | PTE_VALID;
+			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
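
Note: taken together, the pgtable.h hunks separate the software view of write permission from the hardware one. PTE_WRITE (bit 57) now records whether a mapping is logically writable, PTE_DIRTY (bit 55) whether it has been written, and set_pte_at() derives the hardware read-only bit (PTE_RDONLY) from the two, so a clean-but-writable page stays read-only in hardware, the first store faults, and the fault path can mark it dirty before making it writable. A minimal standalone sketch of that derivation follows; it is not kernel code, and the PTE_RDONLY bit position (bit 7) is an assumption for illustration, not taken from this diff.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions: PTE_DIRTY/PTE_WRITE from the hunk above; PTE_RDONLY assumed. */
    #define PTE_RDONLY (UINT64_C(1) << 7)
    #define PTE_DIRTY  (UINT64_C(1) << 55)
    #define PTE_WRITE  (UINT64_C(1) << 57)

    /* Mirrors the new set_pte_at() logic: only a pte that is both writable
     * and dirty becomes hardware-writable; anything else is mapped read-only
     * so the first write traps and the handler can set PTE_DIRTY. */
    static uint64_t resolve_hw_rdonly(uint64_t pte)
    {
            if ((pte & PTE_WRITE) && (pte & PTE_DIRTY))
                    pte &= ~PTE_RDONLY;
            else
                    pte |= PTE_RDONLY;
            return pte;
    }

    int main(void)
    {
            uint64_t clean = PTE_WRITE;             /* writable, never written */
            uint64_t dirty = PTE_WRITE | PTE_DIRTY; /* writable and written    */

            printf("clean: rdonly=%d\n", (int)!!(resolve_hw_rdonly(clean) & PTE_RDONLY)); /* 1 */
            printf("dirty: rdonly=%d\n", (int)!!(resolve_hw_rdonly(dirty) & PTE_RDONLY)); /* 0 */
            return 0;
    }
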
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 248a15db37f2..1c0a9be2ffa8 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -85,11 +85,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 EXPORT_SYMBOL_GPL(arm_pm_restart);
 
-void arch_cpu_idle_prepare(void)
-{
-	local_fiq_enable();
-}
-
 /*
  * This is our default idle handler.
  */
@@ -138,7 +133,6 @@ void machine_restart(char *cmd)
 
 	/* Disable interrupts first */
 	local_irq_disable();
-	local_fiq_disable();
 
 	/* Now call the architecture specific reboot code. */
 	if (arm_pm_restart)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1b7617ab499b..7cfb92a4ab66 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -161,7 +161,6 @@ asmlinkage void secondary_start_kernel(void)
 	complete(&cpu_running);
 
 	local_irq_enable();
-	local_fiq_enable();
 	local_async_enable();
 
 	/*
@@ -495,7 +494,6 @@ static void ipi_cpu_stop(unsigned int cpu)
 
 	set_cpu_online(cpu, false);
 
-	local_fiq_disable();
 	local_irq_disable();
 
 	while (1)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 430344e2c989..1fa9ce4afd8f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
@@ -89,6 +90,13 @@ int cpu_suspend(unsigned long arg)
 	if (ret == 0) {
 		cpu_switch_mm(mm->pgd, mm);
 		flush_tlb_all();
+
+		/*
+		 * Restore per-cpu offset before any kernel
+		 * subsystem relying on it has a chance to run.
+		 */
+		set_my_cpu_offset(per_cpu_offset(cpu));
+
 		/*
 		 * Restore HW breakpoint registers to sane values
 		 * before debug exceptions are possibly reenabled
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a386094fa3..1ea9f26d1b70 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -146,7 +146,7 @@ ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
 
 /*
- *	__flush_kern_dcache_page(kaddr)
+ *	__flush_dcache_area(kaddr, size)
  *
  *	Ensure that the data held in the page kaddr is written back to the
  *	page in question.
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b822010b..005d29e2977d 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
  */
 	.macro	dcache_line_size, reg, tmp
 	mrs	\tmp, ctr_el0			// read CTR
-	lsr	\tmp, \tmp, #16
-	and	\tmp, \tmp, #0xf		// cache line size encoding
+	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
 	mov	\reg, #4			// bytes per word
 	lsl	\reg, \reg, \tmp		// actual cache line size
 	.endm
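
Note: the ubfm here extracts CTR_EL0 bits [19:16] in a single instruction, replacing the removed lsr/and pair; that field is DminLine, the log2 of the number of 4-byte words in the smallest data cache line. A small C model of what the macro computes (field semantics per the architecture manual, not part of this diff):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of dcache_line_size: CTR_EL0[19:16] is DminLine, the log2 of the
     * number of 4-byte words in the smallest D-cache line. */
    static unsigned int dcache_line_size(uint64_t ctr_el0)
    {
            unsigned int dminline = (ctr_el0 >> 16) & 0xf; /* ubfm #16, #19 */
            return 4u << dminline;                         /* mov #4 ; lsl  */
    }

    int main(void)
    {
            /* DminLine = 4 -> 16 words -> 64-byte lines (a common value). */
            printf("%u bytes\n", dcache_line_size(UINT64_C(4) << 16));
            return 0;
    }
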
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index bed1f1de1caf..1333e6f9a8e5 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -150,7 +150,7 @@ ENDPROC(cpu_do_resume)
 #endif
 
 /*
- *	cpu_switch_mm(pgd_phys, tsk)
+ *	cpu_do_switch_mm(pgd_phys, tsk)
  *
  *	Set the translation table base pointer to be pgd_phys.
  *
