-rw-r--r--  arch/arm64/include/asm/cacheflush.h   |  5 -
-rw-r--r--  arch/arm64/include/asm/proc-fns.h     |  4 -
-rw-r--r--  arch/arm64/include/asm/system_misc.h  |  1 -
-rw-r--r--  arch/arm64/kernel/process.c           | 12 +-
-rw-r--r--  arch/arm64/mm/cache.S                 | 73 -
-rw-r--r--  arch/arm64/mm/flush.c                 |  1 -
-rw-r--r--  arch/arm64/mm/proc.S                  | 46 -
7 files changed, 1 insertion(+), 141 deletions(-)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 67d309cc3b6b..c75b8d027eb1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,10 +40,6 @@
  * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
  * VIPT or ASID-tagged VIVT I-cache.
  *
- *	flush_cache_all()
- *
- *		Unconditionally clean and invalidate the entire cache.
- *
  *	flush_cache_mm(mm)
  *
  *		Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
  *	- kaddr  - page address
  *	- size   - region size
  */
-extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
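With flush_cache_all() gone from this header, callers are expected to maintain caches by virtual address over a bounded region instead, e.g. via __flush_dcache_area() declared above. A minimal sketch of such a caller (publish_buffer is a hypothetical name, not kernel API):

	#include <linux/types.h>
	#include <asm/cacheflush.h>

	/* Clean and invalidate only the D-cache lines covering buf, by VA. */
	static void publish_buffer(void *buf, size_t len)
	{
		__flush_dcache_area(buf, len);
	}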
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b791b8..14ad6e4e87d1 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,12 +28,8 @@
 struct mm_struct;
 struct cpu_suspend_ctx;
 
-extern void cpu_cache_off(void);
 extern void cpu_do_idle(void);
 extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
-void cpu_soft_restart(phys_addr_t cpu_reset,
-		      unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
 
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7a18fabbe0f6..659fbf5925de 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -41,7 +41,6 @@ struct mm_struct;
 extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
-void soft_restart(unsigned long);
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
 #define UDBG_UNDEFINED			(1 << 0)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b96f45..c506bee6b613 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
 
-void soft_restart(unsigned long addr)
-{
-	setup_mm_for_reboot();
-	cpu_soft_restart(virt_to_phys(cpu_reset), addr);
-	/* Should never get here */
-	BUG();
-}
-
 /*
  * Function pointers to optional machine specific functions
  */
@@ -136,9 +128,7 @@ void machine_power_off(void)
 
 /*
  * Restart requires that the secondary CPUs stop performing any activity
- * while the primary CPU resets the system. Systems with a single CPU can
- * use soft_restart() as their machine descriptor's .restart hook, since that
- * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * while the primary CPU resets the system. Systems with multiple CPUs must
  * provide a HW restart implementation, to ensure that all CPUs reset at once.
  * This is required so that any code running after reset on the primary CPU
  * doesn't have to co-ordinate with other CPUs to ensure they aren't still
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e1562e..f563e9af0d01 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -27,79 +27,6 @@
 #include "proc-macros.S"
 
 /*
- * __flush_dcache_all()
- *
- *	Flush the whole D-cache.
- *
- *	Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
-	dmb	sy				// ensure ordering with previous memory accesses
-	mrs	x0, clidr_el1			// read clidr
-	and	x3, x0, #0x7000000		// extract loc from clidr
-	lsr	x3, x3, #23			// left align loc bit field
-	cbz	x3, finished			// if loc is 0, then no need to clean
-	mov	x10, #0				// start clean at cache level 0
-loop1:
-	add	x2, x10, x10, lsr #1		// work out 3x current cache level
-	lsr	x1, x0, x2			// extract cache type bits from clidr
-	and	x1, x1, #7			// mask of the bits for current cache only
-	cmp	x1, #2				// see what cache we have at this level
-	b.lt	skip				// skip if no cache, or just i-cache
-	save_and_disable_irqs x9		// make CSSELR and CCSIDR access atomic
-	msr	csselr_el1, x10			// select current cache level in csselr
-	isb					// isb to sych the new cssr&csidr
-	mrs	x1, ccsidr_el1			// read the new ccsidr
-	restore_irqs x9
-	and	x2, x1, #7			// extract the length of the cache lines
-	add	x2, x2, #4			// add 4 (line length offset)
-	mov	x4, #0x3ff
-	and	x4, x4, x1, lsr #3		// find maximum number on the way size
-	clz	w5, w4				// find bit position of way size increment
-	mov	x7, #0x7fff
-	and	x7, x7, x1, lsr #13		// extract max number of the index size
-loop2:
-	mov	x9, x4				// create working copy of max way size
-loop3:
-	lsl	x6, x9, x5
-	orr	x11, x10, x6			// factor way and cache number into x11
-	lsl	x6, x7, x2
-	orr	x11, x11, x6			// factor index number into x11
-	dc	cisw, x11			// clean & invalidate by set/way
-	subs	x9, x9, #1			// decrement the way
-	b.ge	loop3
-	subs	x7, x7, #1			// decrement the index
-	b.ge	loop2
-skip:
-	add	x10, x10, #2			// increment cache number
-	cmp	x3, x10
-	b.gt	loop1
-finished:
-	mov	x10, #0				// swith back to cache level 0
-	msr	csselr_el1, x10			// select current cache level in csselr
-	dsb	sy
-	isb
-	ret
-ENDPROC(__flush_dcache_all)
-
-/*
- * flush_cache_all()
- *
- *	Flush the entire cache system.  The data cache flush is now achieved
- *	using atomic clean / invalidates working outwards from L1 cache. This
- *	is done using Set/Way based cache maintainance instructions.  The
- *	instruction cache can still be invalidated back to the point of
- *	unification in a single instruction.
- */
-ENTRY(flush_cache_all)
-	mov	x12, lr
-	bl	__flush_dcache_all
-	mov	x0, #0
-	ic	ialluis				// I+BTB cache invalidate
-	ret	x12
-ENDPROC(flush_cache_all)
-
-/*
  * flush_icache_range(start,end)
  *
  *	Ensure that the I and D caches are coherent within specified region.
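The deleted routine is the canonical CLIDR/CCSIDR set/way walk: for each cache level that holds data, it derives the set and way shifts from CCSIDR_EL1 and issues one DC CISW per set/way pair. A sketch of the operand encoding in C, assuming the same CCSIDR field layout the assembly reads; cisw_operand is an illustrative name, not a kernel function:

	#include <stdint.h>

	/* Compose a DC CISW operand the way the deleted loop does. */
	static uint64_t cisw_operand(uint32_t ccsidr, unsigned int level,
				     uint32_t way, uint32_t set)
	{
		uint32_t setshift = (ccsidr & 0x7) + 4;	   /* log2(line bytes) */
		uint32_t maxway   = (ccsidr >> 3) & 0x3ff; /* ways - 1          */
		/* clz gives the left-alignment shift for the way field */
		uint32_t wayshift = maxway ? __builtin_clz(maxway) : 32;

		return ((uint64_t)level << 1) |		/* level in bits [3:1] */
		       ((uint64_t)way << wayshift) |
		       ((uint64_t)set << setshift);
	}

Set/way operations are only guaranteed to affect the caches of the CPU executing them, which makes whole-cache maintenance of this form unsuitable outside of CPU power-up/power-down sequences.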
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index b6f14e8d2121..4dfa3975ce5b 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19b9b..39139a3aa16d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,52 +46,6 @@
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
 /*
- * cpu_cache_off()
- *
- *	Turn the CPU D-cache off.
- */
-ENTRY(cpu_cache_off)
-	mrs	x0, sctlr_el1
-	bic	x0, x0, #1 << 2			// clear SCTLR.C
-	msr	sctlr_el1, x0
-	isb
-	ret
-ENDPROC(cpu_cache_off)
-
-/*
- * cpu_reset(loc)
- *
- *	Perform a soft reset of the system.  Put the CPU into the same state
- *	as it would be if it had been reset, and branch to what would be the
- *	reset vector. It must be executed with the flat identity mapping.
- *
- *	- loc   - location to jump to for soft reset
- */
-	.align	5
-ENTRY(cpu_reset)
-	mrs	x1, sctlr_el1
-	bic	x1, x1, #1
-	msr	sctlr_el1, x1			// disable the MMU
-	isb
-	ret	x0
-ENDPROC(cpu_reset)
-
-ENTRY(cpu_soft_restart)
-	/* Save address of cpu_reset() and reset address */
-	mov	x19, x0
-	mov	x20, x1
-
-	/* Turn D-cache off */
-	bl	cpu_cache_off
-
-	/* Push out all dirty data, and ensure cache is empty */
-	bl	flush_cache_all
-
-	mov	x0, x20
-	ret	x19
-ENDPROC(cpu_soft_restart)
-
-/*
  * cpu_do_idle()
  *
  *	Idle the processor (wait for interrupt).
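For reference, the effect of the deleted cpu_cache_off() expressed as C with inline assembly; a sketch only, with d_cache_off and SCTLR_EL1_C as illustrative names rather than kernel API:

	#include <stdint.h>

	#define SCTLR_EL1_C	(1UL << 2)	/* SCTLR_EL1.C: data-cache enable */

	static inline void d_cache_off(void)
	{
		uint64_t sctlr;

		/* Read-modify-write SCTLR_EL1 to clear the C bit, then resync. */
		asm volatile("mrs %0, sctlr_el1" : "=r" (sctlr));
		sctlr &= ~SCTLR_EL1_C;
		asm volatile("msr sctlr_el1, %0\n\tisb" : : "r" (sctlr) : "memory");
	}

The deleted cpu_soft_restart() sequence (D-cache off, set/way flush, then branch with the MMU disabled) depended on the routines removed above, which is why they all go away together in this commit.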