author	Thomas Gleixner <tglx@linutronix.de>	2014-03-12 11:01:07 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2014-03-12 11:01:07 -0400
commit	ffb12cf002edbc5927079f51bebde428d601f723 (patch)
tree	1f04d80df9db8883037d59c81f5836770eecfdc6 /arch/arm64
parent	1a75b8e64571a85d5e648cfdf4c40e0d9923abc5 (diff)
parent	c1bacbae8192dd2a9ebadd22d793b68054f6c6e5 (diff)

Merge branch 'irq/for-gpio' into irq/core

Merge the request/release callbacks which are in a separate branch
for consumption by the gpio folks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/percpu.h		 8
-rw-r--r--	arch/arm64/include/asm/pgtable.h	10
-rw-r--r--	arch/arm64/kernel/stacktrace.c		 6
-rw-r--r--	arch/arm64/kvm/hyp.S			27
4 files changed, 43 insertions, 8 deletions
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#ifdef CONFIG_SMP
+
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
+#else	/* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x)	do { } while (0)
+
+#endif	/* CONFIG_SMP */
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ASM_PERCPU_H */
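
Note on the percpu.h hunks: on a !CONFIG_SMP build there is only one copy of
each per-CPU variable, so no offset needs to be stashed in TPIDR_EL1 and
set_my_cpu_offset() can compile away to nothing; the asm-generic/percpu.h
fallback then resolves accessors to the plain variable. A minimal userspace
sketch of the offset mechanism, with my_cpu_offset standing in for the
TPIDR_EL1 register (all names here are hypothetical, not the kernel's):

	#include <stdio.h>

	#define NR_CPUS 2
	static long counter[NR_CPUS];		/* one slot per CPU */
	static unsigned long my_cpu_offset;	/* stand-in for TPIDR_EL1 */

	static void set_my_cpu_offset_demo(unsigned long off)
	{
		my_cpu_offset = off;		/* kernel: msr tpidr_el1, %0 */
	}

	static long *this_cpu_counter(void)
	{
		return (long *)((char *)counter + my_cpu_offset);
	}

	int main(void)
	{
		set_my_cpu_offset_demo(1 * sizeof(long)); /* pretend we are CPU 1 */
		++*this_cpu_counter();
		printf("counter[1] = %ld\n", counter[1]); /* prints 1 */
		return 0;
	}

On UP the offset is always zero, which is why the stubbed macro loses nothing.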
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..aa3917c8b623 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
-#define pte_present(pte)	(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte)		(pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte)		(pte_val(pte) & PTE_AF)
-#define pte_special(pte)	(pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte)		(pte_val(pte) & PTE_WRITE)
+#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
 #define pte_valid_user(pte) \
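
Note on the pgtable.h hunk: pte_val() is 64 bits wide and several of the bits
tested here (PTE_DIRTY, PTE_SPECIAL, PTE_WRITE) sit in the upper half of the
entry, so returning the raw masked value is unsafe: a caller that assigns the
result to an int silently truncates the set bit away and the test reads as
false. The double logical invert normalises the result to a strict 0/1. A
compilable illustration; the bit position below is representative of the
arm64 layout rather than quoted from it:

	#include <stdio.h>

	#define PTE_WRITE	(1UL << 57)	/* a software bit above bit 31 */

	int main(void)
	{
		unsigned long pte = PTE_WRITE;

		int bad  = (int)(pte & PTE_WRITE);	/* truncated to 0: flag "lost" */
		int good = !!(pte & PTE_WRITE);		/* normalised: 1 */

		printf("bad=%d good=%d\n", bad, good);
		return 0;
	}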
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c3b6c63ea5fb..38f0558f0c0a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
 
 	frame->sp = fp + 0x10;
 	frame->fp = *(unsigned long *)(fp);
-	frame->pc = *(unsigned long *)(fp + 8);
+	/*
+	 * -4 here because we care about the PC at time of bl,
+	 * not where the return will go.
+	 */
+	frame->pc = *(unsigned long *)(fp + 8) - 4;
 
 	return 0;
 }
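
Note on the stacktrace.c hunk: AArch64 instructions are 4 bytes and bl writes
the address of the following instruction into the link register, so the value
saved at fp + 8 points one instruction past the call. Subtracting 4 makes the
unwinder report the bl itself, which is what a symbolised backtrace should
name. A userspace sketch of the frame-record walk, where a record is
{ previous fp, saved lr } and the frames below are fabricated for the demo:

	#include <stdio.h>

	struct record {
		unsigned long fp;	/* previous frame record */
		unsigned long lr;	/* saved return address */
	};

	int main(void)
	{
		struct record outer = { 0, 0x400010UL };
		struct record inner = { (unsigned long)&outer, 0x400124UL };
		unsigned long fp = (unsigned long)&inner;

		while (fp) {
			struct record *r = (struct record *)fp;
			printf("call site: %#lx\n", r->lr - 4);	/* the bl itself */
			fp = r->fp;	/* frame->fp = *(unsigned long *)(fp) */
		}
		return 0;
	}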
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36e10ff..2c56012cb2d2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -694,6 +694,24 @@ __hyp_panic_str:
 
 	.align	2
 
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed).  The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
 ENTRY(kvm_call_hyp)
 	hvc	#0
 	ret
@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
 	pop	x2, x3
 	pop	x0, x1
 
-	push	lr, xzr
+	/* Check for __hyp_get_vectors */
+	cbnz	x0, 1f
+	mrs	x0, vbar_el2
+	b	2f
+
+1:	push	lr, xzr
 
 	/*
 	 * Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
 	blr	lr
 
 	pop	lr, xzr
-	eret
+2:	eret
 
 el1_trap:
 	/*
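
Note on the hyp.S hunks: kvm_call_hyp() is the EL1-side trampoline; hvc #0
traps into the el1_sync handler, which branches to the requested function in
Hyp mode. The new cbnz/mrs path makes a NULL function pointer mean "return
VBAR_EL2", which is how __hyp_get_vectors is implemented per the comment
block. A hedged sketch of the C-side convention; the wrapper and hyp_fn_demo
names are illustrative, and only kvm_call_hyp() itself and the register-only
argument rule come from the comment above:

	#include <stddef.h>

	typedef unsigned long long u64;
	struct kvm_vcpu;			/* opaque here */

	u64 kvm_call_hyp(void *hypfn, ...);	/* the hvc #0 trampoline */
	void hyp_fn_demo(struct kvm_vcpu *vcpu);/* hypothetical, mapped in Hyp mode */

	/*
	 * Arguments ride in registers x0-x2 alongside the function pointer;
	 * nothing may be passed on the stack, since the EL1 and EL2 stacks
	 * differ.
	 */
	static void example_call(struct kvm_vcpu *vcpu)
	{
		kvm_call_hyp(hyp_fn_demo, vcpu);
	}

	/* A NULL function pointer is the special case: it returns VBAR_EL2. */
	static u64 example_get_vectors(void)
	{
		return kvm_call_hyp(NULL);
	}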