Diffstat (limited to 'arch/arm/include/asm')
-rw-r--r--  arch/arm/include/asm/arch_timer.h       |  19
-rw-r--r--  arch/arm/include/asm/hardware/it8152.h  |   2
-rw-r--r--  arch/arm/include/asm/mach/pci.h         |  17
-rw-r--r--  arch/arm/include/asm/mmu.h              |   7
-rw-r--r--  arch/arm/include/asm/mmu_context.h      | 104
-rw-r--r--  arch/arm/include/asm/thread_info.h      |   8
-rw-r--r--  arch/arm/include/asm/tls.h              |   4
7 files changed, 117 insertions(+), 44 deletions(-)
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
new file mode 100644
index 000000000000..ed2e95d46e29
--- /dev/null
+++ b/arch/arm/include/asm/arch_timer.h
@@ -0,0 +1,19 @@
+#ifndef __ASMARM_ARCH_TIMER_H
+#define __ASMARM_ARCH_TIMER_H
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+int arch_timer_of_register(void);
+int arch_timer_sched_clock_init(void);
+#else
+static inline int arch_timer_of_register(void)
+{
+	return -ENXIO;
+}
+
+static inline int arch_timer_sched_clock_init(void)
+{
+	return -ENXIO;
+}
+#endif
+
+#endif
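
The !CONFIG_ARM_ARCH_TIMER stubs return -ENXIO so callers can probe the architected timer without ifdefs of their own. A minimal sketch of that call pattern follows; the fallback helper is a hypothetical platform timer, not something this header provides.

/* Illustrative caller: prefer the architected timer, fall back when the
 * stubs above report -ENXIO (CONFIG_ARM_ARCH_TIMER disabled). */
#include <linux/init.h>
#include <asm/arch_timer.h>

extern void example_soc_timer_init(void);	/* hypothetical fallback */

static void __init example_sys_timer_init(void)
{
	if (arch_timer_of_register() == 0)
		arch_timer_sched_clock_init();	/* use it for sched_clock too */
	else
		example_soc_timer_init();
}
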
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 73f84fa4f366..d36a73d7c0e8 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -110,6 +110,6 @@ extern void it8152_irq_demux(unsigned int irq, struct irq_desc *desc);
 extern void it8152_init_irq(void);
 extern int it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
 extern int it8152_pci_setup(int nr, struct pci_sys_data *sys);
-extern struct pci_bus *it8152_pci_scan_bus(int nr, struct pci_sys_data *sys);
+extern struct pci_ops it8152_ops;
 
 #endif /* __ASM_HARDWARE_IT8152_H */
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index d943b7d20f11..26c511fddf8f 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -12,13 +12,14 @@
 #define __ASM_MACH_PCI_H
 
 struct pci_sys_data;
+struct pci_ops;
 struct pci_bus;
 
 struct hw_pci {
 #ifdef CONFIG_PCI_DOMAINS
 	int		domain;
 #endif
-	struct list_head buses;
+	struct pci_ops	*ops;
 	int		nr_controllers;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
@@ -45,16 +46,10 @@ struct pci_sys_data {
 	u8		(*swizzle)(struct pci_dev *, u8 *);
 	/* IRQ mapping */
 	int		(*map_irq)(const struct pci_dev *, u8, u8);
-	struct hw_pci	*hw;
 	void		*private_data;	/* platform controller private data */
 };
 
 /*
- * This is the standard PCI-PCI bridge swizzling algorithm.
- */
-#define pci_std_swizzle	pci_common_swizzle
-
-/*
  * Call this with your hw_pci struct to initialise the PCI system.
  */
 void pci_common_init(struct hw_pci *);
@@ -62,22 +57,22 @@ void pci_common_init(struct hw_pci *);
 /*
  * PCI controllers
  */
+extern struct pci_ops iop3xx_ops;
 extern int iop3xx_pci_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *iop3xx_pci_scan_bus(int nr, struct pci_sys_data *);
 extern void iop3xx_pci_preinit(void);
 extern void iop3xx_pci_preinit_cond(void);
 
+extern struct pci_ops dc21285_ops;
 extern int dc21285_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *dc21285_scan_bus(int nr, struct pci_sys_data *);
 extern void dc21285_preinit(void);
 extern void dc21285_postinit(void);
 
+extern struct pci_ops via82c505_ops;
 extern int via82c505_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *via82c505_scan_bus(int nr, struct pci_sys_data *);
 extern void via82c505_init(void *sysdata);
 
+extern struct pci_ops pci_v3_ops;
 extern int pci_v3_setup(int nr, struct pci_sys_data *);
-extern struct pci_bus *pci_v3_scan_bus(int nr, struct pci_sys_data *);
 extern void pci_v3_preinit(void);
 extern void pci_v3_postinit(void);
 
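
With the per-controller *_scan_bus() exports gone, a board file hands the accessor ops to the core through hw_pci and lets pci_common_init() perform the scan. Below is a minimal sketch using the it8152 declarations from this series; the .map_irq hook and the initcall wrapper are assumptions about the surrounding board code, not something shown in this hunk.

/* Illustrative board-file fragment: the controller's pci_ops are supplied
 * through hw_pci instead of a per-controller scan function. */
#include <linux/init.h>
#include <asm/mach/pci.h>
#include <asm/hardware/it8152.h>

static struct hw_pci example_it8152_pci __initdata = {
	.ops		= &it8152_ops,
	.nr_controllers	= 1,
	.setup		= it8152_pci_setup,
	.map_irq	= it8152_pci_map_irq,	/* assumed hw_pci hook */
};

static int __init example_pci_init(void)
{
	pci_common_init(&example_it8152_pci);
	return 0;
}
subsys_initcall(example_pci_init);
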
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index b8e580a297e4..14965658a923 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -34,11 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a0b3cac0547c..0306bc642c0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -43,45 +43,104 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)
 
 extern unsigned int cpu_last_asid;
-#ifdef CONFIG_SMP
-DECLARE_PER_CPU(struct mm_struct *, current_mm);
-#endif
 
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);
+void cpu_set_reserved_ttbr0(void);
 
-static inline void check_context(struct mm_struct *mm)
+static inline void switch_new_context(struct mm_struct *mm)
 {
-	/*
-	 * This code is executed with interrupts enabled. Therefore,
-	 * mm->context.id cannot be updated to the latest ASID version
-	 * on a different CPU (and condition below not triggered)
-	 * without first getting an IPI to reset the context. The
-	 * alternative is to take a read_lock on mm->context.id_lock
-	 * (after changing its type to rwlock_t).
-	 */
-	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
-		__new_context(mm);
+	unsigned long flags;
+
+	__new_context(mm);
+
+	local_irq_save(flags);
+	cpu_switch_mm(mm->pgd, mm);
+	local_irq_restore(flags);
+}
 
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
+
+	/*
+	 * Required during context switch to avoid speculative page table
+	 * walking with the wrong TTBR.
+	 */
+	cpu_set_reserved_ttbr0();
+
+	if (!((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
+		/*
+		 * The ASID is from the current generation, just switch to the
+		 * new pgd. This condition is only true for calls from
+		 * context_switch() and interrupts are already disabled.
+		 */
+		cpu_switch_mm(mm->pgd, mm);
+	else if (irqs_disabled())
+		/*
+		 * Defer the new ASID allocation until after the context
+		 * switch critical region since __new_context() cannot be
+		 * called with interrupts disabled (it sends IPIs).
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		/*
+		 * That is a direct call to switch_mm() or activate_mm() with
+		 * interrupts enabled and a new context.
+		 */
+		switch_new_context(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
-#else
-
-static inline void check_context(struct mm_struct *mm)
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
 {
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM))
+		switch_new_context(current->mm);
+}
+
+#else	/* !CONFIG_CPU_HAS_ASID */
+
 #ifdef CONFIG_MMU
+
+static inline void check_and_switch_context(struct mm_struct *mm,
+					    struct task_struct *tsk)
+{
 	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
 		__check_kvm_seq(mm);
-#endif
+
+	if (irqs_disabled())
+		/*
+		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+		 * high interrupt latencies, defer the call and continue
+		 * running with the old mm. Since we only support UP systems
+		 * on non-ASID CPUs, the old mm will remain valid until the
+		 * finish_arch_post_lock_switch() call.
+		 */
+		set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+	else
+		cpu_switch_mm(mm->pgd, mm);
 }
 
+#define finish_arch_post_lock_switch \
+	finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+	if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+		struct mm_struct *mm = current->mm;
+		cpu_switch_mm(mm->pgd, mm);
+	}
+}
+
+#endif	/* CONFIG_MMU */
+
 #define init_new_context(tsk,mm)	0
 
-#endif
+#endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
 
@@ -119,12 +178,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
-#ifdef CONFIG_SMP
-		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
-		*crt_mm = next;
-#endif
-		check_context(next);
-		cpu_switch_mm(next->pgd, next);
+		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
 			cpumask_clear_cpu(cpu, mm_cpumask(prev));
 	}
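
Both branches above follow the same pattern: when the mm switch cannot be done safely with interrupts disabled (new ASID allocation sends IPIs, VIVT cache flushing is slow), set TIF_SWITCH_MM and complete the switch in finish_arch_post_lock_switch() once the scheduler has left its critical region with interrupts enabled again. A stand-alone model of that deferral is sketched below; the stub types, globals and *_model() names are invented for illustration, and unlike the kernel code (which uses current->mm) the deferred mm is passed around explicitly.

/* Toy model of the deferred switch_mm scheme; builds with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

struct mm_model { const char *name; };

static bool irqs_off;			/* stands in for irqs_disabled() */
static bool tif_switch_mm;		/* stands in for TIF_SWITCH_MM */
static struct mm_model *current_mm;

static void cpu_switch_mm_model(struct mm_model *mm)
{
	/* in the kernel this is the expensive part: ASID/TTBR update,
	 * possibly a VIVT cache flush */
	current_mm = mm;
	printf("switched to %s\n", mm->name);
}

static void check_and_switch_context_model(struct mm_model *next)
{
	if (irqs_off) {
		/* too costly now: defer and keep running on the old mm */
		tif_switch_mm = true;
		printf("deferred switch to %s\n", next->name);
	} else {
		cpu_switch_mm_model(next);
	}
}

static void finish_arch_post_lock_switch_model(struct mm_model *next)
{
	if (tif_switch_mm) {
		tif_switch_mm = false;
		cpu_switch_mm_model(next);
	}
}

int main(void)
{
	struct mm_model a = { "mm A" }, b = { "mm B" };

	/* context_switch() path: interrupts are disabled across the switch */
	irqs_off = true;
	check_and_switch_context_model(&a);
	irqs_off = false;			/* scheduler drops its locks */
	finish_arch_post_lock_switch_model(&a);

	/* direct switch_mm()/activate_mm() call with interrupts enabled */
	check_and_switch_context_model(&b);
	return 0;
}
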
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index d4c24d412a8d..68388eb4946b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
 extern void vfp_sync_hwstate(struct thread_info *);
 extern void vfp_flush_hwstate(struct thread_info *);
 
+struct user_vfp;
+struct user_vfp_exc;
+
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
+					   struct user_vfp_exc __user *);
+extern int vfp_restore_user_hwstate(struct user_vfp __user *,
+				    struct user_vfp_exc __user *);
 #endif
 
 /*
@@ -146,6 +153,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SECCOMP		21
+#define TIF_SWITCH_MM		22	/* deferred switch_mm */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 60843eb0f61c..73409e6c0251 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -7,6 +7,8 @@
 
 	.macro set_tls_v6k, tp, tmp1, tmp2
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
+	mov	\tmp1, #0
+	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	.endm
 
 	.macro set_tls_v6, tp, tmp1, tmp2
@@ -15,6 +17,8 @@
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
 	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	movne	\tmp1, #0
+	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
 