author    Linus Torvalds <torvalds@woody.osdl.org>  2006-12-10 13:00:00 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-10 13:00:00 -0500
commit    edb16bec41db68b22799a5fbad82c3891e637565 (patch)
tree      d019d2fa8fbf374d810f66e1a210648e53b0c593 /include
parent    bb7320d1d96dc2e479180ae8e7a112caf0726ace (diff)
parent    f0882589666440d573f657cb3a1d5f66f3caa157 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fix several kprobes bugs.
  [SPARC64]: Update defconfig.
  [SPARC64]: dma remove extra brackets
  [SPARC{32,64}]: Propagate ptrace_traceme() return value.
  [SPARC64]: Replace kmalloc+memset with kzalloc
  [SPARC]: Check kzalloc() return value in SUN4D irq/iommu init.
  [SPARC]: Replace kmalloc+memset with kzalloc
  [SPARC64]: Run ctrl-alt-del action for sun4v powerdown request.
  [SPARC64]: Unaligned accesses to userspace are hard errors.
  [SPARC64]: Call do_mathemu on illegal instruction traps too.
  [SPARC64]: Update defconfig.
  [SPARC64]: Add irqtrace/stacktrace/lockdep support.
Diffstat (limited to 'include')
-rw-r--r--  include/asm-sparc64/dma.h       |  6
-rw-r--r--  include/asm-sparc64/irqflags.h  | 89
-rw-r--r--  include/asm-sparc64/kprobes.h   | 11
-rw-r--r--  include/asm-sparc64/rwsem.h     | 32
-rw-r--r--  include/asm-sparc64/system.h    | 49
-rw-r--r--  include/asm-sparc64/ttable.h    | 45
6 files changed, 169 insertions(+), 63 deletions(-)
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index 27f65972b3bb..93e5a062df88 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -152,9 +152,9 @@ extern void dvma_init(struct sbus_bus *);
 #define DMA_MAXEND(addr) (0x01000000UL-(((unsigned long)(addr))&0x00ffffffUL))
 
 /* Yes, I hack a lot of elisp in my spare time... */
-#define DMA_ERROR_P(regs)  (((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
-#define DMA_IRQ_P(regs)    (((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)))
-#define DMA_WRITE_P(regs)  (((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
+#define DMA_ERROR_P(regs)  ((sbus_readl((regs) + DMA_CSR) & DMA_HNDL_ERROR))
+#define DMA_IRQ_P(regs)    ((sbus_readl((regs) + DMA_CSR)) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+#define DMA_WRITE_P(regs)  ((sbus_readl((regs) + DMA_CSR) & DMA_ST_WRITE))
 #define DMA_OFF(__regs) \
 do {	u32 tmp = sbus_readl((__regs) + DMA_CSR); \
 	tmp &= ~DMA_ENABLE; \
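
The three predicates above are plain read-only tests on the DMA CSR; the patch simply drops the stray extra bracket that left DMA_ERROR_P and DMA_WRITE_P unbalanced. A minimal, hypothetical use in an SBUS DMA interrupt path could look like the sketch below; the helper name and the error handling are illustrative and not part of this patch.

static int example_dma_pending(void __iomem *regs)
{
	if (!DMA_IRQ_P(regs))		/* neither DMA_HNDL_INTR nor DMA_HNDL_ERROR is set */
		return 0;
	if (DMA_ERROR_P(regs))
		DMA_OFF(regs);		/* illustrative only: stop the channel on error */
	return 1;
}
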
diff --git a/include/asm-sparc64/irqflags.h b/include/asm-sparc64/irqflags.h
new file mode 100644
index 000000000000..024fc54d0682
--- /dev/null
+++ b/include/asm-sparc64/irqflags.h
@@ -0,0 +1,89 @@
+/*
+ * include/asm-sparc64/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() functions from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#ifndef __ASSEMBLY__
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__(
+		"rdpr	%%pil, %0"
+		: "=r" (flags)
+	);
+
+	return flags;
+}
+
+#define raw_local_save_flags(flags) \
+		do { (flags) = __raw_local_save_flags(); } while (0)
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	__asm__ __volatile__(
+		"wrpr	%0, %%pil"
+		: /* no output */
+		: "r" (flags)
+		: "memory"
+	);
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	__asm__ __volatile__(
+		"wrpr	15, %%pil"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory"
+	);
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	__asm__ __volatile__(
+		"wrpr	0, %%pil"
+		: /* no outputs */
+		: /* no inputs */
+		: "memory"
+	);
+}
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return (flags > 0);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_disable();
+
+	return flags;
+}
+
+#define raw_local_irq_save(flags) \
+		do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif /* (__ASSEMBLY__) */
+
+#endif /* !(_ASM_IRQFLAGS_H) */
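
The new header is the sparc64 backend for the generic IRQ-flags layer: the "flags" value is simply the %pil (processor interrupt level) register, so zero means interrupts fully enabled and any non-zero PIL means some levels are masked. A minimal sketch of how the raw primitives compose, assuming a kernel context where they may be called directly (the function name is illustrative):

static void example_pil_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* reads the current %pil, then raises it to 15 */
	/* ... work that must not be interrupted ... */
	raw_local_irq_restore(flags);	/* writes the saved PIL back */
}
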
diff --git a/include/asm-sparc64/kprobes.h b/include/asm-sparc64/kprobes.h
index c9f5c34d318c..becc38fa06c5 100644
--- a/include/asm-sparc64/kprobes.h
+++ b/include/asm-sparc64/kprobes.h
@@ -13,7 +13,11 @@ typedef u32 kprobe_opcode_t;
 #define JPROBE_ENTRY(pentry)	(kprobe_opcode_t *)pentry
 #define arch_remove_kprobe(p)	do {} while (0)
 #define ARCH_INACTIVE_KPROBE_COUNT 0
-#define flush_insn_slot(p)	do { } while (0)
+
+#define flush_insn_slot(p)		\
+do {	flushi(&(p)->ainsn.insn[0]);	\
+	flushi(&(p)->ainsn.insn[1]);	\
+} while (0)
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
@@ -23,7 +27,7 @@ struct arch_specific_insn {
 
 struct prev_kprobe {
 	struct kprobe *kp;
-	unsigned int status;
+	unsigned long status;
 	unsigned long orig_tnpc;
 	unsigned long orig_tstate_pil;
 };
@@ -33,10 +37,7 @@ struct kprobe_ctlblk {
 	unsigned long kprobe_status;
 	unsigned long kprobe_orig_tnpc;
 	unsigned long kprobe_orig_tstate_pil;
-	long *jprobe_saved_esp;
 	struct pt_regs jprobe_saved_regs;
-	struct pt_regs *jprobe_saved_regs_location;
-	struct sparc_stackf jprobe_saved_stack;
 	struct prev_kprobe prev_kprobe;
 };
 
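
The flush_insn_slot() change matters because sparc64 single-steps a copy of the probed instruction out of line: ainsn.insn[] holds the copied opcode plus a follow-up trap word, and both must be flushed from the I-cache before they are executed. A rough, hypothetical sketch of the arch_prepare_kprobe()-style sequence this supports (the real sequence lives in arch/sparc64/kernel/kprobes.c):

	/* p is a struct kprobe * whose address has already been validated */
	p->ainsn.insn[0] = *p->addr;	/* copy of the original instruction        */
	/* ainsn.insn[1] holds the trap word the arch code plants after it         */
	flush_insn_slot(p);		/* now flushi()es both words, per the macro */
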
diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h
index cef5e8270421..1294b7ce5d06 100644
--- a/include/asm-sparc64/rwsem.h
+++ b/include/asm-sparc64/rwsem.h
@@ -23,20 +23,33 @@ struct rw_semaphore {
 	signed int count;
 	spinlock_t wait_lock;
 	struct list_head wait_list;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+#else
+# define __RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
+{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
+  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
-static __inline__ void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
+extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
+			 struct lock_class_key *key);
+
+#define init_rwsem(sem)					\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	__init_rwsem((sem), #sem, &__key);		\
+} while (0)
 
 extern void __down_read(struct rw_semaphore *sem);
 extern int __down_read_trylock(struct rw_semaphore *sem);
@@ -46,6 +59,11 @@ extern void __up_read(struct rw_semaphore *sem);
 extern void __up_write(struct rw_semaphore *sem);
 extern void __downgrade_write(struct rw_semaphore *sem);
 
+static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	__down_write(sem);
+}
+
 static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
 	return atomic_add_return(delta, (atomic_t *)(&sem->count));
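
For callers the rwsem API is unchanged; the lockdep plumbing means each init_rwsem() site now supplies its own static lock_class_key plus a class name stringified from the expression passed in. A minimal usage sketch, with illustrative names that are not part of the patch:

static struct rw_semaphore example_sem;

static void example_init(void)
{
	/* expands to __init_rwsem(&example_sem, "&example_sem", &__key) */
	init_rwsem(&example_sem);
}

static void example_reader(void)
{
	down_read(&example_sem);
	/* ... read-side critical section, tracked by lockdep as one class ... */
	up_read(&example_sem);
}

Note that __down_write_nested() simply falls through to __down_write(): sparc64 picks up the lockdep annotations without adding a separate nested-acquire path.
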
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index a8b7432c9a70..32281acb878b 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -7,6 +7,9 @@
 #include <asm/visasm.h>
 
 #ifndef __ASSEMBLY__
+
+#include <linux/irqflags.h>
+
 /*
  * Sparc (general) CPU types
  */
@@ -72,52 +75,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
 
 #endif
 
-#define setipl(__new_ipl) \
-	__asm__ __volatile__("wrpr	%0, %%pil"  : : "r" (__new_ipl) : "memory")
-
-#define local_irq_disable() \
-	__asm__ __volatile__("wrpr	15, %%pil" : : : "memory")
-
-#define local_irq_enable() \
-	__asm__ __volatile__("wrpr	0, %%pil" : : : "memory")
-
-#define getipl() \
-({ unsigned long retval; __asm__ __volatile__("rdpr	%%pil, %0" : "=r" (retval)); retval; })
-
-#define swap_pil(__new_pil) \
-({	unsigned long retval; \
-	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
-			     "wrpr	%1, %%pil" \
-			     : "=&r" (retval) \
-			     : "r" (__new_pil) \
-			     : "memory"); \
-	retval; \
-})
-
-#define read_pil_and_cli() \
-({	unsigned long retval; \
-	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
-			     "wrpr	15, %%pil" \
-			     : "=r" (retval) \
-			     : : "memory"); \
-	retval; \
-})
-
-#define local_save_flags(flags)		((flags) = getipl())
-#define local_irq_save(flags)		((flags) = read_pil_and_cli())
-#define local_irq_restore(flags)	setipl((flags))
-
-/* On sparc64 IRQ flags are the PIL register.  A value of zero
- * means all interrupt levels are enabled, any other value means
- * only IRQ levels greater than that value will be received.
- * Consequently this means that the lowest IRQ level is one.
- */
-#define irqs_disabled() \
-({	unsigned long flags;	\
-	local_save_flags(flags);\
-	(flags > 0);		\
-})
-
 #define nop() 		__asm__ __volatile__ ("nop")
 
 #define read_barrier_depends()		do { } while(0)
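
With the open-coded macros removed here, sparc64's local_irq_*() and irqs_disabled() now come from <linux/irqflags.h> wrapping the raw_* helpers added in asm-sparc64/irqflags.h, so in-tree callers need no changes; the old getipl()/setipl()/swap_pil()/read_pil_and_cli() helpers go away, and their closest equivalents are the raw_* routines. A small illustrative check of the PIL-as-flags convention the deleted comment described (zero means fully enabled, non-zero means that level and below are blocked); the function name is a stand-in:

static void example_irq_save_restore(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* flags holds the caller's previous %pil */
	WARN_ON(!irqs_disabled());	/* PIL is now 15, so this reports disabled */
	local_irq_restore(flags);	/* back to the caller's interrupt level */
}
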
diff --git a/include/asm-sparc64/ttable.h b/include/asm-sparc64/ttable.h
index f2352606a79f..c2a16e188499 100644
--- a/include/asm-sparc64/ttable.h
+++ b/include/asm-sparc64/ttable.h
@@ -137,10 +137,49 @@
 #endif
 #define BREAKPOINT_TRAP TRAP(breakpoint_trap)
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+#define TRAP_IRQ(routine, level)			\
+	rdpr	%pil, %g2;				\
+	wrpr	%g0, 15, %pil;				\
+	sethi	%hi(1f-4), %g7;				\
+	ba,pt	%xcc, etrap_irq;			\
+	 or	%g7, %lo(1f-4), %g7;			\
+	nop;						\
+	nop;						\
+	nop;						\
+	.subsection	2;				\
+1:	call	trace_hardirqs_off;			\
+	 nop;						\
+	mov	level, %o0;				\
+	call	routine;				\
+	 add	%sp, PTREGS_OFF, %o1;			\
+	ba,a,pt	%xcc, rtrap_irq;			\
+	.previous;
+
+#define TICK_SMP_IRQ					\
+	rdpr	%pil, %g2;				\
+	wrpr	%g0, 15, %pil;				\
+	sethi	%hi(1f-4), %g7;				\
+	ba,pt	%xcc, etrap_irq;			\
+	 or	%g7, %lo(1f-4), %g7;			\
+	nop;						\
+	nop;						\
+	nop;						\
+	.subsection	2;				\
+1:	call	trace_hardirqs_off;			\
+	 nop;						\
+	call	smp_percpu_timer_interrupt;		\
+	 add	%sp, PTREGS_OFF, %o0;			\
+	ba,a,pt	%xcc, rtrap_irq;			\
+	.previous;
+
+#else
+
 #define TRAP_IRQ(routine, level)			\
 	rdpr	%pil, %g2;				\
 	wrpr	%g0, 15, %pil;				\
-	b,pt	%xcc, etrap_irq;			\
+	ba,pt	%xcc, etrap_irq;			\
 	 rd	%pc, %g7;				\
 	mov	level, %o0;				\
 	call	routine;				\
@@ -151,12 +190,14 @@
 	rdpr	%pil, %g2;			\
 	wrpr	%g0, 15, %pil;			\
 	sethi	%hi(109f), %g7;			\
-	b,pt	%xcc, etrap_irq;		\
+	ba,pt	%xcc, etrap_irq;		\
 109:	 or	%g7, %lo(109b), %g7;		\
 	call	smp_percpu_timer_interrupt;	\
 	 add	%sp, PTREGS_OFF, %o0;		\
 	ba,a,pt	%xcc, rtrap_irq;
 
+#endif
+
 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)
 
 #define BTRAP(lvl) TRAP_ARG(bad_trap, lvl)
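
The CONFIG_TRACE_IRQFLAGS variants above differ from the plain ones only in detouring through an out-of-line stub (.subsection 2) that tells lockdep interrupts were just hard-disabled before the handler runs. Expressed as a rough C-level sketch of what one TRAP_IRQ() expansion arranges, with example_handler standing in for the macro's "routine" argument:

static void example_handler(int level, struct pt_regs *regs);

static void example_trap_irq_path(int level, struct pt_regs *regs)
{
	/* the macro has already saved %pil in %g2 and raised it to 15 */
	trace_hardirqs_off();		/* lockdep: hard IRQs are now off */
	example_handler(level, regs);	/* the "routine" passed to TRAP_IRQ() */
	/* rtrap_irq then unwinds the trap frame and restores the saved PIL */
}
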