Diffstat (limited to 'include/asm-arm')
 -rw-r--r--  include/asm-arm/arch-ixp2000/io.h             20
 -rw-r--r--  include/asm-arm/arch-ixp2000/ixp2000-regs.h    2
 -rw-r--r--  include/asm-arm/mach/time.h                    21
 -rw-r--r--  include/asm-arm/signal.h                        1
 -rw-r--r--  include/asm-arm/system.h                       30
5 files changed, 41 insertions, 33 deletions
diff --git a/include/asm-arm/arch-ixp2000/io.h b/include/asm-arm/arch-ixp2000/io.h
index 5e56b47446e0..3241cd6f0778 100644
--- a/include/asm-arm/arch-ixp2000/io.h
+++ b/include/asm-arm/arch-ixp2000/io.h
@@ -17,16 +17,21 @@
 
 #define IO_SPACE_LIMIT		0xffffffff
 #define __mem_pci(a)		(a)
-#define ___io(p)		((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE))
 
 /*
- * The IXP2400 before revision B0 asserts byte lanes for PCI I/O
+ * The A? revisions of the IXP2000s assert byte lanes for PCI I/O
  * transactions the other way round (MEM transactions don't have this
- * issue), so we need to override the standard functions.  B0 and later
- * have a bit that can be set to 1 to get the 'proper' behavior, but
- * since that isn't available on the A? revisions we just keep doing
- * things manually.
+ * issue), so if we want to support those models, we need to override
+ * the standard I/O functions.
+ *
+ * B0 and later have a bit that can be set to 1 to get the proper
+ * behavior for I/O transactions, which then allows us to use the
+ * standard I/O functions.  This is what we do if the user does not
+ * explicitly ask for support for pre-B0.
  */
+#ifdef CONFIG_IXP2000_SUPPORT_BROKEN_PCI_IO
+#define ___io(p)		((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE))
+
 #define alignb(addr)		(void __iomem *)((unsigned long)(addr) ^ 3)
 #define alignw(addr)		(void __iomem *)((unsigned long)(addr) ^ 2)
 
@@ -119,6 +124,9 @@
 #define ioport_map(port, nr)	___io(port)
 
 #define ioport_unmap(addr)
+#else
+#define __io(p)			((void __iomem *)((p)+IXP2000_PCI_IO_VIRT_BASE))
+#endif
 
 
 #ifdef CONFIG_ARCH_IXDP2X01
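The alignb()/alignw() helpers above implement the pre-B0 workaround purely in software: because the broken silicon drives the byte lanes in reverse order for PCI I/O cycles, XORing the low address bits steers each access onto the lane the device actually sees. The fragment below is an illustrative sketch only (the ixp2000_broken_out* names are hypothetical and not part of this patch); it shows how such helpers are typically combined with the raw MMIO accessors.

	/*
	 * Illustrative sketch, not taken from the patch: byte and halfword
	 * port output for pre-B0 parts.  ___io() turns the port number into
	 * a virtual address, and the XOR in alignb()/alignw() corrects the
	 * byte-lane swap before the raw access goes out.
	 */
	static inline void ixp2000_broken_outb(u8 value, unsigned long port)
	{
		__raw_writeb(value, alignb(___io(port)));	/* addr ^ 3 */
	}

	static inline void ixp2000_broken_outw(u16 value, unsigned long port)
	{
		__raw_writew(value, alignw(___io(port)));	/* addr ^ 2 */
	}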
diff --git a/include/asm-arm/arch-ixp2000/ixp2000-regs.h b/include/asm-arm/arch-ixp2000/ixp2000-regs.h
index a1d9e181b10f..5eb47d4bfbf6 100644
--- a/include/asm-arm/arch-ixp2000/ixp2000-regs.h
+++ b/include/asm-arm/arch-ixp2000/ixp2000-regs.h
@@ -241,7 +241,7 @@
 #define PCI_CONTROL_BE_DEI	(1 << 21)	/* Big Endian Data Enable In */
 #define PCI_CONTROL_BE_BEO	(1 << 20)	/* Big Endian Byte Enable Out */
 #define PCI_CONTROL_BE_BEI	(1 << 19)	/* Big Endian Byte Enable In */
-#define PCI_CONTROL_PNR		(1 << 17)	/* PCI Not Reset bit */
+#define PCI_CONTROL_IEE		(1 << 17)	/* I/O cycle Endian swap Enable */
 
 #define IXP2000_PCI_RST_REL	(1 << 2)
 #define CFG_RST_DIR		(*IXP2000_PCI_CONTROL & IXP2000_PCICNTL_PCF)
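The rename above reflects how the B0 fix is exposed: bit 17 of PCI_CONTROL is the "I/O cycle Endian swap Enable" bit that makes PCI I/O transactions use the same byte-lane ordering as MEM transactions, so the standard I/O macros work unchanged. A minimal, hypothetical setup fragment (the function name is illustrative, not from this patch) would set it once during PCI initialisation on B0-or-later silicon:

	static void __init ixp2000_enable_io_endian_swap(void)
	{
		/*
		 * B0 and later only: turn on the I/O cycle Endian swap
		 * Enable bit so PCI I/O cycles behave like MEM cycles and
		 * the generic I/O accessors can be used as-is.
		 */
		*IXP2000_PCI_CONTROL |= PCI_CONTROL_IEE;
	}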
diff --git a/include/asm-arm/mach/time.h b/include/asm-arm/mach/time.h
index 5cf4fd659fd5..047980ad18d1 100644
--- a/include/asm-arm/mach/time.h
+++ b/include/asm-arm/mach/time.h
@@ -39,8 +39,29 @@ struct sys_timer {
 	void			(*suspend)(void);
 	void			(*resume)(void);
 	unsigned long		(*offset)(void);
+
+#ifdef CONFIG_NO_IDLE_HZ
+	struct dyn_tick_timer	*dyn_tick;
+#endif
+};
+
+#ifdef CONFIG_NO_IDLE_HZ
+
+#define DYN_TICK_SKIPPING	(1 << 2)
+#define DYN_TICK_ENABLED	(1 << 1)
+#define DYN_TICK_SUITABLE	(1 << 0)
+
+struct dyn_tick_timer {
+	unsigned int	state;			/* Current state */
+	int		(*enable)(void);	/* Enables dynamic tick */
+	int		(*disable)(void);	/* Disables dynamic tick */
+	void		(*reprogram)(unsigned long); /* Reprograms the timer */
+	int		(*handler)(int, void *, struct pt_regs *);
 };
 
+void timer_dyn_reprogram(void);
+#endif
+
 extern struct sys_timer *system_timer;
 extern void timer_tick(struct pt_regs *);
 
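The dyn_tick_timer hook added above is how a platform timer driver opts in to CONFIG_NO_IDLE_HZ: it supplies enable/disable/reprogram callbacks plus an interrupt handler, and hangs the structure off its sys_timer so the generic ARM time code can skip ticks while idle and use timer_dyn_reprogram() to schedule the next wakeup. A rough sketch of the wiring follows; the example_* names are hypothetical callbacks assumed to be defined by the platform, not functions from this patch.

	#ifdef CONFIG_NO_IDLE_HZ
	static struct dyn_tick_timer example_dyn_tick = {
		.enable		= example_dyn_tick_enable,	/* switch to one-shot mode */
		.disable	= example_dyn_tick_disable,	/* back to periodic ticks */
		.reprogram	= example_dyn_tick_reprogram,	/* delay to next event, in ticks */
		.handler	= example_timer_interrupt,	/* the platform tick handler */
	};
	#endif

	static struct sys_timer example_timer = {
		/* .init, .offset, etc. as usual for the platform */
	#ifdef CONFIG_NO_IDLE_HZ
		.dyn_tick	= &example_dyn_tick,
	#endif
	};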
diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h
index 46e69ae395af..760f6e65af05 100644
--- a/include/asm-arm/signal.h
+++ b/include/asm-arm/signal.h
@@ -114,6 +114,7 @@ typedef unsigned long sigset_t;
 #define SIGSTKSZ	8192
 
 #ifdef __KERNEL__
+#define SA_TIMER		0x40000000
 #define SA_IRQNOMASK	0x08000000
 #endif
 
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 39dd7008013c..3d0d2860b6db 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -145,34 +145,12 @@ extern unsigned int user_debug;
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
-#ifdef CONFIG_SMP
 /*
- * Define our own context switch locking.  This allows us to enable
- * interrupts over the context switch, otherwise we end up with high
- * interrupt latency.  The real problem area is switch_mm() which may
- * do a full cache flush.
+ * switch_mm() may do a full cache flush over the context switch,
+ * so enable interrupts over the context switch to avoid high
+ * latency.
  */
-#define prepare_arch_switch(rq,next)					\
-do {									\
-	spin_lock(&(next)->switch_lock);				\
-	spin_unlock_irq(&(rq)->lock);					\
-} while (0)
-
-#define finish_arch_switch(rq,prev)					\
-	spin_unlock(&(prev)->switch_lock)
-
-#define task_running(rq,p)						\
-	((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock))
-#else
-/*
- * Our UP-case is more simple, but we assume knowledge of how
- * spin_unlock_irq() and friends are implemented.  This avoids
- * us needlessly decrementing and incrementing the preempt count.
- */
-#define prepare_arch_switch(rq,next)	local_irq_enable()
-#define finish_arch_switch(rq,prev)	spin_unlock(&(rq)->lock)
-#define task_running(rq,p)		((rq)->curr == (p))
-#endif
+#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
 
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'