Diffstat (limited to 'arch/arm/common')
-rw-r--r--   arch/arm/common/Kconfig      |   6
-rw-r--r--   arch/arm/common/Makefile     |   2
-rw-r--r--   arch/arm/common/clkdev.c     | 179
-rw-r--r--   arch/arm/common/dmabounce.c  |  18
-rw-r--r--   arch/arm/common/gic.c        | 300
-rw-r--r--   arch/arm/common/icst.c       |   2
-rw-r--r--   arch/arm/common/it8152.c     |  21
-rw-r--r--   arch/arm/common/locomo.c     |  41
-rw-r--r--   arch/arm/common/pl330.c      |  11
-rw-r--r--   arch/arm/common/sa1111.c     | 130
-rw-r--r--   arch/arm/common/scoop.c      |  12
-rw-r--r--   arch/arm/common/timer-sp.c   | 172
-rw-r--r--   arch/arm/common/uengine.c    |  18
-rw-r--r--   arch/arm/common/vic.c        | 109
14 files changed, 540 insertions(+), 481 deletions(-)
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 0a34c8186924..4b71766fb21d 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -6,6 +6,8 @@ config ARM_VIC
 
 config ARM_VIC_NR
 	int
+	default 4 if ARCH_S5PV210
+	default 3 if ARCH_S5PC100
 	default 2
 	depends on ARM_VIC
 	help
@@ -37,7 +39,3 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
 	bool
-
-config COMMON_CLKDEV
-	bool
-	select HAVE_CLK
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index e6e8664a9413..6ea9b6f3607a 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -16,4 +16,4 @@ obj-$(CONFIG_SHARP_SCOOP) += scoop.o
 obj-$(CONFIG_ARCH_IXP2000)	+= uengine.o
 obj-$(CONFIG_ARCH_IXP23XX)	+= uengine.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
-obj-$(CONFIG_COMMON_CLKDEV)	+= clkdev.o
+obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
deleted file mode 100644
index e2b2bb66e094..000000000000
--- a/arch/arm/common/clkdev.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * arch/arm/common/clkdev.c
- *
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*
- * Find the correct struct clk for the device and connection ID.
- * We do slightly fuzzy matching here:
- * An entry with a NULL ID is assumed to be a wildcard.
- * If an entry has a device ID, it must match
- * If an entry has a connection ID, it must match
- * Then we take the most specific entry - with the following
- * order of precedence: dev+con > dev only > con only.
- */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
-{
-	struct clk_lookup *p;
-	struct clk *clk = NULL;
-	int match, best = 0;
-
-	list_for_each_entry(p, &clocks, node) {
-		match = 0;
-		if (p->dev_id) {
-			if (!dev_id || strcmp(p->dev_id, dev_id))
-				continue;
-			match += 2;
-		}
-		if (p->con_id) {
-			if (!con_id || strcmp(p->con_id, con_id))
-				continue;
-			match += 1;
-		}
-
-		if (match > best) {
-			clk = p->clk;
-			if (match != 3)
-				best = match;
-			else
-				break;
-		}
-	}
-	return clk;
-}
-
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
-{
-	struct clk *clk;
-
-	mutex_lock(&clocks_mutex);
-	clk = clk_find(dev_id, con_id);
-	if (clk && !__clk_get(clk))
-		clk = NULL;
-	mutex_unlock(&clocks_mutex);
-
-	return clk ? clk : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get_sys);
-
-struct clk *clk_get(struct device *dev, const char *con_id)
-{
-	const char *dev_id = dev ? dev_name(dev) : NULL;
-
-	return clk_get_sys(dev_id, con_id);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-	__clk_put(clk);
-}
-EXPORT_SYMBOL(clk_put);
-
-void clkdev_add(struct clk_lookup *cl)
-{
-	mutex_lock(&clocks_mutex);
-	list_add_tail(&cl->node, &clocks);
-	mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clkdev_add);
-
-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
-{
-	mutex_lock(&clocks_mutex);
-	while (num--) {
-		list_add_tail(&cl->node, &clocks);
-		cl++;
-	}
-	mutex_unlock(&clocks_mutex);
-}
-
-#define MAX_DEV_ID	20
-#define MAX_CON_ID	16
-
-struct clk_lookup_alloc {
-	struct clk_lookup cl;
-	char	dev_id[MAX_DEV_ID];
-	char	con_id[MAX_CON_ID];
-};
-
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-	const char *dev_fmt, ...)
-{
-	struct clk_lookup_alloc *cla;
-
-	cla = kzalloc(sizeof(*cla), GFP_KERNEL);
-	if (!cla)
-		return NULL;
-
-	cla->cl.clk = clk;
-	if (con_id) {
-		strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
-		cla->cl.con_id = cla->con_id;
-	}
-
-	if (dev_fmt) {
-		va_list ap;
-
-		va_start(ap, dev_fmt);
-		vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
-		cla->cl.dev_id = cla->dev_id;
-		va_end(ap);
-	}
-
-	return &cla->cl;
-}
-EXPORT_SYMBOL(clkdev_alloc);
-
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
-	struct device *dev)
-{
-	struct clk *r = clk_get(dev, id);
-	struct clk_lookup *l;
-
-	if (IS_ERR(r))
-		return PTR_ERR(r);
-
-	l = clkdev_alloc(r, alias, alias_dev_name);
-	clk_put(r);
-	if (!l)
-		return -ENODEV;
-	clkdev_add(l);
-	return 0;
-}
-EXPORT_SYMBOL(clk_add_alias);
-
-/*
- * clkdev_drop - remove a clock dynamically allocated
- */
-void clkdev_drop(struct clk_lookup *cl)
-{
-	mutex_lock(&clocks_mutex);
-	list_del(&cl->node);
-	mutex_unlock(&clocks_mutex);
-	kfree(cl);
-}
-EXPORT_SYMBOL(clkdev_drop);
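The clk_find() helper removed above scores each lookup entry so that a dev_id+con_id match beats a dev_id-only match, which beats a con_id-only wildcard. A minimal standalone sketch of that precedence rule follows; it is plain userspace C with made-up table entries for illustration, not kernel code and not part of this patch.

/* Illustrative only: the clk_find() scoring rule, reproduced standalone. */
#include <stdio.h>
#include <string.h>

struct lookup { const char *dev_id, *con_id, *clk; };

static const char *find(const struct lookup *tbl, int n,
			const char *dev_id, const char *con_id)
{
	const char *clk = NULL;
	int best = 0;

	for (int i = 0; i < n; i++) {
		int match = 0;
		if (tbl[i].dev_id) {
			if (!dev_id || strcmp(tbl[i].dev_id, dev_id))
				continue;
			match += 2;		/* device ID matched */
		}
		if (tbl[i].con_id) {
			if (!con_id || strcmp(tbl[i].con_id, con_id))
				continue;
			match += 1;		/* connection ID matched */
		}
		if (match > best) {
			clk = tbl[i].clk;
			if (match == 3)		/* dev+con: can't do better */
				break;
			best = match;
		}
	}
	return clk;
}

int main(void)
{
	/* hypothetical entries */
	const struct lookup tbl[] = {
		{ NULL,        "apb_pclk", "wildcard-pclk" },
		{ "dev:uart0", NULL,       "uart0-clk"     },
		{ "dev:uart0", "apb_pclk", "uart0-pclk"    },
	};

	printf("%s\n", find(tbl, 3, "dev:uart0", "apb_pclk"));	/* uart0-pclk */
	printf("%s\n", find(tbl, 3, "dev:uart0", NULL));	/* uart0-clk */
	printf("%s\n", find(tbl, 3, "dev:mmc0",  "apb_pclk"));	/* wildcard-pclk */
	return 0;
}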
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cc0a932bbea9..841df7d21c2f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -255,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
 	if (buf == 0) {
 		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
 		       __func__, ptr);
-		return 0;
+		return ~0;
 	}
 
 	dev_dbg(dev,
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 
 	return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 7dfa9a85bc0c..4ddd0a6ac7ff 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -35,83 +35,92 @@
 
 static DEFINE_SPINLOCK(irq_controller_lock);
 
+/* Address of GIC 0 CPU interface */
+void __iomem *gic_cpu_base_addr __read_mostly;
+
 struct gic_chip_data {
 	unsigned int irq_offset;
 	void __iomem *dist_base;
 	void __iomem *cpu_base;
 };
 
+/*
+ * Supported arch specific GIC irq extension.
+ * Default make them NULL.
+ */
+struct irq_chip gic_arch_extn = {
+	.irq_eoi	= NULL,
+	.irq_mask	= NULL,
+	.irq_unmask	= NULL,
+	.irq_retrigger	= NULL,
+	.irq_set_type	= NULL,
+	.irq_set_wake	= NULL,
+};
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR	1
 #endif
 
-static struct gic_chip_data gic_data[MAX_GIC_NR];
+static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
 
-static inline void __iomem *gic_dist_base(unsigned int irq)
+static inline void __iomem *gic_dist_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->dist_base;
 }
 
-static inline void __iomem *gic_cpu_base(unsigned int irq)
+static inline void __iomem *gic_cpu_base(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
 	return gic_data->cpu_base;
 }
 
-static inline unsigned int gic_irq(unsigned int irq)
+static inline unsigned int gic_irq(struct irq_data *d)
 {
-	struct gic_chip_data *gic_data = get_irq_chip_data(irq);
-	return irq - gic_data->irq_offset;
+	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+	return d->irq - gic_data->irq_offset;
 }
 
 /*
  * Routines to acknowledge, disable and enable interrupts
- *
- * Linux assumes that when we're done with an interrupt we need to
- * unmask it, in the same way we need to unmask an interrupt when
- * we first enable it.
- *
- * The GIC has a separate notion of "end of interrupt" to re-enable
- * an interrupt after handling, in order to support hardware
- * prioritisation.
- *
- * We can make the GIC behave in the way that Linux expects by making
- * our "acknowledge" routine disable the interrupt, then mark it as
- * complete.
  */
-static void gic_ack_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
-	writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
+	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
+	if (gic_arch_extn.irq_mask)
+		gic_arch_extn.irq_mask(d);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_mask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	u32 mask = 1 << (d->irq % 32);
 
 	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
+	if (gic_arch_extn.irq_unmask)
+		gic_arch_extn.irq_unmask(d);
+	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
 	spin_unlock(&irq_controller_lock);
 }
 
-static void gic_unmask_irq(unsigned int irq)
+static void gic_eoi_irq(struct irq_data *d)
 {
-	u32 mask = 1 << (irq % 32);
+	if (gic_arch_extn.irq_eoi) {
+		spin_lock(&irq_controller_lock);
+		gic_arch_extn.irq_eoi(d);
+		spin_unlock(&irq_controller_lock);
+	}
 
-	spin_lock(&irq_controller_lock);
-	writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
-	spin_unlock(&irq_controller_lock);
+	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
-static int gic_set_type(unsigned int irq, unsigned int type)
+static int gic_set_type(struct irq_data *d, unsigned int type)
 {
-	void __iomem *base = gic_dist_base(irq);
-	unsigned int gicirq = gic_irq(irq);
+	void __iomem *base = gic_dist_base(d);
+	unsigned int gicirq = gic_irq(d);
 	u32 enablemask = 1 << (gicirq % 32);
 	u32 enableoff = (gicirq / 32) * 4;
 	u32 confmask = 0x2 << ((gicirq % 16) * 2);
@@ -128,7 +137,10 @@ static int gic_set_type(unsigned int irq, unsigned int type)
 
 	spin_lock(&irq_controller_lock);
 
-	val = readl(base + GIC_DIST_CONFIG + confoff);
+	if (gic_arch_extn.irq_set_type)
+		gic_arch_extn.irq_set_type(d, type);
+
+	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
 	if (type == IRQ_TYPE_LEVEL_HIGH)
 		val &= ~confmask;
 	else if (type == IRQ_TYPE_EDGE_RISING)
@@ -138,52 +150,80 @@ static int gic_set_type(unsigned int irq, unsigned int type)
 	 * As recommended by the spec, disable the interrupt before changing
 	 * the configuration
 	 */
-	if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
-		writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
 		enabled = true;
 	}
 
-	writel(val, base + GIC_DIST_CONFIG + confoff);
+	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
 
 	if (enabled)
-		writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
 
 	spin_unlock(&irq_controller_lock);
 
 	return 0;
 }
 
+static int gic_retrigger(struct irq_data *d)
+{
+	if (gic_arch_extn.irq_retrigger)
+		return gic_arch_extn.irq_retrigger(d);
+
+	return -ENXIO;
+}
+
 #ifdef CONFIG_SMP
-static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
 {
-	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
-	unsigned int shift = (irq % 4) * 8;
+	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
+	unsigned int shift = (d->irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
-	u32 val;
+	u32 val, mask, bit;
+
+	if (cpu >= 8)
+		return -EINVAL;
+
+	mask = 0xff << shift;
+	bit = 1 << (cpu + shift);
 
 	spin_lock(&irq_controller_lock);
-	irq_desc[irq].node = cpu;
-	val = readl(reg) & ~(0xff << shift);
-	val |= 1 << (cpu + shift);
-	writel(val, reg);
+	d->node = cpu;
+	val = readl_relaxed(reg) & ~mask;
+	writel_relaxed(val | bit, reg);
 	spin_unlock(&irq_controller_lock);
 
 	return 0;
 }
 #endif
 
+#ifdef CONFIG_PM
+static int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+	int ret = -ENXIO;
+
+	if (gic_arch_extn.irq_set_wake)
+		ret = gic_arch_extn.irq_set_wake(d, on);
+
+	return ret;
+}
+
+#else
+#define gic_set_wake	NULL
+#endif
+
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
-	struct gic_chip_data *chip_data = get_irq_data(irq);
-	struct irq_chip *chip = get_irq_chip(irq);
+	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_get_chip(irq);
 	unsigned int cascade_irq, gic_irq;
 	unsigned long status;
 
-	/* primary controller ack'ing */
-	chip->ack(irq);
+	chained_irq_enter(chip, desc);
 
 	spin_lock(&irq_controller_lock);
-	status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
+	status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
 	spin_unlock(&irq_controller_lock);
 
 	gic_irq = (status & 0x3ff);
@@ -197,107 +237,153 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 	generic_handle_irq(cascade_irq);
 
  out:
-	/* primary controller unmasking */
-	chip->unmask(irq);
+	chained_irq_exit(chip, desc);
 }
 
 static struct irq_chip gic_chip = {
 	.name			= "GIC",
-	.ack			= gic_ack_irq,
-	.mask			= gic_mask_irq,
-	.unmask			= gic_unmask_irq,
-	.set_type		= gic_set_type,
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_retrigger		= gic_retrigger,
 #ifdef CONFIG_SMP
-	.set_affinity		= gic_set_cpu,
+	.irq_set_affinity	= gic_set_affinity,
 #endif
+	.irq_set_wake		= gic_set_wake,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
-	if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
+	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
 		BUG();
-	set_irq_chained_handler(irq, gic_handle_cascade_irq);
+	irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
-void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
+static void __init gic_dist_init(struct gic_chip_data *gic,
 	unsigned int irq_start)
 {
-	unsigned int max_irq, i;
+	unsigned int gic_irqs, irq_limit, i;
+	void __iomem *base = gic->dist_base;
 	u32 cpumask = 1 << smp_processor_id();
 
-	if (gic_nr >= MAX_GIC_NR)
-		BUG();
-
 	cpumask |= cpumask << 8;
 	cpumask |= cpumask << 16;
 
-	gic_data[gic_nr].dist_base = base;
-	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
-
-	writel(0, base + GIC_DIST_CTRL);
+	writel_relaxed(0, base + GIC_DIST_CTRL);
 
 	/*
 	 * Find out how many interrupts are supported.
-	 */
-	max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
-	max_irq = (max_irq + 1) * 32;
-
-	/*
 	 * The GIC only supports up to 1020 interrupt sources.
-	 * Limit this to either the architected maximum, or the
-	 * platform maximum.
 	 */
-	if (max_irq > max(1020, NR_IRQS))
-		max_irq = max(1020, NR_IRQS);
+	gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = (gic_irqs + 1) * 32;
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
 
 	/*
 	 * Set all global interrupts to be level triggered, active low.
 	 */
-	for (i = 32; i < max_irq; i += 16)
-		writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
+	for (i = 32; i < gic_irqs; i += 16)
+		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
 
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
-	for (i = 32; i < max_irq; i += 4)
-		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
+
+	/*
+	 * Set priority on all global interrupts.
+	 */
+	for (i = 32; i < gic_irqs; i += 4)
+		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
 	/*
-	 * Set priority on all interrupts.
+	 * Disable all interrupts. Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
 	 */
-	for (i = 0; i < max_irq; i += 4)
-		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
+	for (i = 32; i < gic_irqs; i += 32)
+		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
 	/*
-	 * Disable all interrupts.
+	 * Limit number of interrupts registered to the platform maximum
 	 */
-	for (i = 0; i < max_irq; i += 32)
-		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+	irq_limit = gic->irq_offset + gic_irqs;
+	if (WARN_ON(irq_limit > NR_IRQS))
+		irq_limit = NR_IRQS;
 
 	/*
 	 * Setup the Linux IRQ subsystem.
 	 */
-	for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
-		set_irq_chip(i, &gic_chip);
-		set_irq_chip_data(i, &gic_data[gic_nr]);
-		set_irq_handler(i, handle_level_irq);
+	for (i = irq_start; i < irq_limit; i++) {
+		irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
+		irq_set_chip_data(i, gic);
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}
 
-	writel(1, base + GIC_DIST_CTRL);
+	writel_relaxed(1, base + GIC_DIST_CTRL);
 }
 
-void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
+static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 {
-	if (gic_nr >= MAX_GIC_NR)
-		BUG();
+	void __iomem *dist_base = gic->dist_base;
+	void __iomem *base = gic->cpu_base;
+	int i;
 
-	gic_data[gic_nr].cpu_base = base;
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
 
-	writel(0xf0, base + GIC_CPU_PRIMASK);
-	writel(1, base + GIC_CPU_CTRL);
+	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
+	writel_relaxed(1, base + GIC_CPU_CTRL);
+}
+
+void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
+	void __iomem *dist_base, void __iomem *cpu_base)
+{
+	struct gic_chip_data *gic;
+
+	BUG_ON(gic_nr >= MAX_GIC_NR);
+
+	gic = &gic_data[gic_nr];
+	gic->dist_base = dist_base;
+	gic->cpu_base = cpu_base;
+	gic->irq_offset = (irq_start - 1) & ~31;
+
+	if (gic_nr == 0)
+		gic_cpu_base_addr = cpu_base;
+
+	gic_dist_init(gic, irq_start);
+	gic_cpu_init(gic);
+}
+
+void __cpuinit gic_secondary_init(unsigned int gic_nr)
+{
+	BUG_ON(gic_nr >= MAX_GIC_NR);
+
+	gic_cpu_init(&gic_data[gic_nr]);
+}
+
+void __cpuinit gic_enable_ppi(unsigned int irq)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	irq_set_status_flags(irq, IRQ_NOPROBE);
+	gic_unmask_irq(irq_get_irq_data(irq));
+	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_SMP
@@ -305,7 +391,13 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
 	unsigned long map = *cpus_addr(*mask);
 
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
 	/* this always happens on GIC0 */
-	writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
+	writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
 }
 #endif
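With this change the distributor and CPU-interface setup collapses into the new gic_init() and gic_secondary_init() entry points whose signatures appear above. A hedged sketch of how a platform's IRQ init might call them; the header path, register addresses and IRQ offset are assumptions for illustration, not taken from this patch.

/* Illustrative board-level sketch only, not part of this patch. */
#include <asm/hardware/gic.h>	/* assumed location of the gic_init() prototype */

#define BOARD_GIC_DIST_BASE	((void __iomem *)0xfe001000)	/* hypothetical virtual mapping */
#define BOARD_GIC_CPU_BASE	((void __iomem *)0xfe000100)	/* hypothetical virtual mapping */
#define BOARD_IRQ_GIC_START	29				/* hypothetical first usable IRQ */

static void __init board_init_irq(void)
{
	/* boot CPU: one call now programs the distributor and its own CPU interface */
	gic_init(0, BOARD_IRQ_GIC_START, BOARD_GIC_DIST_BASE, BOARD_GIC_CPU_BASE);
}

static void __cpuinit board_secondary_init(unsigned int cpu)
{
	/* secondary CPUs only initialise their banked CPU interface */
	gic_secondary_init(0);
}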
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 9a7f09cff300..2dc6da70ae59 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -8,7 +8,7 @@
  * published by the Free Software Foundation.
  *
  * Support functions for calculating clocks/divisors for the ICST307
- * clock generators. See http://www.icst.com/ for more information
+ * clock generators. See http://www.idt.com/ for more information
  * on these devices.
  *
  * This is an almost identical implementation to the ICST525 clock generator.
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 1bec96e85196..7a21927c52e1 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -31,8 +31,10 @@
 
 #define MAX_SLOTS		21
 
-static void it8152_mask_irq(unsigned int irq)
+static void it8152_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq >= IT8152_LD_IRQ(0)) {
 		__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) |
 				(1 << (irq - IT8152_LD_IRQ(0)))),
@@ -48,8 +50,10 @@ static void it8152_mask_irq(unsigned int irq)
 	}
 }
 
-static void it8152_unmask_irq(unsigned int irq)
+static void it8152_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq >= IT8152_LD_IRQ(0)) {
 		__raw_writel((__raw_readl(IT8152_INTC_LDCNIMR) &
 				~(1 << (irq - IT8152_LD_IRQ(0)))),
@@ -67,9 +71,9 @@ static void it8152_unmask_irq(unsigned int irq)
 
 static struct irq_chip it8152_irq_chip = {
 	.name		= "it8152",
-	.ack		= it8152_mask_irq,
-	.mask		= it8152_mask_irq,
-	.unmask		= it8152_unmask_irq,
+	.irq_ack	= it8152_mask_irq,
+	.irq_mask	= it8152_mask_irq,
+	.irq_unmask	= it8152_unmask_irq,
 };
 
 void it8152_init_irq(void)
@@ -84,8 +88,8 @@ void it8152_init_irq(void)
 	__raw_writel((0), IT8152_INTC_LDCNIRR);
 
 	for (irq = IT8152_IRQ(0); irq <= IT8152_LAST_IRQ; irq++) {
-		set_irq_chip(irq, &it8152_irq_chip);
-		set_irq_handler(irq, handle_level_irq);
+		irq_set_chip_and_handler(irq, &it8152_irq_chip,
+					 handle_level_irq);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 }
@@ -236,7 +240,7 @@ static struct resource it8152_mem = {
 
 /*
  * The following functions are needed for DMA bouncing.
- * ITE8152 chip can addrees up to 64MByte, so all the devices
+ * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
 */
@@ -352,3 +356,4 @@ struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
 	return pci_scan_bus(nr, &it8152_ops, sys);
 }
 
+EXPORT_SYMBOL(dma_set_coherent_mask);
diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c
index 9dff07c80ddb..b55c3625d7ee 100644
--- a/arch/arm/common/locomo.c
+++ b/arch/arm/common/locomo.c
@@ -140,11 +140,11 @@ static struct locomo_dev_info locomo_devices[] = {
 
 static void locomo_handler(unsigned int irq, struct irq_desc *desc)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_get_chip_data(irq);
 	int req, i;
 
 	/* Acknowledge the parent IRQ */
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	/* check why this interrupt was generated */
 	req = locomo_readl(lchip->base + LOCOMO_ICR) & 0x0f00;
@@ -161,33 +161,33 @@ static void locomo_handler(unsigned int irq, struct irq_desc *desc)
 	}
 }
 
-static void locomo_ack_irq(unsigned int irq)
+static void locomo_ack_irq(struct irq_data *d)
 {
 }
 
-static void locomo_mask_irq(unsigned int irq)
+static void locomo_mask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r &= ~(0x0010 << (irq - lchip->irq_base));
+	r &= ~(0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
-static void locomo_unmask_irq(unsigned int irq)
+static void locomo_unmask_irq(struct irq_data *d)
 {
-	struct locomo *lchip = get_irq_chip_data(irq);
+	struct locomo *lchip = irq_data_get_irq_chip_data(d);
 	unsigned int r;
 	r = locomo_readl(lchip->base + LOCOMO_ICR);
-	r |= (0x0010 << (irq - lchip->irq_base));
+	r |= (0x0010 << (d->irq - lchip->irq_base));
 	locomo_writel(r, lchip->base + LOCOMO_ICR);
 }
 
 static struct irq_chip locomo_chip = {
 	.name		= "LOCOMO",
-	.ack		= locomo_ack_irq,
-	.mask		= locomo_mask_irq,
-	.unmask		= locomo_unmask_irq,
+	.irq_ack	= locomo_ack_irq,
+	.irq_mask	= locomo_mask_irq,
+	.irq_unmask	= locomo_unmask_irq,
 };
 
 static void locomo_setup_irq(struct locomo *lchip)
@@ -197,15 +197,14 @@ static void locomo_setup_irq(struct locomo *lchip)
 	/*
 	 * Install handler for IRQ_LOCOMO_HW.
 	 */
-	set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
-	set_irq_chip_data(lchip->irq, lchip);
-	set_irq_chained_handler(lchip->irq, locomo_handler);
+	irq_set_irq_type(lchip->irq, IRQ_TYPE_EDGE_FALLING);
+	irq_set_chip_data(lchip->irq, lchip);
+	irq_set_chained_handler(lchip->irq, locomo_handler);
 
 	/* Install handlers for IRQ_LOCOMO_* */
 	for ( ; irq <= lchip->irq_base + 3; irq++) {
-		set_irq_chip(irq, &locomo_chip);
-		set_irq_chip_data(irq, lchip);
-		set_irq_handler(irq, handle_level_irq);
+		irq_set_chip_and_handler(irq, &locomo_chip, handle_level_irq);
+		irq_set_chip_data(irq, lchip);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 }
@@ -476,8 +475,8 @@ static void __locomo_remove(struct locomo *lchip)
 	device_for_each_child(lchip->dev, NULL, locomo_remove_child);
 
 	if (lchip->irq != NO_IRQ) {
-		set_irq_chained_handler(lchip->irq, NULL);
-		set_irq_data(lchip->irq, NULL);
+		irq_set_chained_handler(lchip->irq, NULL);
+		irq_set_handler_data(lchip->irq, NULL);
 	}
 
 	iounmap(lchip->base);
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index 5ebbab6242a7..97912fa48782 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -146,8 +146,7 @@
 #define DESIGNER 0x41
 #define REVISION 0x0
 #define INTEG_CFG 0x0
-#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) \
-	| (REVISION << 20) | (INTEG_CFG << 24))
+#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12))
 
 #define PCELL_ID_VAL 0xb105f00d
 
@@ -1046,7 +1045,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
 	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
 	struct _arg_LPEND lpend;
 
-	/* Max iterations possibile in DMALP is 256 */
+	/* Max iterations possible in DMALP is 256 */
 	if (*bursts >= 256*256) {
 		lcnt1 = 256;
 		lcnt0 = 256;
@@ -1447,7 +1446,7 @@ int pl330_update(const struct pl330_info *pi)
 	}
 
 	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
-		if (val & (1 << ev)) { /* Event occured */
+		if (val & (1 << ev)) { /* Event occurred */
 			struct pl330_thread *thrd;
 			u32 inten = readl(regs + INTEN);
 			int active;
@@ -1859,10 +1858,10 @@ int pl330_add(struct pl330_info *pi)
 	regs = pi->base;
 
 	/* Check if we can handle this DMAC */
-	if (get_id(pi, PERIPH_ID) != PERIPH_ID_VAL
+	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
 	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
 		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
-			readl(regs + PERIPH_ID), readl(regs + PCELL_ID));
+			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
 		return -EINVAL;
 	}
 
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 517d50ddbeb3..9c49a46a2b7a 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -185,14 +185,6 @@ static struct sa1111_dev_info sa1111_devices[] = {
 	},
 };
 
-void __init sa1111_adjust_zones(unsigned long *size, unsigned long *holes)
-{
-	unsigned int sz = SZ_1M >> PAGE_SHIFT;
-
-	size[1] = size[0] - sz;
-	size[0] = sz;
-}
-
 /*
  * SA1111 interrupt support. Since clearing an IRQ while there are
  * active IRQs causes the interrupt output to pulse, the upper levels
@@ -202,7 +194,7 @@ static void
 sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int stat0, stat1, i;
-	struct sa1111 *sachip = get_irq_data(irq);
+	struct sa1111 *sachip = irq_get_handler_data(irq);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 
 	stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
@@ -210,7 +202,7 @@ sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
 
 	sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 
 	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
 
@@ -228,35 +220,35 @@ sa1111_irq_handler(unsigned int irq, struct irq_desc *desc)
 			generic_handle_irq(i + sachip->irq_base);
 
 	/* For level-based interrupts */
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 #define SA1111_IRQMASK_LO(x)	(1 << (x - sachip->irq_base))
 #define SA1111_IRQMASK_HI(x)	(1 << (x - sachip->irq_base - 32))
 
-static void sa1111_ack_irq(unsigned int irq)
+static void sa1111_ack_irq(struct irq_data *d)
 {
 }
 
-static void sa1111_mask_lowirq(unsigned int irq)
+static void sa1111_mask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 &= ~SA1111_IRQMASK_LO(irq);
+	ie0 &= ~SA1111_IRQMASK_LO(d->irq);
 	writel(ie0, mapbase + SA1111_INTEN0);
 }
 
-static void sa1111_unmask_lowirq(unsigned int irq)
+static void sa1111_unmask_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie0;
 
 	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
-	ie0 |= SA1111_IRQMASK_LO(irq);
+	ie0 |= SA1111_IRQMASK_LO(d->irq);
 	sa1111_writel(ie0, mapbase + SA1111_INTEN0);
 }
 
@@ -267,11 +259,11 @@ static void sa1111_unmask_lowirq(unsigned int irq)
  * be triggered. In fact, its very difficult, if not impossible to get
 * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_lowirq(unsigned int irq)
+static int sa1111_retrigger_lowirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 	int i;
 
@@ -279,21 +271,21 @@ static int sa1111_retrigger_lowirq(unsigned int irq)
 	for (i = 0; i < 8; i++) {
 		sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
 		sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
-		if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
+		if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
 			break;
 	}
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-		       "re-trigger IRQ%d\n", irq);
+		       "re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_lowirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long ip0;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -313,11 +305,11 @@ static int sa1111_type_lowirq(unsigned int irq, unsigned int flags)
 	return 0;
 }
 
-static int sa1111_wake_lowirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_LO(irq);
+	unsigned int mask = SA1111_IRQMASK_LO(d->irq);
 	unsigned long we0;
 
 	we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
@@ -332,33 +324,33 @@ static int sa1111_wake_lowirq(unsigned int irq, unsigned int on)
 
 static struct irq_chip sa1111_low_chip = {
 	.name		= "SA1111-l",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_lowirq,
-	.unmask		= sa1111_unmask_lowirq,
-	.retrigger	= sa1111_retrigger_lowirq,
-	.set_type	= sa1111_type_lowirq,
-	.set_wake	= sa1111_wake_lowirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_lowirq,
+	.irq_unmask	= sa1111_unmask_lowirq,
+	.irq_retrigger	= sa1111_retrigger_lowirq,
+	.irq_set_type	= sa1111_type_lowirq,
+	.irq_set_wake	= sa1111_wake_lowirq,
 };
 
-static void sa1111_mask_highirq(unsigned int irq)
+static void sa1111_mask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 &= ~SA1111_IRQMASK_HI(irq);
+	ie1 &= ~SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
-static void sa1111_unmask_highirq(unsigned int irq)
+static void sa1111_unmask_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
 	unsigned long ie1;
 
 	ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
-	ie1 |= SA1111_IRQMASK_HI(irq);
+	ie1 |= SA1111_IRQMASK_HI(d->irq);
 	sa1111_writel(ie1, mapbase + SA1111_INTEN1);
 }
 
@@ -369,11 +361,11 @@ static void sa1111_unmask_highirq(unsigned int irq)
 * be triggered. In fact, its very difficult, if not impossible to get
 * INTSET to re-trigger the interrupt.
  */
-static int sa1111_retrigger_highirq(unsigned int irq)
+static int sa1111_retrigger_highirq(struct irq_data *d)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 	int i;
 
@@ -387,15 +379,15 @@ static int sa1111_retrigger_highirq(unsigned int irq)
 
 	if (i == 8)
 		printk(KERN_ERR "Danger Will Robinson: failed to "
-		       "re-trigger IRQ%d\n", irq);
+		       "re-trigger IRQ%d\n", d->irq);
 	return i == 8 ? -1 : 0;
 }
 
-static int sa1111_type_highirq(unsigned int irq, unsigned int flags)
+static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long ip1;
 
 	if (flags == IRQ_TYPE_PROBE)
@@ -415,11 +407,11 @@ static int sa1111_type_highirq(unsigned int irq, unsigned int flags)
 	return 0;
 }
 
-static int sa1111_wake_highirq(unsigned int irq, unsigned int on)
+static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
 {
-	struct sa1111 *sachip = get_irq_chip_data(irq);
+	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
 	void __iomem *mapbase = sachip->base + SA1111_INTC;
-	unsigned int mask = SA1111_IRQMASK_HI(irq);
+	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
 	unsigned long we1;
 
 	we1 = sa1111_readl(mapbase + SA1111_WAKEEN1);
@@ -434,12 +426,12 @@ static int sa1111_wake_highirq(unsigned int irq, unsigned int on)
 
 static struct irq_chip sa1111_high_chip = {
 	.name		= "SA1111-h",
-	.ack		= sa1111_ack_irq,
-	.mask		= sa1111_mask_highirq,
-	.unmask		= sa1111_unmask_highirq,
-	.retrigger	= sa1111_retrigger_highirq,
-	.set_type	= sa1111_type_highirq,
-	.set_wake	= sa1111_wake_highirq,
+	.irq_ack	= sa1111_ack_irq,
+	.irq_mask	= sa1111_mask_highirq,
+	.irq_unmask	= sa1111_unmask_highirq,
+	.irq_retrigger	= sa1111_retrigger_highirq,
+	.irq_set_type	= sa1111_type_highirq,
+	.irq_set_wake	= sa1111_wake_highirq,
 };
 
 static void sa1111_setup_irq(struct sa1111 *sachip)
@@ -472,25 +464,25 @@ static void sa1111_setup_irq(struct sa1111 *sachip)
 	sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
 
 	for (irq = IRQ_GPAIN0; irq <= SSPROR; irq++) {
-		set_irq_chip(irq, &sa1111_low_chip);
-		set_irq_chip_data(irq, sachip);
-		set_irq_handler(irq, handle_edge_irq);
+		irq_set_chip_and_handler(irq, &sa1111_low_chip,
+					 handle_edge_irq);
+		irq_set_chip_data(irq, sachip);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 
 	for (irq = AUDXMTDMADONEA; irq <= IRQ_S1_BVD1_STSCHG; irq++) {
-		set_irq_chip(irq, &sa1111_high_chip);
-		set_irq_chip_data(irq, sachip);
-		set_irq_handler(irq, handle_edge_irq);
+		irq_set_chip_and_handler(irq, &sa1111_high_chip,
					 handle_edge_irq);
+		irq_set_chip_data(irq, sachip);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 	}
 
 	/*
 	 * Register SA1111 interrupt
 	 */
-	set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
-	set_irq_data(sachip->irq, sachip);
-	set_irq_chained_handler(sachip->irq, sa1111_irq_handler);
+	irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
+	irq_set_handler_data(sachip->irq, sachip);
+	irq_set_chained_handler(sachip->irq, sa1111_irq_handler);
 }
 
 /*
@@ -678,7 +670,7 @@ out:
 * %-EBUSY physical address already marked in-use.
 * %0 successful.
  */
-static int
+static int __devinit
 __sa1111_probe(struct device *me, struct resource *mem, int irq)
 {
 	struct sa1111 *sachip;
@@ -815,8 +807,8 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	clk_disable(sachip->clk);
 
 	if (sachip->irq != NO_IRQ) {
-		set_irq_chained_handler(sachip->irq, NULL);
-		set_irq_data(sachip->irq, NULL);
+		irq_set_chained_handler(sachip->irq, NULL);
+		irq_set_handler_data(sachip->irq, NULL);
 
 		release_mem_region(sachip->phys + SA1111_INTC, 512);
 	}
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 9012004321dd..c11af1e4bad3 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -44,12 +44,12 @@ void reset_scoop(struct device *dev)
 {
 	struct scoop_dev *sdev = dev_get_drvdata(dev);
 
-	iowrite16(0x0100, sdev->base + SCOOP_MCR);	// 00
-	iowrite16(0x0000, sdev->base + SCOOP_CDR);	// 04
-	iowrite16(0x0000, sdev->base + SCOOP_CCR);	// 10
-	iowrite16(0x0000, sdev->base + SCOOP_IMR);	// 18
-	iowrite16(0x00FF, sdev->base + SCOOP_IRM);	// 14
-	iowrite16(0x0000, sdev->base + SCOOP_ISR);	// 1C
+	iowrite16(0x0100, sdev->base + SCOOP_MCR);	/* 00 */
+	iowrite16(0x0000, sdev->base + SCOOP_CDR);	/* 04 */
+	iowrite16(0x0000, sdev->base + SCOOP_CCR);	/* 10 */
+	iowrite16(0x0000, sdev->base + SCOOP_IMR);	/* 18 */
+	iowrite16(0x00FF, sdev->base + SCOOP_IRM);	/* 14 */
+	iowrite16(0x0000, sdev->base + SCOOP_ISR);	/* 1C */
 	iowrite16(0x0000, sdev->base + SCOOP_IRM);
 }
 
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
new file mode 100644
index 000000000000..41df47875122
--- /dev/null
+++ b/arch/arm/common/timer-sp.c
@@ -0,0 +1,172 @@
1/*
2 * linux/arch/arm/common/timer-sp.c
3 *
4 * Copyright (C) 1999 - 2003 ARM Limited
5 * Copyright (C) 2000 Deep Blue Solutions Ltd
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21#include <linux/clk.h>
22#include <linux/clocksource.h>
23#include <linux/clockchips.h>
24#include <linux/err.h>
25#include <linux/interrupt.h>
26#include <linux/irq.h>
27#include <linux/io.h>
28
29#include <asm/hardware/arm_timer.h>
30
31static long __init sp804_get_clock_rate(const char *name)
32{
33 struct clk *clk;
34 long rate;
35 int err;
36
37 clk = clk_get_sys("sp804", name);
38 if (IS_ERR(clk)) {
39 pr_err("sp804: %s clock not found: %d\n", name,
40 (int)PTR_ERR(clk));
41 return PTR_ERR(clk);
42 }
43
44 err = clk_enable(clk);
45 if (err) {
46 pr_err("sp804: %s clock failed to enable: %d\n", name, err);
47 clk_put(clk);
48 return err;
49 }
50
51 rate = clk_get_rate(clk);
52 if (rate < 0) {
53 pr_err("sp804: %s clock failed to get rate: %ld\n", name, rate);
54 clk_disable(clk);
55 clk_put(clk);
56 }
57
58 return rate;
59}
60
61void __init sp804_clocksource_init(void __iomem *base, const char *name)
62{
63 long rate = sp804_get_clock_rate(name);
64
65 if (rate < 0)
66 return;
67
68 /* setup timer 0 as free-running clocksource */
69 writel(0, base + TIMER_CTRL);
70 writel(0xffffffff, base + TIMER_LOAD);
71 writel(0xffffffff, base + TIMER_VALUE);
72 writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
73 base + TIMER_CTRL);
74
75 clocksource_mmio_init(base + TIMER_VALUE, name,
76 rate, 200, 32, clocksource_mmio_readl_down);
77}
78
79
80static void __iomem *clkevt_base;
81static unsigned long clkevt_reload;
82
83/*
84 * IRQ handler for the timer
85 */
86static irqreturn_t sp804_timer_interrupt(int irq, void *dev_id)
87{
88 struct clock_event_device *evt = dev_id;
89
90 /* clear the interrupt */
91 writel(1, clkevt_base + TIMER_INTCLR);
92
93 evt->event_handler(evt);
94
95 return IRQ_HANDLED;
96}
97
98static void sp804_set_mode(enum clock_event_mode mode,
99 struct clock_event_device *evt)
100{
101 unsigned long ctrl = TIMER_CTRL_32BIT | TIMER_CTRL_IE;
102
103 writel(ctrl, clkevt_base + TIMER_CTRL);
104
105 switch (mode) {
106 case CLOCK_EVT_MODE_PERIODIC:
107 writel(clkevt_reload, clkevt_base + TIMER_LOAD);
108 ctrl |= TIMER_CTRL_PERIODIC | TIMER_CTRL_ENABLE;
109 break;
110
111 case CLOCK_EVT_MODE_ONESHOT:
112 /* period set, and timer enabled in 'next_event' hook */
113 ctrl |= TIMER_CTRL_ONESHOT;
114 break;
115
116 case CLOCK_EVT_MODE_UNUSED:
117 case CLOCK_EVT_MODE_SHUTDOWN:
118 default:
119 break;
120 }
121
122 writel(ctrl, clkevt_base + TIMER_CTRL);
123}
124
125static int sp804_set_next_event(unsigned long next,
126 struct clock_event_device *evt)
127{
128 unsigned long ctrl = readl(clkevt_base + TIMER_CTRL);
129
130 writel(next, clkevt_base + TIMER_LOAD);
131 writel(ctrl | TIMER_CTRL_ENABLE, clkevt_base + TIMER_CTRL);
132
133 return 0;
134}
135
136static struct clock_event_device sp804_clockevent = {
137 .shift = 32,
138 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
139 .set_mode = sp804_set_mode,
140 .set_next_event = sp804_set_next_event,
141 .rating = 300,
142 .cpumask = cpu_all_mask,
143};
144
145static struct irqaction sp804_timer_irq = {
146 .name = "timer",
147 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
148 .handler = sp804_timer_interrupt,
149 .dev_id = &sp804_clockevent,
150};
151
152void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
153 const char *name)
154{
155 struct clock_event_device *evt = &sp804_clockevent;
156 long rate = sp804_get_clock_rate(name);
157
158 if (rate < 0)
159 return;
160
161 clkevt_base = base;
162 clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
163
164 evt->name = name;
165 evt->irq = irq;
166 evt->mult = div_sc(rate, NSEC_PER_SEC, evt->shift);
167 evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
168 evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
169
170 setup_irq(irq, &sp804_timer_irq);
171 clockevents_register_device(evt);
172}
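For reference, the two helpers added above are intended to be called from a platform's timer initialisation. A minimal sketch follows; it is not part of this patch, the header path is an assumption (<asm/hardware/timer-sp.h>), and TIMER0_VA_BASE, TIMER1_VA_BASE and IRQ_TIMERINT1 stand in for real platform definitions.

#include <linux/init.h>

#include <asm/hardware/timer-sp.h>	/* assumed location of the prototypes */
#include <asm/mach/time.h>

#include <mach/hardware.h>		/* assumed to provide the placeholder */
#include <mach/irqs.h>			/* base addresses and IRQ used below  */

static void __init example_timer_init(void)
{
	/* one SP804 timer counts down freely as the clocksource ... */
	sp804_clocksource_init(TIMER0_VA_BASE, "timer0");

	/* ... a second one drives the tick as the clockevent */
	sp804_clockevents_init(TIMER1_VA_BASE, IRQ_TIMERINT1, "timer1");
}

struct sys_timer example_timer = {
	.init	= example_timer_init,
};

Because sp804_get_clock_rate() looks the clock up with clk_get_sys("sp804", name), the platform also needs clkdev entries whose dev_id is "sp804" and whose con_id matches the names passed in here ("timer0"/"timer1").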
diff --git a/arch/arm/common/uengine.c b/arch/arm/common/uengine.c
index b520e56216a9..bef408f3d76c 100644
--- a/arch/arm/common/uengine.c
+++ b/arch/arm/common/uengine.c
@@ -312,16 +312,16 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
312 b1 = (gpr_a[i] >> 8) & 0xff; 312 b1 = (gpr_a[i] >> 8) & 0xff;
313 b0 = gpr_a[i] & 0xff; 313 b0 = gpr_a[i] & 0xff;
314 314
315 // immed[@ai, (b1 << 8) | b0] 315 /* immed[@ai, (b1 << 8) | b0] */
316 // 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII 316 /* 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII */
317 ucode[offset++] = 0xf0; 317 ucode[offset++] = 0xf0;
318 ucode[offset++] = (b1 >> 4); 318 ucode[offset++] = (b1 >> 4);
319 ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6); 319 ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);
320 ucode[offset++] = (b0 << 2); 320 ucode[offset++] = (b0 << 2);
321 ucode[offset++] = 0x80 | i; 321 ucode[offset++] = 0x80 | i;
322 322
323 // immed_w1[@ai, (b3 << 8) | b2] 323 /* immed_w1[@ai, (b3 << 8) | b2] */
324 // 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII 324 /* 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII */
325 ucode[offset++] = 0xf4; 325 ucode[offset++] = 0xf4;
326 ucode[offset++] = 0x40 | (b3 >> 4); 326 ucode[offset++] = 0x40 | (b3 >> 4);
327 ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6); 327 ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6);
@@ -340,16 +340,16 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
340 b1 = (gpr_b[i] >> 8) & 0xff; 340 b1 = (gpr_b[i] >> 8) & 0xff;
341 b0 = gpr_b[i] & 0xff; 341 b0 = gpr_b[i] & 0xff;
342 342
343 // immed[@bi, (b1 << 8) | b0] 343 /* immed[@bi, (b1 << 8) | b0] */
344 // 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV 344 /* 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV */
345 ucode[offset++] = 0xf0; 345 ucode[offset++] = 0xf0;
346 ucode[offset++] = (b1 >> 4); 346 ucode[offset++] = (b1 >> 4);
347 ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6); 347 ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6);
348 ucode[offset++] = (i << 2) | 0x03; 348 ucode[offset++] = (i << 2) | 0x03;
349 ucode[offset++] = b0; 349 ucode[offset++] = b0;
350 350
351 // immed_w1[@bi, (b3 << 8) | b2] 351 /* immed_w1[@bi, (b3 << 8) | b2] */
352 // 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV 352 /* 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV */
353 ucode[offset++] = 0xf4; 353 ucode[offset++] = 0xf4;
354 ucode[offset++] = 0x40 | (b3 >> 4); 354 ucode[offset++] = 0x40 | (b3 >> 4);
355 ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6); 355 ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6);
@@ -357,7 +357,7 @@ static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
357 ucode[offset++] = b2; 357 ucode[offset++] = b2;
358 } 358 }
359 359
360 // ctx_arb[kill] 360 /* ctx_arb[kill] */
361 ucode[offset++] = 0xe0; 361 ucode[offset++] = 0xe0;
362 ucode[offset++] = 0x00; 362 ucode[offset++] = 0x00;
363 ucode[offset++] = 0x01; 363 ucode[offset++] = 0x01;
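The hunks above only convert the comments to kernel C style; the byte packing they document is unchanged. Purely for illustration, the A-bank immed encoding restated as a stand-alone helper (emit_immed_a is an invented name, not part of the driver):

#include <linux/types.h>

/*
 * Pack immed[@i, v] for an A-bank register into five microcode bytes.
 * Bit layout, as in the comment above:
 *   11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII
 * where V is the 16-bit immediate and I the register index.
 */
static void emit_immed_a(u8 *p, unsigned int i, u16 v)
{
	u8 b1 = (v >> 8) & 0xff;
	u8 b0 = v & 0xff;

	p[0] = 0xf0;				/* 11110000 */
	p[1] = b1 >> 4;				/* 0000VVVV */
	p[2] = (b1 << 4) | 0x0c | (b0 >> 6);	/* VVVV11VV */
	p[3] = b0 << 2;				/* VVVVVV00 */
	p[4] = 0x80 | i;			/* 1IIIIIII */
}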
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index ba65f6eedca6..7aa4262ada7a 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -22,17 +22,16 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/sysdev.h> 25#include <linux/syscore_ops.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/amba/bus.h> 27#include <linux/amba/bus.h>
28 28
29#include <asm/mach/irq.h> 29#include <asm/mach/irq.h>
30#include <asm/hardware/vic.h> 30#include <asm/hardware/vic.h>
31 31
32#if defined(CONFIG_PM) 32#ifdef CONFIG_PM
33/** 33/**
34 * struct vic_device - VIC PM device 34 * struct vic_device - VIC PM device
35 * @sysdev: The system device which is registered.
36 * @irq: The IRQ number for the base of the VIC. 35 * @irq: The IRQ number for the base of the VIC.
37 * @base: The register base for the VIC. 36 * @base: The register base for the VIC.
38 * @resume_sources: A bitmask of interrupts for resume. 37 * @resume_sources: A bitmask of interrupts for resume.
@@ -43,8 +42,6 @@
43 * @protect: Save for VIC_PROTECT. 42 * @protect: Save for VIC_PROTECT.
44 */ 43 */
45struct vic_device { 44struct vic_device {
46 struct sys_device sysdev;
47
48 void __iomem *base; 45 void __iomem *base;
49 int irq; 46 int irq;
50 u32 resume_sources; 47 u32 resume_sources;
@@ -59,18 +56,13 @@ struct vic_device {
59static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; 56static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
60 57
61static int vic_id; 58static int vic_id;
62
63static inline struct vic_device *to_vic(struct sys_device *sys)
64{
65 return container_of(sys, struct vic_device, sysdev);
66}
67#endif /* CONFIG_PM */ 59#endif /* CONFIG_PM */
68 60
69/** 61/**
70 * vic_init2 - common initialisation code 62 * vic_init2 - common initialisation code
71 * @base: Base of the VIC. 63 * @base: Base of the VIC.
72 * 64 *
73 * Common initialisation code for registeration 65 * Common initialisation code for registration
74 * and resume. 66 * and resume.
75*/ 67*/
76static void vic_init2(void __iomem *base) 68static void vic_init2(void __iomem *base)
@@ -85,10 +77,9 @@ static void vic_init2(void __iomem *base)
85 writel(32, base + VIC_PL190_DEF_VECT_ADDR); 77 writel(32, base + VIC_PL190_DEF_VECT_ADDR);
86} 78}
87 79
88#if defined(CONFIG_PM) 80#ifdef CONFIG_PM
89static int vic_class_resume(struct sys_device *dev) 81static void resume_one_vic(struct vic_device *vic)
90{ 82{
91 struct vic_device *vic = to_vic(dev);
92 void __iomem *base = vic->base; 83 void __iomem *base = vic->base;
93 84
94 printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base); 85 printk(KERN_DEBUG "%s: resuming vic at %p\n", __func__, base);
@@ -107,13 +98,18 @@ static int vic_class_resume(struct sys_device *dev)
107 98
108 writel(vic->soft_int, base + VIC_INT_SOFT); 99 writel(vic->soft_int, base + VIC_INT_SOFT);
109 writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR); 100 writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
101}
110 102
111 return 0; 103static void vic_resume(void)
104{
105 int id;
106
107 for (id = vic_id - 1; id >= 0; id--)
108 resume_one_vic(vic_devices + id);
112} 109}
113 110
114static int vic_class_suspend(struct sys_device *dev, pm_message_t state) 111static void suspend_one_vic(struct vic_device *vic)
115{ 112{
116 struct vic_device *vic = to_vic(dev);
117 void __iomem *base = vic->base; 113 void __iomem *base = vic->base;
118 114
119 printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base); 115 printk(KERN_DEBUG "%s: suspending vic at %p\n", __func__, base);
@@ -128,14 +124,21 @@ static int vic_class_suspend(struct sys_device *dev, pm_message_t state)
128 124
129 writel(vic->resume_irqs, base + VIC_INT_ENABLE); 125 writel(vic->resume_irqs, base + VIC_INT_ENABLE);
130 writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR); 126 writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
127}
128
129static int vic_suspend(void)
130{
131 int id;
132
133 for (id = 0; id < vic_id; id++)
134 suspend_one_vic(vic_devices + id);
131 135
132 return 0; 136 return 0;
133} 137}
134 138
135struct sysdev_class vic_class = { 139struct syscore_ops vic_syscore_ops = {
136 .name = "vic", 140 .suspend = vic_suspend,
137 .suspend = vic_class_suspend, 141 .resume = vic_resume,
138 .resume = vic_class_resume,
139}; 142};
140 143
141/** 144/**
@@ -147,30 +150,8 @@ struct sysdev_class vic_class = {
147*/ 150*/
148static int __init vic_pm_init(void) 151static int __init vic_pm_init(void)
149{ 152{
150 struct vic_device *dev = vic_devices; 153 if (vic_id > 0)
151 int err; 154 register_syscore_ops(&vic_syscore_ops);
152 int id;
153
154 if (vic_id == 0)
155 return 0;
156
157 err = sysdev_class_register(&vic_class);
158 if (err) {
159 printk(KERN_ERR "%s: cannot register class\n", __func__);
160 return err;
161 }
162
163 for (id = 0; id < vic_id; id++, dev++) {
164 dev->sysdev.id = id;
165 dev->sysdev.cls = &vic_class;
166
167 err = sysdev_register(&dev->sysdev);
168 if (err) {
169 printk(KERN_ERR "%s: failed to register device\n",
170 __func__);
171 return err;
172 }
173 }
174 155
175 return 0; 156 return 0;
176} 157}
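The PM rework above replaces the sysdev class with syscore_ops. As a general illustration of that pattern (not taken from this patch), a sketch for an imaginary "foo" block; syscore callbacks run late in suspend and early in resume, on one CPU with interrupts disabled, so they must not sleep:

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int foo_suspend(void)
{
	/* save hardware state here */
	return 0;
}

static void foo_resume(void)
{
	/* restore hardware state here */
}

static struct syscore_ops foo_syscore_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static int __init foo_syscore_init(void)
{
	register_syscore_ops(&foo_syscore_ops);
	return 0;
}
late_initcall(foo_syscore_init);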
@@ -204,26 +185,26 @@ static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 res
204static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { } 185static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
205#endif /* CONFIG_PM */ 186#endif /* CONFIG_PM */
206 187
207static void vic_ack_irq(unsigned int irq) 188static void vic_ack_irq(struct irq_data *d)
208{ 189{
209 void __iomem *base = get_irq_chip_data(irq); 190 void __iomem *base = irq_data_get_irq_chip_data(d);
210 irq &= 31; 191 unsigned int irq = d->irq & 31;
211 writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); 192 writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
212 /* moreover, clear the soft-triggered, in case it was the reason */ 193 /* moreover, clear the soft-triggered, in case it was the reason */
213 writel(1 << irq, base + VIC_INT_SOFT_CLEAR); 194 writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
214} 195}
215 196
216static void vic_mask_irq(unsigned int irq) 197static void vic_mask_irq(struct irq_data *d)
217{ 198{
218 void __iomem *base = get_irq_chip_data(irq); 199 void __iomem *base = irq_data_get_irq_chip_data(d);
219 irq &= 31; 200 unsigned int irq = d->irq & 31;
220 writel(1 << irq, base + VIC_INT_ENABLE_CLEAR); 201 writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
221} 202}
222 203
223static void vic_unmask_irq(unsigned int irq) 204static void vic_unmask_irq(struct irq_data *d)
224{ 205{
225 void __iomem *base = get_irq_chip_data(irq); 206 void __iomem *base = irq_data_get_irq_chip_data(d);
226 irq &= 31; 207 unsigned int irq = d->irq & 31;
227 writel(1 << irq, base + VIC_INT_ENABLE); 208 writel(1 << irq, base + VIC_INT_ENABLE);
228} 209}
229 210
@@ -242,10 +223,10 @@ static struct vic_device *vic_from_irq(unsigned int irq)
242 return NULL; 223 return NULL;
243} 224}
244 225
245static int vic_set_wake(unsigned int irq, unsigned int on) 226static int vic_set_wake(struct irq_data *d, unsigned int on)
246{ 227{
247 struct vic_device *v = vic_from_irq(irq); 228 struct vic_device *v = vic_from_irq(d->irq);
248 unsigned int off = irq & 31; 229 unsigned int off = d->irq & 31;
249 u32 bit = 1 << off; 230 u32 bit = 1 << off;
250 231
251 if (!v) 232 if (!v)
@@ -267,10 +248,10 @@ static int vic_set_wake(unsigned int irq, unsigned int on)
267 248
268static struct irq_chip vic_chip = { 249static struct irq_chip vic_chip = {
269 .name = "VIC", 250 .name = "VIC",
270 .ack = vic_ack_irq, 251 .irq_ack = vic_ack_irq,
271 .mask = vic_mask_irq, 252 .irq_mask = vic_mask_irq,
272 .unmask = vic_unmask_irq, 253 .irq_unmask = vic_unmask_irq,
273 .set_wake = vic_set_wake, 254 .irq_set_wake = vic_set_wake,
274}; 255};
275 256
276static void __init vic_disable(void __iomem *base) 257static void __init vic_disable(void __iomem *base)
@@ -305,9 +286,9 @@ static void __init vic_set_irq_sources(void __iomem *base,
305 if (vic_sources & (1 << i)) { 286 if (vic_sources & (1 << i)) {
306 unsigned int irq = irq_start + i; 287 unsigned int irq = irq_start + i;
307 288
308 set_irq_chip(irq, &vic_chip); 289 irq_set_chip_and_handler(irq, &vic_chip,
309 set_irq_chip_data(irq, base); 290 handle_level_irq);
310 set_irq_handler(irq, handle_level_irq); 291 irq_set_chip_data(irq, base);
311 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); 292 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
312 } 293 }
313 } 294 }
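On the irqchip side, the conversion replaces the irq-number based .ack/.mask/.unmask callbacks with struct irq_data based ones and registers them through irq_set_chip_and_handler()/irq_set_chip_data(). A minimal sketch of the same pattern for a hypothetical one-bank controller follows; the "foo" names and the MASK_SET/MASK_CLR register offsets are invented for illustration:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <asm/mach/irq.h>

#define FOO_MASK_SET	0x10	/* hypothetical register offsets */
#define FOO_MASK_CLR	0x14

static void foo_mask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq & 31), base + FOO_MASK_SET);
}

static void foo_unmask_irq(struct irq_data *d)
{
	void __iomem *base = irq_data_get_irq_chip_data(d);

	writel(1 << (d->irq & 31), base + FOO_MASK_CLR);
}

static struct irq_chip foo_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask_irq,
	.irq_unmask	= foo_unmask_irq,
};

static void __init foo_init_one(void __iomem *base, unsigned int irq)
{
	irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
	irq_set_chip_data(irq, base);
	set_irq_flags(irq, IRQF_VALID);
}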