author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-19 16:18:39 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-02-19 16:18:39 -0500
commit    59b8175c771040afcd4ad67022b0cc80c216b866
tree      4ef5935bee1e342716d49b9d4b99e3fa835526e6 /arch/arm/common
parent    920841d8d1d61bc12b43f95a579a5374f6d98f81
parent    3b0eb4a195a124567cd0dd6f700f8388def542c6
Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (117 commits)
  [ARM] 4058/2: iop32x: set ->broken_parity_status on n2100 onboard r8169 ports
  [ARM] 4140/1: AACI stability add ac97 timeout and retries
  [ARM] 4139/1: AACI record support
  [ARM] 4138/1: AACI: multiple channel support for IRQ handling
  [ARM] 4211/1: Provide a defconfig for ns9xxx
  [ARM] 4210/1: base for new machine type "NetSilicon NS9360"
  [ARM] 4222/1: S3C2443: Remove reference to missing S3C2443_PM
  [ARM] 4221/1: S3C2443: DMA support
  [ARM] 4220/1: S3C24XX: DMA system initialised from sysdev
  [ARM] 4219/1: S3C2443: DMA source definitions
  [ARM] 4218/1: S3C2412: fix CONFIG_CPU_S3C2412_ONLY wrt to S3C2443
  [ARM] 4217/1: S3C24XX: remove the dma channel show at startup
  [ARM] 4090/2: avoid clash between PXA and SA1111 defines
  [ARM] 4216/1: add .gitignore entries for ARM specific files
  [ARM] 4214/2: S3C2410: Add Armzone QT2410
  [ARM] 4215/1: s3c2410 usb device: per-platform vbus_draw
  [ARM] 4213/1: S3C2410 - Update definition of ADCTSC_XY_PST
  [ARM] 4098/1: ARM: rtc_lock only used with rtc_cmos
  [ARM] 4137/1: Add kexec support
  [ARM] 4201/1: SMP barriers pair needed for the secondary boot process
  ...

Fix up conflict due to typedef removal in sound/arm/aaci.h
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/dmabounce.c   87
-rw-r--r--  arch/arm/common/gic.c        109
2 files changed, 131 insertions(+), 65 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 2e635b814c14..6fbe7722aa44 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -32,7 +32,6 @@
 
 #include <asm/cacheflush.h>
 
-#undef DEBUG
 #undef STATS
 
 #ifdef STATS
@@ -66,14 +65,13 @@ struct dmabounce_pool {
 };
 
 struct dmabounce_device_info {
-        struct list_head node;
-
         struct device *dev;
         struct list_head safe_buffers;
 #ifdef STATS
         unsigned long total_allocs;
         unsigned long map_op_count;
         unsigned long bounce_count;
+        int attr_res;
 #endif
         struct dmabounce_pool small;
         struct dmabounce_pool large;
@@ -81,33 +79,23 @@ struct dmabounce_device_info {
         rwlock_t lock;
 };
 
-static LIST_HEAD(dmabounce_devs);
-
 #ifdef STATS
-static void print_alloc_stats(struct dmabounce_device_info *device_info)
+static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
+                              char *buf)
 {
-        printk(KERN_INFO
-               "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
-               device_info->dev->bus_id,
-               device_info->small.allocs, device_info->large.allocs,
-               device_info->total_allocs - device_info->small.allocs -
-               device_info->large.allocs,
-               device_info->total_allocs);
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+        return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
+                device_info->small.allocs,
+                device_info->large.allocs,
+                device_info->total_allocs - device_info->small.allocs -
+                        device_info->large.allocs,
+                device_info->total_allocs,
+                device_info->map_op_count,
+                device_info->bounce_count);
 }
-#endif
 
-/* find the given device in the dmabounce device list */
-static inline struct dmabounce_device_info *
-find_dmabounce_dev(struct device *dev)
-{
-        struct dmabounce_device_info *d;
-
-        list_for_each_entry(d, &dmabounce_devs, node)
-                if (d->dev == dev)
-                        return d;
-
-        return NULL;
-}
+static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
+#endif
 
 
 /* allocate a 'safe' buffer and keep track of it */
@@ -162,8 +150,6 @@ alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
         if (pool)
                 pool->allocs++;
         device_info->total_allocs++;
-        if (device_info->total_allocs % 1000 == 0)
-                print_alloc_stats(device_info);
 #endif
 
         write_lock_irqsave(&device_info->lock, flags);
@@ -218,20 +204,11 @@ free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *
 
 /* ************************************************** */
 
-#ifdef STATS
-static void print_map_stats(struct dmabounce_device_info *device_info)
-{
-        dev_info(device_info->dev,
-                "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
-                device_info->map_op_count, device_info->bounce_count);
-}
-#endif
-
 static inline dma_addr_t
 map_single(struct device *dev, void *ptr, size_t size,
                 enum dma_data_direction dir)
 {
-        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
         dma_addr_t dma_addr;
         int needs_bounce = 0;
 
@@ -281,10 +258,14 @@ map_single(struct device *dev, void *ptr, size_t size,
                 ptr = buf->safe;
 
                 dma_addr = buf->safe_dma_addr;
+        } else {
+                /*
+                 * We don't need to sync the DMA buffer since
+                 * it was allocated via the coherent allocators.
+                 */
+                consistent_sync(ptr, size, dir);
         }
 
-        consistent_sync(ptr, size, dir);
-
         return dma_addr;
 }
 
@@ -292,7 +273,7 @@ static inline void
 unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
-        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
         struct safe_buffer *buf = NULL;
 
         /*
@@ -317,12 +298,12 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 DO_STATS ( device_info->bounce_count++ );
 
                 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-                        unsigned long ptr;
+                        void *ptr = buf->ptr;
 
                         dev_dbg(dev,
                                 "%s: copy back safe %p to unsafe %p size %d\n",
-                                __func__, buf->safe, buf->ptr, size);
-                        memcpy(buf->ptr, buf->safe, size);
+                                __func__, buf->safe, ptr, size);
+                        memcpy(ptr, buf->safe, size);
 
                         /*
                          * DMA buffers must have the same cache properties
@@ -332,8 +313,8 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                          * bidirectional case because we know the cache
                          * lines will be coherent with the data written.
                          */
-                        ptr = (unsigned long)buf->ptr;
                         dmac_clean_range(ptr, ptr + size);
+                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                 }
                 free_safe_buffer(device_info, buf);
         }
@@ -343,7 +324,7 @@ static inline void
 sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
-        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
         struct safe_buffer *buf = NULL;
 
         if (device_info)
@@ -397,7 +378,10 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 default:
                         BUG();
                 }
-                consistent_sync(buf->safe, size, dir);
+                /*
+                 * No need to sync the safe buffer - it was allocated
+                 * via the coherent allocators.
+                 */
         } else {
                 consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
         }
@@ -604,9 +588,10 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
         device_info->total_allocs = 0;
         device_info->map_op_count = 0;
         device_info->bounce_count = 0;
+        device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
 #endif
 
-        list_add(&device_info->node, &dmabounce_devs);
+        dev->archdata.dmabounce = device_info;
 
         printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
                 dev->bus_id, dev->bus->name);
@@ -623,7 +608,9 @@ dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 void
 dmabounce_unregister_dev(struct device *dev)
 {
-        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
+        struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+        dev->archdata.dmabounce = NULL;
 
         if (!device_info) {
                 printk(KERN_WARNING
@@ -645,12 +632,10 @@ dmabounce_unregister_dev(struct device *dev)
         dma_pool_destroy(device_info->large.pool);
 
 #ifdef STATS
-        print_alloc_stats(device_info);
-        print_map_stats(device_info);
+        if (device_info->attr_res == 0)
+                device_remove_file(dev, &dev_attr_dmabounce_stats);
 #endif
 
-        list_del(&device_info->node);
-
         kfree(device_info);
 
         printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
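
For context: the dmabounce half of this merge drops the global dmabounce_devs list that map_single()/unmap_single()/sync_single() walked on every DMA operation, storing the per-device state directly in dev->archdata.dmabounce instead, and it moves the STATS instrumentation from periodic printks to a read-only sysfs attribute. Below is a minimal consumer-side sketch of the 2.6.21-era API shown above; the probe/remove pair, names, and pool sizes are illustrative only and not part of the patch.

/*
 * Hypothetical consumer of the dmabounce API after this merge.
 * dmabounce_register_dev()/dmabounce_unregister_dev() are the real
 * entry points touched above; "example_probe", "example_remove" and
 * the 512/4096 pool sizes are made up for illustration.
 */
#include <linux/device.h>

extern int dmabounce_register_dev(struct device *, unsigned long,
                                  unsigned long);
extern void dmabounce_unregister_dev(struct device *);

static int example_probe(struct device *dev)
{
        /* create 512-byte and 4 KiB bounce pools for this device */
        int ret = dmabounce_register_dev(dev, 512, 4096);
        if (ret)
                return ret;

        /*
         * From here on, map_single()/unmap_single()/sync_single()
         * find this device's state through dev->archdata.dmabounce
         * in O(1) instead of walking a global list.
         */
        return 0;
}

static void example_remove(struct device *dev)
{
        /* clears dev->archdata.dmabounce and frees the pools */
        dmabounce_unregister_dev(dev);
}

With STATS defined, the six counters (small-pool, large-pool, and other allocations, total allocations, map operations, bounces) are now read on demand from the device's dmabounce_stats attribute rather than being printed to the kernel log every 1000 allocations.
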
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 09b9d1b6844c..4deece5fbdf4 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -14,7 +14,9 @@
  *
  * o There is one CPU Interface per CPU, which sends interrupts sent
  *   by the Distributor, and interrupts generated locally, to the
- *   associated CPU.
+ *   associated CPU. The base address of the CPU interface is usually
+ *   aliased so that the same address points to different chips depending
+ *   on the CPU it is accessed from.
  *
  * Note that IRQs 0-31 are special - they are local to each CPU.
  * As such, the enable set/clear, pending set/clear and active bit
@@ -31,10 +33,38 @@
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
 
-static void __iomem *gic_dist_base;
-static void __iomem *gic_cpu_base;
 static DEFINE_SPINLOCK(irq_controller_lock);
 
+struct gic_chip_data {
+        unsigned int irq_offset;
+        void __iomem *dist_base;
+        void __iomem *cpu_base;
+};
+
+#ifndef MAX_GIC_NR
+#define MAX_GIC_NR      1
+#endif
+
+static struct gic_chip_data gic_data[MAX_GIC_NR];
+
+static inline void __iomem *gic_dist_base(unsigned int irq)
+{
+        struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+        return gic_data->dist_base;
+}
+
+static inline void __iomem *gic_cpu_base(unsigned int irq)
+{
+        struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+        return gic_data->cpu_base;
+}
+
+static inline unsigned int gic_irq(unsigned int irq)
+{
+        struct gic_chip_data *gic_data = get_irq_chip_data(irq);
+        return irq - gic_data->irq_offset;
+}
+
 /*
  * Routines to acknowledge, disable and enable interrupts
  *
@@ -55,8 +85,8 @@ static void gic_ack_irq(unsigned int irq)
         u32 mask = 1 << (irq % 32);
 
         spin_lock(&irq_controller_lock);
-        writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
-        writel(irq, gic_cpu_base + GIC_CPU_EOI);
+        writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
+        writel(gic_irq(irq), gic_cpu_base(irq) + GIC_CPU_EOI);
         spin_unlock(&irq_controller_lock);
 }
 
@@ -65,7 +95,7 @@ static void gic_mask_irq(unsigned int irq)
         u32 mask = 1 << (irq % 32);
 
         spin_lock(&irq_controller_lock);
-        writel(mask, gic_dist_base + GIC_DIST_ENABLE_CLEAR + (irq / 32) * 4);
+        writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_CLEAR + (gic_irq(irq) / 32) * 4);
         spin_unlock(&irq_controller_lock);
 }
 
@@ -74,14 +104,14 @@ static void gic_unmask_irq(unsigned int irq)
         u32 mask = 1 << (irq % 32);
 
         spin_lock(&irq_controller_lock);
-        writel(mask, gic_dist_base + GIC_DIST_ENABLE_SET + (irq / 32) * 4);
+        writel(mask, gic_dist_base(irq) + GIC_DIST_ENABLE_SET + (gic_irq(irq) / 32) * 4);
         spin_unlock(&irq_controller_lock);
 }
 
 #ifdef CONFIG_SMP
 static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
 {
-        void __iomem *reg = gic_dist_base + GIC_DIST_TARGET + (irq & ~3);
+        void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
         unsigned int shift = (irq % 4) * 8;
         unsigned int cpu = first_cpu(mask_val);
         u32 val;
@@ -95,6 +125,37 @@ static void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
 }
 #endif
 
+static void fastcall gic_handle_cascade_irq(unsigned int irq,
+                                            struct irq_desc *desc)
+{
+        struct gic_chip_data *chip_data = get_irq_data(irq);
+        struct irq_chip *chip = get_irq_chip(irq);
+        unsigned int cascade_irq;
+        unsigned long status;
+
+        /* primary controller ack'ing */
+        chip->ack(irq);
+
+        spin_lock(&irq_controller_lock);
+        status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
+        spin_unlock(&irq_controller_lock);
+
+        cascade_irq = (status & 0x3ff);
+        if (cascade_irq > 1020)
+                goto out;
+        if (cascade_irq < 32 || cascade_irq >= NR_IRQS) {
+                do_bad_IRQ(cascade_irq, desc);
+                goto out;
+        }
+
+        cascade_irq += chip_data->irq_offset;
+        generic_handle_irq(cascade_irq);
+
+ out:
+        /* primary controller unmasking */
+        chip->unmask(irq);
+}
+
 static struct irq_chip gic_chip = {
         .name           = "GIC",
         .ack            = gic_ack_irq,
@@ -105,15 +166,29 @@ static struct irq_chip gic_chip = {
 #endif
 };
 
-void __init gic_dist_init(void __iomem *base)
+void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
+{
+        if (gic_nr >= MAX_GIC_NR)
+                BUG();
+        if (set_irq_data(irq, &gic_data[gic_nr]) != 0)
+                BUG();
+        set_irq_chained_handler(irq, gic_handle_cascade_irq);
+}
+
+void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
+                          unsigned int irq_start)
 {
         unsigned int max_irq, i;
         u32 cpumask = 1 << smp_processor_id();
 
+        if (gic_nr >= MAX_GIC_NR)
+                BUG();
+
         cpumask |= cpumask << 8;
         cpumask |= cpumask << 16;
 
-        gic_dist_base = base;
+        gic_data[gic_nr].dist_base = base;
+        gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
 
         writel(0, base + GIC_DIST_CTRL);
 
@@ -158,8 +233,9 @@ void __init gic_dist_init(void __iomem *base)
         /*
          * Setup the Linux IRQ subsystem.
          */
-        for (i = 29; i < max_irq; i++) {
+        for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
                 set_irq_chip(i, &gic_chip);
+                set_irq_chip_data(i, &gic_data[gic_nr]);
                 set_irq_handler(i, handle_level_irq);
                 set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
         }
@@ -167,9 +243,13 @@ void __init gic_dist_init(void __iomem *base)
         writel(1, base + GIC_DIST_CTRL);
 }
 
-void __cpuinit gic_cpu_init(void __iomem *base)
+void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
-        gic_cpu_base = base;
+        if (gic_nr >= MAX_GIC_NR)
+                BUG();
+
+        gic_data[gic_nr].cpu_base = base;
+
         writel(0xf0, base + GIC_CPU_PRIMASK);
         writel(1, base + GIC_CPU_CTRL);
 }
@@ -179,6 +259,7 @@ void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
 {
         unsigned long map = *cpus_addr(cpumask);
 
-        writel(map << 16 | irq, gic_dist_base + GIC_DIST_SOFTINT);
+        /* this always happens on GIC0 */
+        writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
 }
 #endif
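
For context: the gic.c half generalises the driver from one implicit controller (the old gic_dist_base/gic_cpu_base globals) to an array of up to MAX_GIC_NR gic_chip_data instances, each carrying its own distributor and CPU-interface bases plus an irq_offset into the Linux IRQ space, with gic_cascade_irq() chaining a secondary GIC behind one interrupt of the primary. Below is a board-level bring-up sketch under the assumption that the platform arranges for MAX_GIC_NR to be 2 (the driver defaults it to 1); all base addresses, IRQ numbers, and names are made-up placeholders, not from the patch.

/*
 * Hypothetical machine init using the new multi-GIC API from this
 * merge.  GIC0/GIC1_*_BASE and the IRQ numbers are placeholders; a
 * real board takes them from its memory map and IRQ layout.
 */
#include <linux/init.h>
#include <asm/hardware/gic.h>

#define GIC0_DIST_BASE  ((void __iomem *)0xf8001000)    /* placeholder */
#define GIC0_CPU_BASE   ((void __iomem *)0xf8000100)    /* placeholder */
#define GIC1_DIST_BASE  ((void __iomem *)0xf8011000)    /* placeholder */
#define GIC1_CPU_BASE   ((void __iomem *)0xf8010100)    /* placeholder */

#define IRQ_GIC1_START          64      /* first Linux IRQ owned by GIC1 */
#define IRQ_GIC0_CASCADE        42      /* GIC0 input wired to GIC1's output */

static void __init example_init_irq(void)
{
        /* primary GIC; 29 matches the IRQ base the old hard-coded loop used */
        gic_dist_init(0, GIC0_DIST_BASE, 29);
        gic_cpu_init(0, GIC0_CPU_BASE);

        /* secondary GIC, offset above the primary's range via irq_offset */
        gic_dist_init(1, GIC1_DIST_BASE, IRQ_GIC1_START);
        gic_cpu_init(1, GIC1_CPU_BASE);

        /*
         * Interrupts raised by GIC1 now arrive on GIC0 input 42:
         * gic_handle_cascade_irq() acks the primary, reads GIC1's
         * GIC_CPU_INTACK, and re-dispatches cascade_irq + irq_offset.
         */
        gic_cascade_irq(1, IRQ_GIC0_CASCADE);
}

Note that gic_raise_softirq() still writes only to gic_data[0], as the added comment says: software interrupts, and therefore SMP cross-calls, are assumed to live on the primary controller.
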