Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/atomic.h	26
-rw-r--r--	arch/arm/include/asm/cache.h	2
-rw-r--r--	arch/arm/include/asm/cputype.h	10
-rw-r--r--	arch/arm/include/asm/mach/mmc.h	17
-rw-r--r--	arch/arm/include/asm/tcm.h	31
-rw-r--r--	arch/arm/include/asm/unified.h	4
6 files changed, 52 insertions, 38 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 9ed2377fe8e5..d0daeab2234e 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
  * store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens. Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
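
Note on the atomic.h change: atomic_set() can become a plain assignment even on ARMv6+ because every exception return executes clrex (or a dummy strex), so an exclusive reservation opened by ldrex can never survive to pair with an unrelated strex. A minimal usage sketch follows (hypothetical caller code, not part of this patch), assuming only the interface shown in the diff above:

	/* Illustrative sketch, not part of the patch. */
	#include <asm/atomic.h>

	static atomic_t nr_users = ATOMIC_INIT(0);

	static void user_get(void)
	{
		atomic_add(1, &nr_users);	/* ldrex/strex loop on ARMv6+ */
	}

	static void user_reset(void)
	{
		/* Plain store; safe because exception return clears the
		 * local exclusive monitor (clrex or dummy strex). */
		atomic_set(&nr_users, 0);
	}

	static int user_count(void)
	{
		return atomic_read(&nr_users);	/* plain load */
	}
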
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index feaa75f0013e..66c160b8547f 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,7 +4,7 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H
 
-#define L1_CACHE_SHIFT		5
+#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
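
Note on the cache.h change: the L1 cache line shift is no longer hardwired to 5 (32-byte lines) but comes from CONFIG_ARM_L1_CACHE_SHIFT, so platforms with larger lines (e.g. shift 6 for 64 bytes) align and pad correctly. A sketch of the usual consumer, assuming the generic helpers in <linux/cache.h> built on L1_CACHE_BYTES:

	/* Illustrative sketch, not part of the patch. */
	#include <linux/cache.h>

	struct ring {
		unsigned long head;
		/* Start 'tail' on its own cache line to avoid false
		 * sharing; the alignment now tracks
		 * CONFIG_ARM_L1_CACHE_SHIFT instead of a fixed 32 bytes. */
		unsigned long tail ____cacheline_aligned;
	};
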
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b3e656c6fb78..20ae96cc0020 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -63,6 +63,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 	return read_cpuid(CPUID_CACHETYPE);
 }
 
+static inline unsigned int __attribute_const__ read_cpuid_tcmstatus(void)
+{
+	return read_cpuid(CPUID_TCM);
+}
+
 /*
  * Intel's XScale3 core supports some v6 features (supersections, L2)
  * but advertises itself as v5 as it does not support the v6 ISA. For
@@ -73,7 +78,10 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
 #else
 static inline int cpu_is_xsc3(void)
 {
-	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
+	unsigned int id;
+	id = read_cpuid_id() & 0xffffe000;
+	/* It covers both Intel ID and Marvell ID */
+	if ((id == 0x69056000) || (id == 0x56056000))
 		return 1;
 
 	return 0;
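
Note on the cputype.h change: read_cpuid_tcmstatus() exposes the TCM Type register for the TCM support added elsewhere in this series, and cpu_is_xsc3() now also matches Marvell-branded XScale3 parts (implementer code 0x56 alongside Intel's 0x69). A hedged probing sketch, assuming the ARMv6 TCM Type register layout (DTCM bank count in bits [18:16], ITCM bank count in bits [2:0]):

	/* Illustrative sketch, not part of the patch; the bit layout is
	 * an assumption from the ARMv6 architecture manual. */
	#include <linux/kernel.h>
	#include <asm/cputype.h>

	static void report_tcm_banks(void)
	{
		unsigned int tcm_status = read_cpuid_tcmstatus();
		unsigned int dtcm_banks = (tcm_status >> 16) & 0x03;
		unsigned int itcm_banks = tcm_status & 0x03;

		pr_info("CPU: %u DTCM bank(s), %u ITCM bank(s)\n",
			dtcm_banks, itcm_banks);
	}
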
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
deleted file mode 100644
index b490ecc79def..000000000000
--- a/arch/arm/include/asm/mach/mmc.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * arch/arm/include/asm/mach/mmc.h
- */
-#ifndef ASMARM_MACH_MMC_H
-#define ASMARM_MACH_MMC_H
-
-#include <linux/mmc/host.h>
-
-struct mmc_platform_data {
-	unsigned int ocr_mask;		/* available voltages */
-	u32 (*translate_vdd)(struct device *, unsigned int);
-	unsigned int (*status)(struct device *);
-	int	gpio_wp;
-	int	gpio_cd;
-};
-
-#endif
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
new file mode 100644
index 000000000000..5929ef5d927a
--- /dev/null
+++ b/arch/arm/include/asm/tcm.h
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ */
+#ifndef __ASMARM_TCM_H
+#define __ASMARM_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(.tcm.data)
+/* Tag constants with this */
+#define __tcmconst __section(.tcm.rodata)
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(.tcm.text)
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
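
Note on the new tcm.h: the tags place objects into the Tightly-Coupled Memory sections, tcm_alloc()/tcm_free() hand out TCM at run time, and __tcmfunc carries long_call because callers in ordinary RAM may be outside normal branch range of the TCM mapping. A usage sketch under the assumption that CONFIG_HAVE_TCM is enabled and the platform's linker script wires up the .tcm.* sections (the function and sizes are illustrative):

	/* Illustrative sketch, not part of the patch. */
	#include <linux/errno.h>
	#include <asm/tcm.h>

	/* Lands in data TCM */
	static int coeff[16] __tcmdata;

	/* Runs from instruction TCM; long_call lets callers in
	 * ordinary RAM reach it. */
	int __tcmfunc filter_step(int sample)
	{
		int i, acc = 0;

		for (i = 0; i < 16; i++)
			acc += coeff[i] * sample;
		return acc;
	}

	/* Run-time TCM memory from the new allocator */
	static void *scratch;

	static int grab_scratch(void)
	{
		scratch = tcm_alloc(1024);
		return scratch ? 0 : -ENOMEM;
	}

	static void drop_scratch(void)
	{
		tcm_free(scratch, 1024);
	}
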
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index 073e85b9b961..bc631161e9c6 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -35,7 +35,9 @@
 
 #define ARM(x...)
 #define THUMB(x...)	x
+#ifdef __ASSEMBLY__
 #define W(instr)	instr.w
+#endif
 #define BSYM(sym)	sym + 1
 
 #else	/* !CONFIG_THUMB2_KERNEL */
@@ -45,7 +47,9 @@
 
 #define ARM(x...)	x
 #define THUMB(x...)
+#ifdef __ASSEMBLY__
 #define W(instr)	instr
+#endif
 #define BSYM(sym)	sym
 
 #endif	/* CONFIG_THUMB2_KERNEL */