Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/cacheflush.h               22
-rw-r--r--  arch/arm/include/asm/dma-mapping.h              26
-rw-r--r--  arch/arm/include/asm/hardware/cache-tauros2.h   11
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h      165
-rw-r--r--  arch/arm/include/asm/hardware/iop3xx.h          18
-rw-r--r--  arch/arm/include/asm/memory.h                   16
-rw-r--r--  arch/arm/include/asm/pgtable.h                  14
-rw-r--r--  arch/arm/include/asm/swab.h                     19
-rw-r--r--  arch/arm/include/asm/system.h                   19
9 files changed, 275 insertions(+), 35 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 3d0cdd21b882..9fd6d3ab68c0 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()		__cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 		__cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long uaddr, void *kaddr,
 			 unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		__cpuc_coherent_kern_range(addr, addr + len);
 	}
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+		vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+		vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -410,8 +420,6 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	 */
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 static inline void __flush_icache_all(void)
 {
 #ifdef CONFIG_ARM_ERRATA_411920
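
For illustration only (not part of this patch): the rename is transparent to callers. On non-VIPT caches flush_cache_mm() and friends now expand to the vivt_* inlines via the macros above, while VIPT builds keep the extern declarations, so a hypothetical call site like the one below compiles unchanged either way.

/* hypothetical caller, not from this patch */
static inline void drop_cached_user_state(struct mm_struct *mm)
{
	flush_cache_mm(mm);	/* vivt_flush_cache_mm() or the VIPT extern, per CONFIG_CPU_CACHE_VIPT */
}
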
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index ff46dfa68a97..a96300bf83fd 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -15,20 +15,15 @@
  * must not be used by drivers.
  */
 #ifndef __arch_page_to_dma
-
-#if !defined(CONFIG_HIGHMEM)
 static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 {
-	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
 }
-#elif defined(__pfn_to_bus)
-static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
+
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
 {
-	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
+	return pfn_to_page(__bus_to_pfn(addr));
 }
-#else
-#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
-#endif
 
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
@@ -45,6 +40,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
 	return __arch_page_to_dma(dev, page);
 }
 
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+	return __arch_dma_to_page(dev, addr);
+}
+
 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
 	return __arch_dma_to_virt(dev, addr);
@@ -257,9 +257,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
  */
 extern dma_addr_t dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+		enum dma_data_direction);
 extern dma_addr_t dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -352,7 +354,6 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 {
 	/* nothing to do */
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -371,8 +372,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_unmap_single(dev, handle, size, dir);
+	/* nothing to do */
 }
+#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
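
For illustration only (not from this patch), a minimal sketch of how a driver pairs the dma_map_page()/dma_unmap_page() declarations above; the helper name, device, page and length are hypothetical.

/* hypothetical driver helper, not from this patch */
static void example_dma_to_device(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	/* ... program the hardware with "handle" and wait for completion ... */

	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
}
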
diff --git a/arch/arm/include/asm/hardware/cache-tauros2.h b/arch/arm/include/asm/hardware/cache-tauros2.h
new file mode 100644
index 000000000000..538f17ca905b
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-tauros2.h
@@ -0,0 +1,11 @@
+/*
+ * arch/arm/include/asm/hardware/cache-tauros2.h
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+extern void __init tauros2_init(void);
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
new file mode 100644
index 000000000000..f82b25d4f73e
--- /dev/null
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -0,0 +1,165 @@
+/*
+ * linux/arch/arm/include/asm/hardware/coresight.h
+ *
+ * CoreSight components' registers
+ *
+ * Copyright (C) 2009 Nokia Corporation.
+ * Alexander Shishkin
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_HARDWARE_CORESIGHT_H
+#define __ASM_HARDWARE_CORESIGHT_H
+
+#define TRACER_ACCESSED_BIT	0
+#define TRACER_RUNNING_BIT	1
+#define TRACER_CYCLE_ACC_BIT	2
+#define TRACER_ACCESSED		BIT(TRACER_ACCESSED_BIT)
+#define TRACER_RUNNING		BIT(TRACER_RUNNING_BIT)
+#define TRACER_CYCLE_ACC	BIT(TRACER_CYCLE_ACC_BIT)
+
+struct tracectx {
+	unsigned int	etb_bufsz;
+	void __iomem	*etb_regs;
+	void __iomem	*etm_regs;
+	unsigned long	flags;
+	int		ncmppairs;
+	int		etm_portsz;
+	struct device	*dev;
+	struct clk	*emu_clk;
+	struct mutex	mutex;
+};
+
+#define TRACER_TIMEOUT 10000
+
+#define etm_writel(t, v, x) \
+	(__raw_writel((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+
+/* CoreSight Management Registers */
+#define CSMR_LOCKACCESS 0xfb0
+#define CSMR_LOCKSTATUS 0xfb4
+#define CSMR_AUTHSTATUS 0xfb8
+#define CSMR_DEVID	0xfc8
+#define CSMR_DEVTYPE	0xfcc
+/* CoreSight Component Registers */
+#define CSCR_CLASS	0xff4
+
+#define CSCR_PRSR	0x314
+
+#define UNLOCK_MAGIC	0xc5acce55
+
+/* ETM control register, "ETM Architecture", 3.3.1 */
+#define ETMR_CTRL		0
+#define ETMCTRL_POWERDOWN	1
+#define ETMCTRL_PROGRAM		(1 << 10)
+#define ETMCTRL_PORTSEL		(1 << 11)
+#define ETMCTRL_DO_CONTEXTID	(3 << 14)
+#define ETMCTRL_PORTMASK1	(7 << 4)
+#define ETMCTRL_PORTMASK2	(1 << 21)
+#define ETMCTRL_PORTMASK	(ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
+#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
+#define ETMCTRL_DO_CPRT		(1 << 1)
+#define ETMCTRL_DATAMASK	(3 << 2)
+#define ETMCTRL_DATA_DO_DATA	(1 << 2)
+#define ETMCTRL_DATA_DO_ADDR	(1 << 3)
+#define ETMCTRL_DATA_DO_BOTH	(ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
+#define ETMCTRL_BRANCH_OUTPUT	(1 << 8)
+#define ETMCTRL_CYCLEACCURATE	(1 << 12)
+
+/* ETM configuration code register */
+#define ETMR_CONFCODE		(0x04)
+
+/* ETM trace start/stop resource control register */
+#define ETMR_TRACESSCTRL	(0x18)
+
+/* ETM trigger event register */
+#define ETMR_TRIGEVT		(0x08)
+
+/* address access type register bits, "ETM architecture",
+ * table 3-27 */
+/* - access type */
+#define ETMAAT_IFETCH		0
+#define ETMAAT_IEXEC		1
+#define ETMAAT_IEXECPASS	2
+#define ETMAAT_IEXECFAIL	3
+#define ETMAAT_DLOADSTORE	4
+#define ETMAAT_DLOAD		5
+#define ETMAAT_DSTORE		6
+/* - comparison access size */
+#define ETMAAT_JAVA		(0 << 3)
+#define ETMAAT_THUMB		(1 << 3)
+#define ETMAAT_ARM		(3 << 3)
+/* - data value comparison control */
+#define ETMAAT_NOVALCMP		(0 << 5)
+#define ETMAAT_VALMATCH		(1 << 5)
+#define ETMAAT_VALNOMATCH	(3 << 5)
+/* - exact match */
+#define ETMAAT_EXACTMATCH	(1 << 7)
+/* - context id comparator control */
+#define ETMAAT_IGNCONTEXTID	(0 << 8)
+#define ETMAAT_VALUE1		(1 << 8)
+#define ETMAAT_VALUE2		(2 << 8)
+#define ETMAAT_VALUE3		(3 << 8)
+/* - security level control */
+#define ETMAAT_IGNSECURITY	(0 << 10)
+#define ETMAAT_NSONLY		(1 << 10)
+#define ETMAAT_SONLY		(2 << 10)
+
+#define ETMR_COMP_VAL(x)	(0x40 + (x) * 4)
+#define ETMR_COMP_ACC_TYPE(x)	(0x80 + (x) * 4)
+
+/* ETM status register, "ETM Architecture", 3.3.2 */
+#define ETMR_STATUS		(0x10)
+#define ETMST_OVERFLOW		(1 << 0)
+#define ETMST_PROGBIT		(1 << 1)
+#define ETMST_STARTSTOP		(1 << 2)
+#define ETMST_TRIGGER		(1 << 3)
+
+#define etm_progbit(t)		(etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
+#define etm_started(t)		(etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
+#define etm_triggered(t)	(etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
+
+#define ETMR_TRACEENCTRL2	0x1c
+#define ETMR_TRACEENCTRL	0x24
+#define ETMTE_INCLEXCL		(1 << 24)
+#define ETMR_TRACEENEVT		0x20
+#define ETMCTRL_OPTS		(ETMCTRL_DO_CPRT | \
+				ETMCTRL_DATA_DO_ADDR | \
+				ETMCTRL_BRANCH_OUTPUT | \
+				ETMCTRL_DO_CONTEXTID)
+
+/* ETB registers, "CoreSight Components TRM", 9.3 */
+#define ETBR_DEPTH		0x04
+#define ETBR_STATUS		0x0c
+#define ETBR_READMEM		0x10
+#define ETBR_READADDR		0x14
+#define ETBR_WRITEADDR		0x18
+#define ETBR_TRIGGERCOUNT	0x1c
+#define ETBR_CTRL		0x20
+#define ETBR_FORMATTERCTRL	0x304
+#define ETBFF_ENFTC		1
+#define ETBFF_ENFCONT		(1 << 1)
+#define ETBFF_FONFLIN		(1 << 4)
+#define ETBFF_MANUAL_FLUSH	(1 << 6)
+#define ETBFF_TRIGIN		(1 << 8)
+#define ETBFF_TRIGEVT		(1 << 9)
+#define ETBFF_TRIGFL		(1 << 10)
+
+#define etb_writel(t, v, x) \
+	(__raw_writel((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+
+#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etm_unlock(t) \
+	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
+#define etb_unlock(t) \
+	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+
+#endif /* __ASM_HARDWARE_CORESIGHT_H */
+
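
For illustration only (not part of this patch), a sketch of how the accessors above combine; the helper name is hypothetical and the tracectx is assumed to have been set up (register bases ioremap'd) by a platform driver elsewhere.

/* hypothetical helper, not from this patch */
static int example_etm_enter_program_mode(struct tracectx *t)
{
	unsigned long timeout = TRACER_TIMEOUT;

	etm_unlock(t);
	/* set the programming bit and wait for the ETM to reflect it in its status register */
	etm_writel(t, etm_readl(t, ETMR_CTRL) | ETMCTRL_PROGRAM, ETMR_CTRL);
	while (!etm_progbit(t) && --timeout)
		;
	return timeout ? 0 : -ETIMEDOUT;
}
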
diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h
index 8d60ad267e3a..5daea2961d48 100644
--- a/arch/arm/include/asm/hardware/iop3xx.h
+++ b/arch/arm/include/asm/hardware/iop3xx.h
@@ -234,7 +234,13 @@ extern int iop3xx_get_init_atu(void);
 void iop3xx_map_io(void);
 void iop_init_cp6_handler(void);
 void iop_init_time(unsigned long tickrate);
-unsigned long iop_gettimeoffset(void);
+
+static inline u32 read_tmr0(void)
+{
+	u32 val;
+	asm volatile("mrc p6, 0, %0, c0, c1, 0" : "=r" (val));
+	return val;
+}
 
 static inline void write_tmr0(u32 val)
 {
@@ -253,6 +259,11 @@ static inline u32 read_tcr0(void)
 	return val;
 }
 
+static inline void write_tcr0(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c2, c1, 0" : : "r" (val));
+}
+
 static inline u32 read_tcr1(void)
 {
 	u32 val;
@@ -260,6 +271,11 @@ static inline u32 read_tcr1(void)
 	return val;
 }
 
+static inline void write_tcr1(u32 val)
+{
+	asm volatile("mcr p6, 0, %0, c3, c1, 0" : : "r" (val));
+}
+
 static inline void write_trr0(u32 val)
 {
 	asm volatile("mcr p6, 0, %0, c4, c1, 0" : : "r" (val));
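
For illustration only (not from this patch): the IOP3xx timers count down, so the new read_tcr0() accessor pairs naturally with the reload value programmed through write_trr0(); the helper below is hypothetical.

/* hypothetical helper, not from this patch */
static inline u32 example_iop3xx_ticks_since_reload(u32 reload)
{
	return reload - read_tcr0();	/* TCR0 counts down from the TRR0 reload value */
}
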
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index cefedf062138..5421d82a2572 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -125,8 +125,10 @@
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
+#ifndef __virt_to_phys
 #define __virt_to_phys(x)	((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
 
 /*
  * Convert a physical address to a Page Frame Number and back
@@ -134,6 +136,12 @@
 #define	__phys_to_pfn(paddr)	((paddr) >> PAGE_SHIFT)
 #define	__pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)
 
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -194,7 +202,8 @@ static inline void *phys_to_virt(unsigned long x)
 #ifndef __virt_to_bus
 #define __virt_to_bus	__virt_to_phys
 #define __bus_to_virt	__phys_to_virt
-#define __pfn_to_bus(x)	((x) << PAGE_SHIFT)
+#define __pfn_to_bus(x)	__pfn_to_phys(x)
+#define __bus_to_pfn(x)	__phys_to_pfn(x)
 #endif
 
 static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -293,11 +302,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #endif	/* !CONFIG_DISCONTIGMEM */
 
 /*
- * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
-
-/*
  * Optional coherency support.  Currently used only by selected
  * Intel XSC3-based systems.
  */
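
For illustration only (not from this patch): page_to_phys() and phys_to_page() added above are exact inverses, both going through the pfn; the check below is a hypothetical sketch.

/* hypothetical helper, not from this patch */
static inline bool example_page_phys_roundtrip(struct page *page)
{
	return phys_to_page(page_to_phys(page)) == page;
}
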
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 201ccaa11f61..11397687f42c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -304,13 +304,23 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
 
 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
+#define __pgprot_modify(prot,mask,bits)	\
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
 #define pgprot_noncached(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 #define pgprot_writecombine(prot) \
-	__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
+#if __LINUX_ARM_ARCH__ >= 7
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
+#else
+#define pgprot_dmacoherent(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
+#endif
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_present(pmd)	(pmd_val(pmd))
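
For illustration only (not from this patch), the clear-then-set pattern factored out by __pgprot_modify(), shown on plain integers as a small userspace sketch; the mask and bit values are made up and do not correspond to real L_PTE_MT_* encodings.

#include <stdio.h>

/* hypothetical userspace sketch, not from this patch */
static unsigned long prot_modify(unsigned long prot, unsigned long mask,
				 unsigned long bits)
{
	return (prot & ~mask) | bits;	/* drop the masked field, insert the new bits */
}

int main(void)
{
	unsigned long prot = 0x1f3;	/* made-up protection value */

	printf("%#lx\n", prot_modify(prot, 0x3c, 0x08));	/* prints 0x1cb */
	return 0;
}
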
diff --git a/arch/arm/include/asm/swab.h b/arch/arm/include/asm/swab.h
index ca2bf2f6d6ea..9997ad20eff1 100644
--- a/arch/arm/include/asm/swab.h
+++ b/arch/arm/include/asm/swab.h
@@ -22,6 +22,24 @@
 # define __SWAB_64_THRU_32__
 #endif
 
+#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+	__asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+	return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+	__asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+	return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#else
+
 static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
 	__u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 
 #endif
 
+#endif
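
For illustration only (not from this patch): what the ARMv6+ rev and rev16 instructions used above compute, written as a portable userspace sketch.

#include <stdint.h>
#include <stdio.h>

/* hypothetical userspace sketch, not from this patch */
static uint32_t c_swab32(uint32_t x)	/* rev: reverse all four bytes */
{
	return (x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24);
}

static uint16_t c_swab16(uint16_t x)	/* byte swap of a halfword, as rev16 does per halfword */
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	printf("%08x %04x\n", c_swab32(0x12345678), c_swab16(0xabcd));	/* 78563412 cdab */
	return 0;
}
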
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index d65b2f5bf41f..058e7e90881d 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -138,21 +138,26 @@ extern unsigned int user_debug;
 #define dmb() __asm__ __volatile__ ("" : : : "memory")
 #endif
 
-#ifndef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb()		dmb()
+#define rmb()		dmb()
+#define wmb()		dmb()
+#else
 #define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
+#endif
+
+#ifndef CONFIG_SMP
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
 #else
-#define mb()		dmb()
-#define rmb()		dmb()
-#define wmb()		dmb()
-#define smp_mb()	dmb()
-#define smp_rmb()	dmb()
-#define smp_wmb()	dmb()
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
 #endif
+
 #define read_barrier_depends()		do { } while(0)
 #define smp_read_barrier_depends()	do { } while(0)
 
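
For reference only (an illustrative summary, not part of the patch), how the barrier macros resolve after this hunk:

/*
 * Illustrative summary, not from this patch:
 *   ARMv7 or CONFIG_SMP:  mb()/rmb()/wmb()              -> dmb()
 *   otherwise:            mb()/rmb()/wmb()              -> arch_is_coherent() ? dmb() : barrier()
 *   CONFIG_SMP:           smp_mb()/smp_rmb()/smp_wmb()  -> mb()/rmb()/wmb()
 *   !CONFIG_SMP:          smp_mb()/smp_rmb()/smp_wmb()  -> barrier()
 */
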