author     Russell King <rmk@dyn-67.arm.linux.org.uk>   2007-02-20 14:13:30 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2007-02-20 14:13:30 -0500
commit     5a84d159061d914c8dd4aa372ac6e9529c2be453 (patch)
tree       9b08af78085334af44414adafe0096276f8fe0ff /include/asm-generic
parent     e80a0e6e7ccdf64575d4384cb4172860422f5b81 (diff)
parent     7d477a04a619e90ee08724e8f2d8803c6bdfcef8 (diff)
Merge ARM fixes
Diffstat (limited to 'include/asm-generic')
-rw-r--r--	include/asm-generic/dma-mapping-broken.h	 4
-rw-r--r--	include/asm-generic/gpio.h	25
-rw-r--r--	include/asm-generic/memory_model.h	 2
-rw-r--r--	include/asm-generic/mman.h	 1
-rw-r--r--	include/asm-generic/pgtable.h	13
5 files changed, 42 insertions, 3 deletions
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index a7f1a55ce6b0..29413d3d4605 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -3,7 +3,6 @@
 
 /* This is used for archs that do not support DMA */
 
-
 static inline void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag)
@@ -19,4 +18,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 	BUG();
 }
 
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
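
For illustration only (not part of the patch): the two new aliases let generic
driver code that calls the noncoherent API compile on no-DMA architectures. A
hypothetical caller might look like the fragment below; hypothetical_get_buffer()
is an invented name, and the call still BUG()s at runtime if ever reached.

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* Hypothetical driver fragment: dma_alloc_noncoherent() expands to
     * dma_alloc_coherent(dev, size, handle, GFP_KERNEL), which BUG()s on
     * architectures that use dma-mapping-broken.h.
     */
    static void *hypothetical_get_buffer(struct device *dev, size_t size,
                                         dma_addr_t *handle)
    {
            return dma_alloc_noncoherent(dev, size, handle, GFP_KERNEL);
    }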
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
new file mode 100644
index 000000000000..2d0aab1d8611
--- /dev/null
+++ b/include/asm-generic/gpio.h
@@ -0,0 +1,25 @@
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+	return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+	might_sleep();
+	return gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+	might_sleep();
+	gpio_set_value(gpio, value);
+}
+
+#endif /* _ASM_GENERIC_GPIO_H */
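
A usage sketch (assumed, not part of the patch) for the wrappers added above:
a driver checks gpio_cansleep() and only uses the sleeping accessors outside
atomic context. hypothetical_read_button() is an illustrative name.

    #include <linux/kernel.h>
    #include <asm/gpio.h>

    /* Illustrative helper, not from the kernel tree: pick the accessor
     * based on whether reading this GPIO may sleep (e.g. a GPIO behind
     * an I2C or SPI expander).
     */
    static int hypothetical_read_button(unsigned gpio)
    {
            if (gpio_cansleep(gpio))
                    return gpio_get_value_cansleep(gpio);	/* may sleep */
            return gpio_get_value(gpio);			/* atomic-safe */
    }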
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index 8078cbd2c016..30d8d33491dd 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -54,7 +54,7 @@
 #define __page_to_pfn(pg)					\
 ({	struct page *__pg = (pg);				\
 	int __sec = page_to_section(__pg);			\
-	__pg - __section_mem_map_addr(__nr_to_section(__sec));	\
+	(unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec)));	\
 })
 
 #define __pfn_to_page(pfn)					\
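
Side note on the one-line change above, for illustration only: pointer
subtraction yields a signed ptrdiff_t, and the added cast makes the SPARSEMEM
__page_to_pfn() return the unsigned long pfn its callers expect. A standalone
sketch of the same point, with invented names:

    #include <stddef.h>

    struct page { unsigned long flags; };	/* stand-in for the real struct page */

    /* sketch_page_to_pfn() is illustrative, not a kernel function. */
    static unsigned long sketch_page_to_pfn(struct page *pg, struct page *base)
    {
            ptrdiff_t idx = pg - base;	/* signed difference of pointers */
            return (unsigned long)idx;	/* pfn as an unsigned long */
    }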
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 3b41d2bb70da..5e3dde2ee5ad 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -36,7 +36,6 @@
 #define MADV_DOFORK	11		/* do inherit across fork */
 
 /* compatibility flags */
-#define MAP_ANON	MAP_ANONYMOUS
 #define MAP_FILE	0
 
 #endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 9d774d07d95b..00c23433b39f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -183,6 +183,19 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#endif
+
+/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
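
A sketch (assumed usage, not part of this patch) of the pairing convention the
new comment describes: the context-switch path brackets the batchable state
updates with exactly one enter/leave pair, never nested.
hypothetical_switch_cpu_state() is a stand-in for the real arch work.

    #include <asm/pgtable.h>

    static void hypothetical_switch_cpu_state(void) { /* arch-specific updates */ }

    /* Illustration only: both helpers default to no-ops unless the arch
     * defines __HAVE_ARCH_ENTER_LAZY_CPU_MODE (e.g. a paravirt backend
     * that batches the updates into a single hypercall).
     */
    static void hypothetical_context_switch(void)
    {
            arch_enter_lazy_cpu_mode();		/* begin batching */
            hypothetical_switch_cpu_state();	/* work a hypervisor may coalesce */
            arch_leave_lazy_cpu_mode();		/* flush; entry and exit are paired */
    }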