Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h        |  2
-rw-r--r--  include/asm-generic/audit_write.h   |  2
-rw-r--r--  include/asm-generic/bug.h           |  8
-rw-r--r--  include/asm-generic/kdebug.h        |  1
-rw-r--r--  include/asm-generic/memory_model.h  |  2
-rw-r--r--  include/asm-generic/mutex-dec.h     | 26
-rw-r--r--  include/asm-generic/mutex-xchg.h    |  9
-rw-r--r--  include/asm-generic/pgtable.h       | 50
8 files changed, 66 insertions, 34 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 4ec0a296bdec..7abdaa91ccd3 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -251,7 +251,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
 #define atomic_long_cmpxchg(l, old, new) \
 	(atomic_cmpxchg((atomic_t *)(l), (old), (new)))
 #define atomic_long_xchg(v, new) \
-	(atomic_xchg((atomic_t *)(l), (new)))
+	(atomic_xchg((atomic_t *)(v), (new)))
 
 #endif /* BITS_PER_LONG == 64 */
 
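Note on the atomic.h change: the old body of atomic_long_xchg() referred to `l`, which is not one of the macro's parameters, so it only expanded correctly when the caller happened to have a variable literally named `l` in scope. A minimal user-space sketch of the same macro-hygiene bug; broken_xchg/fixed_xchg are made-up names, not kernel macros:

#include <stdio.h>

/* Hypothetical illustration only: the "broken" macro names `l` in its body
 * while declaring `v` as its parameter, mirroring the bug fixed above. */
#define broken_xchg(v, new)	(tmp = (l), (l) = (new), tmp)
#define fixed_xchg(v, new)	(tmp = (v), (v) = (new), tmp)

int main(void)
{
	long tmp, l = 1, counter = 10;

	long old1 = broken_xchg(counter, 2);	/* silently swaps `l`, not counter */
	long old2 = fixed_xchg(counter, 2);	/* swaps counter, as intended */

	printf("l=%ld counter=%ld old1=%ld old2=%ld\n", l, counter, old1, old2);
	return 0;	/* prints: l=2 counter=2 old1=1 old2=10 */
}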
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index f10d367fb2a5..c5f1c2c920e2 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -1,6 +1,8 @@
 #include <asm-generic/audit_dir_write.h>
 __NR_acct,
+#ifdef __NR_swapon
 __NR_swapon,
+#endif
 __NR_quotactl,
 __NR_truncate,
 #ifdef __NR_truncate64
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 12c07c1866b2..4c794d73fb84 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -8,9 +8,17 @@
 #ifdef CONFIG_GENERIC_BUG
 #ifndef __ASSEMBLY__
 struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	unsigned long	bug_addr;
+#else
+	signed int	bug_addr_disp;
+#endif
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 	const char	*file;
+#else
+	signed int	file_disp;
+#endif
 	unsigned short	line;
 #endif
 	unsigned short	flags;
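Note on the bug.h change: with CONFIG_GENERIC_BUG_RELATIVE_POINTERS each bug_entry stores signed 32-bit displacements (bug_addr_disp, file_disp) instead of full pointers, shrinking the table on 64-bit kernels. The decode rule sketched below (field address plus displacement) is an assumption about how such relative pointers are normally resolved, not something shown in this diff:

#include <stdio.h>
#include <stdint.h>

/* Sketch: a signed 32-bit displacement relative to the field itself can
 * stand in for a full pointer as long as the target is within +/- 2 GiB. */
struct rel_entry {
	int32_t addr_disp;	/* analogue of bug_addr_disp (assumed semantics) */
};

static const char target[] = "object referenced by displacement";
static struct rel_entry entry;

int main(void)
{
	/* encode: distance from the field to the target */
	entry.addr_disp = (int32_t)((intptr_t)target - (intptr_t)&entry.addr_disp);

	/* decode: field address + displacement recovers the pointer */
	const char *p = (const char *)((intptr_t)&entry.addr_disp + entry.addr_disp);
	printf("%s\n", p);
	return 0;
}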
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
index 2b799c90b2d4..11e57b6a85fc 100644
--- a/include/asm-generic/kdebug.h
+++ b/include/asm-generic/kdebug.h
@@ -3,6 +3,7 @@
 
 enum die_val {
 	DIE_UNUSED,
+	DIE_OOPS=1
 };
 
 #endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
index ae060c62aff1..18546d8eb78e 100644
--- a/include/asm-generic/memory_model.h
+++ b/include/asm-generic/memory_model.h
@@ -34,7 +34,7 @@
 
 #define __pfn_to_page(pfn) \
 ({	unsigned long __pfn = (pfn); \
-	unsigned long __nid = arch_pfn_to_nid(pfn); \
+	unsigned long __nid = arch_pfn_to_nid(__pfn); \
 	NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
 })
 
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be6743f..f104af7cf437 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
 		fail_fn(count);
 }
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
-	}
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif
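Note on the mutex-dec.h fastpath: the helpers above are built around a counter where 1 means unlocked and 0 or a negative value means locked, so the trylock succeeds only if it can atomically move the counter from 1 to 0 and otherwise leaves the slow path (fail_fn) to sort things out. A user-space sketch of that trylock shape with C11 atomics; demo_count, demo_trylock and demo_unlock are invented names for illustration:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_count = 1;	/* 1 = unlocked, 0 = locked */

static int demo_trylock(void)
{
	int expected = 1;
	/* succeed only if the counter atomically goes 1 -> 0 */
	return atomic_compare_exchange_strong(&demo_count, &expected, 0);
}

static void demo_unlock(void)
{
	atomic_store(&demo_count, 1);
}

int main(void)
{
	printf("first try:  %d\n", demo_trylock());	/* 1: acquired */
	printf("second try: %d\n", demo_trylock());	/* 0: already held */
	demo_unlock();
	printf("third try:  %d\n", demo_trylock());	/* 1: free again */
	return 0;
}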
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2cbfebe..580a6d35c700 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
 		fail_fn(count);
 }
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 
 	return prev;
 }
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index ef87f889ef62..72ebe91005a8 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
 /*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier. Although no
@@ -289,6 +293,52 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #define arch_flush_lazy_cpu_mode()	do {} while (0)
 #endif
 
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+					unsigned long pfn, unsigned long size)
+{
+	return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+static inline void untrack_pfn_vma(struct vm_area_struct *vma,
+					unsigned long pfn, unsigned long size)
+{
+}
+#else
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+				unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+				unsigned long size);
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
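Note on the pgtable.h addition: the block above introduces a three-call PFN-mapping tracking interface (track_pfn_vma_new when a pfn mapping is established, track_pfn_vma_copy when the covering vma is duplicated, untrack_pfn_vma on unmap), with no-op inline stubs unless an architecture defines __HAVE_PFNMAP_TRACKING and supplies real implementations. A small stand-alone sketch of that stub-or-override pattern; HAVE_ARCH_TRACKING and the *_mapping names are hypothetical, not kernel symbols:

#include <stdio.h>

#ifndef HAVE_ARCH_TRACKING
/* generic case: cheap no-op stubs, mirroring the !__HAVE_PFNMAP_TRACKING branch */
static inline int track_mapping(unsigned long pfn, unsigned long size)
{
	return 0;
}
static inline void untrack_mapping(unsigned long pfn, unsigned long size)
{
}
#else
/* an architecture that defines HAVE_ARCH_TRACKING provides real versions */
int track_mapping(unsigned long pfn, unsigned long size);
void untrack_mapping(unsigned long pfn, unsigned long size);
#endif

int main(void)
{
	if (track_mapping(0x1000, 4096) == 0)
		printf("mapping tracked (no-op in the generic case)\n");
	untrack_mapping(0x1000, 4096);
	return 0;
}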