author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2012-04-22 02:28:35 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2012-04-22 02:28:35 -0400
commit     57b8628bb0ac4e47c806e45c5bbd89282e93869b
tree       ee9289f0898054474b7e5054abdb3ffb78666436 /include/asm-generic
parent     486c8aba39e5f194519cd5c0e85e5d1de8b74b03
parent     66f75a5d028beaf67c931435fdc3e7823125730c
Merge commit 'v3.4-rc4' into next
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/atomic.h              |   3
-rw-r--r--  include/asm-generic/barrier.h             |  50
-rw-r--r--  include/asm-generic/bitops/atomic.h       |   2
-rw-r--r--  include/asm-generic/bug.h                 |   6
-rw-r--r--  include/asm-generic/cmpxchg.h             |  88
-rw-r--r--  include/asm-generic/dma-mapping-common.h  |   1
-rw-r--r--  include/asm-generic/exec.h                |  19
-rw-r--r--  include/asm-generic/getorder.h            |  53
-rw-r--r--  include/asm-generic/gpio.h                |   4
-rw-r--r--  include/asm-generic/mman-common.h         |   4
-rw-r--r--  include/asm-generic/pci-bridge.h          |   6
-rw-r--r--  include/asm-generic/pci.h                 |  24
-rw-r--r--  include/asm-generic/pgtable.h             |  62
-rw-r--r--  include/asm-generic/posix_types.h         | 109
-rw-r--r--  include/asm-generic/socket.h              |   5
-rw-r--r--  include/asm-generic/switch_to.h           |  30
-rw-r--r--  include/asm-generic/system.h              | 141
-rw-r--r--  include/asm-generic/tlbflush.h            |   2
-rw-r--r--  include/asm-generic/unistd.h              |   2
-rw-r--r--  include/asm-generic/vmlinux.lds.h         |  36
20 files changed, 352 insertions(+), 295 deletions(-)
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e37963c1df4d..1ced6413ea03 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -15,6 +15,8 @@
 #ifndef __ASM_GENERIC_ATOMIC_H
 #define __ASM_GENERIC_ATOMIC_H
 
+#include <asm/cmpxchg.h>
+
 #ifdef CONFIG_SMP
 /* Force people to define core atomics */
 # if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
@@ -52,7 +54,6 @@
 #define atomic_set(v, i) (((v)->counter) = (i))
 
 #include <linux/irqflags.h>
-#include <asm/system.h>
 
 /**
  * atomic_add_return - add integer to atomic variable
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
new file mode 100644
index 000000000000..639d7a4d033b
--- /dev/null
+++ b/include/asm-generic/barrier.h
@@ -0,0 +1,50 @@
+/* Generic barrier definitions, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_BARRIER_H
+#define __ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() asm volatile ("nop")
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * This implementation only contains a compiler barrier.
+ */
+
+#define mb()	asm volatile ("": : :"memory")
+#define rmb()	mb()
+#define wmb()	asm volatile ("": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb()	mb()
+#define smp_rmb()	rmb()
+#define smp_wmb()	wmb()
+#else
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#endif
+
+#define set_mb(var, value)  do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define read_barrier_depends()		do {} while (0)
+#define smp_read_barrier_depends()	do {} while (0)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_GENERIC_BARRIER_H */
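
Note: on SMP builds the smp_*() macros above map to the full barriers; on UP they collapse to plain compiler barriers. A minimal sketch of the pairing they are meant to express, assuming the architecture wires <asm/barrier.h> to this generic header (the data/ready variables are hypothetical, not part of this commit):

	static int data;
	static int ready;

	static void publish(void)
	{
		data = 42;
		smp_wmb();	/* order the data store before the flag store */
		ready = 1;
	}

	static int try_consume(void)
	{
		if (!ready)
			return -1;
		smp_rmb();	/* order the flag load before the data load */
		return data;
	}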
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index ecc44a8e2b44..9ae6c34dc191 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,7 +2,7 @@
 #define _ASM_GENERIC_BITOPS_ATOMIC_H_
 
 #include <asm/types.h>
-#include <asm/system.h>
+#include <linux/irqflags.h>
 
 #ifdef CONFIG_SMP
 #include <asm/spinlock.h>
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 84458b0c38d1..2520a6e241dc 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -134,7 +134,7 @@ extern void warn_slowpath_null(const char *file, const int line);
 #endif
 
 #define WARN_ON_ONCE(condition) ({				\
-	static bool __warned;					\
+	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
 								\
 	if (unlikely(__ret_warn_once))				\
@@ -144,7 +144,7 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 
 #define WARN_ONCE(condition, format...) ({			\
-	static bool __warned;					\
+	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
 								\
 	if (unlikely(__ret_warn_once))				\
@@ -154,7 +154,7 @@ extern void warn_slowpath_null(const char *file, const int line);
 })
 
 #define WARN_TAINT_ONCE(condition, taint, format...) ({	\
-	static bool __warned;					\
+	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
 								\
 	if (unlikely(__ret_warn_once))				\
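
Note: the only change here is that the static once-flags now live in a dedicated .data.unlikely section (collected by the vmlinux.lds.h hunk at the end of this merge); callers are unaffected. A sketch of typical use, with a hypothetical helper:

	#include <linux/bug.h>

	static void check_len(unsigned long len)
	{
		if (WARN_ON_ONCE(len == 0))	/* prints a backtrace only the first time */
			return;
		/* ... */
	}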
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
index 213ac6e8fe39..14883026015d 100644
--- a/include/asm-generic/cmpxchg.h
+++ b/include/asm-generic/cmpxchg.h
@@ -1,22 +1,98 @@
+/*
+ * Generic UP xchg and cmpxchg using interrupt disablement. Does not
+ * support SMP.
+ */
+
 #ifndef __ASM_GENERIC_CMPXCHG_H
 #define __ASM_GENERIC_CMPXCHG_H
 
-/*
- * Generic cmpxchg
- *
- * Uses the local cmpxchg. Does not support SMP.
- */
 #ifdef CONFIG_SMP
 #error "Cannot use generic cmpxchg on SMP"
 #endif
 
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#ifndef xchg
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long ret, flags;
+
+	switch (size) {
+	case 1:
+#ifdef __xchg_u8
+		return __xchg_u8(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u8 *)ptr;
+		*(volatile u8 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u8 */
+
+	case 2:
+#ifdef __xchg_u16
+		return __xchg_u16(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u16 *)ptr;
+		*(volatile u16 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u16 */
+
+	case 4:
+#ifdef __xchg_u32
+		return __xchg_u32(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u32 *)ptr;
+		*(volatile u32 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+	case 8:
+#ifdef __xchg_u64
+		return __xchg_u64(x, ptr);
+#else
+		local_irq_save(flags);
+		ret = *(volatile u64 *)ptr;
+		*(volatile u64 *)ptr = x;
+		local_irq_restore(flags);
+		return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+	default:
+		__xchg_called_with_bad_pointer();
+		return x;
+	}
+}
+
+#define xchg(ptr, x) \
+	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+#endif /* xchg */
+
 /*
  * Atomic compare and exchange.
  *
  * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
  * a cmpxchg primitive faster than repeated local irq save/restore exists.
  */
+#include <asm-generic/cmpxchg-local.h>
+
 #define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
 
-#endif
+#endif /* __ASM_GENERIC_CMPXCHG_H */
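
Note: the new __xchg() emulates an atomic exchange on uniprocessor kernels by disabling interrupts around a plain load/store pair. A sketch of how the resulting macros are typically called (the flag variable is hypothetical, shown only for illustration):

	static unsigned int flag;

	static unsigned int example(void)
	{
		unsigned int old;

		old = xchg(&flag, 1);		/* store 1, return the previous value */
		old = cmpxchg(&flag, 1, 2);	/* store 2 only if the current value is 1 */
		return old;
	}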
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index 9fa3f96e38cf..2e248d8924dc 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_DMA_MAPPING_H
 
 #include <linux/kmemcheck.h>
+#include <linux/bug.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
new file mode 100644
index 000000000000..567766b0074a
--- /dev/null
+++ b/include/asm-generic/exec.h
@@ -0,0 +1,19 @@
+/* Generic process execution definitions, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_EXEC_H
+#define __ASM_GENERIC_EXEC_H
+
+#define arch_align_stack(x) (x)
+
+#endif /* __ASM_GENERIC_EXEC_H */
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index 67e7245dc9b3..65e4468ac53d 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -4,21 +4,58 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/log2.h>
 
-/* Pure 2^n version of get_order */
-static inline __attribute_const__ int get_order(unsigned long size)
+/*
+ * Runtime evaluation of get_order()
+ */
+static inline __attribute_const__
+int __get_order(unsigned long size)
 {
 	int order;
 
-	size = (size - 1) >> (PAGE_SHIFT - 1);
-	order = -1;
-	do {
-		size >>= 1;
-		order++;
-	} while (size);
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	order = fls(size);
+#else
+	order = fls64(size);
+#endif
 	return order;
 }
 
+/**
+ * get_order - Determine the allocation order of a memory size
+ * @size: The size for which to get the order
+ *
+ * Determine the allocation order of a particular sized block of memory.  This
+ * is on a logarithmic scale, where:
+ *
+ *	0 -> 2^0 * PAGE_SIZE and below
+ *	1 -> 2^1 * PAGE_SIZE to 2^0 * PAGE_SIZE + 1
+ *	2 -> 2^2 * PAGE_SIZE to 2^1 * PAGE_SIZE + 1
+ *	3 -> 2^3 * PAGE_SIZE to 2^2 * PAGE_SIZE + 1
+ *	4 -> 2^4 * PAGE_SIZE to 2^3 * PAGE_SIZE + 1
+ *	...
+ *
+ * The order returned is used to find the smallest allocation granule required
+ * to hold an object of the specified size.
+ *
+ * The result is undefined if the size is 0.
+ *
+ * This function may be used to initialise variables with compile time
+ * evaluations of constants.
+ */
+#define get_order(n)						\
+(								\
+	__builtin_constant_p(n) ? (				\
+		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
+		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
+		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
+	) :							\
+	__get_order(n)						\
+)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_GENERIC_GETORDER_H */
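
Note: a few worked values for the constant-folded branch, assuming PAGE_SHIFT == 12 (4 KiB pages) and <linux/bug.h> for BUILD_BUG_ON; this is an illustration, not part of the patch:

	static inline void get_order_examples(void)
	{
		BUILD_BUG_ON(get_order(1)     != 0);	/* anything <= PAGE_SIZE -> order 0 */
		BUILD_BUG_ON(get_order(4096)  != 0);	/* exactly one page      -> order 0 */
		BUILD_BUG_ON(get_order(4097)  != 1);	/* just over one page    -> order 1 */
		BUILD_BUG_ON(get_order(8192)  != 1);	/* two pages             -> order 1 */
		BUILD_BUG_ON(get_order(16384) != 2);	/* four pages            -> order 2 */
	}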
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 1ff4e221cb4d..5f52690c3c8f 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -142,9 +142,9 @@ extern int __must_check gpiochip_reserve(int start, int ngpio);
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
 extern int __must_check gpiochip_remove(struct gpio_chip *chip);
-extern struct gpio_chip *gpiochip_find(void *data,
+extern struct gpio_chip *gpiochip_find(const void *data,
 				       int (*match)(struct gpio_chip *chip,
-						    void *data));
+						    const void *data));
 
 
 /* Always use the library code for GPIO management calls,
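
Note: the constification ripples into match callbacks. A hypothetical lookup by label under the new prototype (sketch only; assumes gpiolib and <linux/gpio.h>):

	#include <linux/gpio.h>
	#include <linux/string.h>

	static int match_label(struct gpio_chip *chip, const void *data)
	{
		return !strcmp(chip->label, data);
	}

	static struct gpio_chip *find_bank0(void)
	{
		return gpiochip_find("bank0", match_label);
	}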
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
index 787abbb6d867..d030d2c2647a 100644
--- a/include/asm-generic/mman-common.h
+++ b/include/asm-generic/mman-common.h
@@ -48,6 +48,10 @@
 #define MADV_HUGEPAGE	14		/* Worth backing with hugepages */
 #define MADV_NOHUGEPAGE	15		/* Not worth backing with hugepages */
 
+#define MADV_DONTDUMP   16		/* Explicity exclude from the core dump,
+					   overrides the coredump filter bits */
+#define MADV_DODUMP	17		/* Clear the MADV_NODUMP flag */
+
 /* compatibility flags */
 #define MAP_FILE	0
 
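
Note: these values are consumed through madvise(2). A userspace sketch, assuming a libc that already exposes the new constants:

	#include <sys/mman.h>

	/* Keep a buffer holding secrets out of any core dump. */
	static int hide_from_coredump(void *buf, size_t len)
	{
		return madvise(buf, len, MADV_DONTDUMP);
	}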
diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h
index 4a5aca2a2c94..a5b5d5a89a4f 100644
--- a/include/asm-generic/pci-bridge.h
+++ b/include/asm-generic/pci-bridge.h
@@ -45,6 +45,11 @@ static inline void pci_add_flags(int flags)
 	pci_flags |= flags;
 }
 
+static inline void pci_clear_flags(int flags)
+{
+	pci_flags &= ~flags;
+}
+
 static inline int pci_has_flag(int flag)
 {
 	return pci_flags & flag;
@@ -52,6 +57,7 @@ static inline int pci_has_flag(int flag)
 #else
 static inline void pci_set_flags(int flags) { }
 static inline void pci_add_flags(int flags) { }
+static inline void pci_clear_flags(int flags) { }
 static inline int pci_has_flag(int flag)
 {
 	return 0;
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index 26373cff4546..e80a0495e5b0 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -6,30 +6,6 @@
 #ifndef _ASM_GENERIC_PCI_H
 #define _ASM_GENERIC_PCI_H
 
-/**
- * pcibios_resource_to_bus - convert resource to PCI bus address
- * @dev: device which owns this resource
- * @region: converted bus-centric region (start,end)
- * @res: resource to convert
- *
- * Convert a resource to a PCI device bus address or bus window.
- */
-static inline void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
-			struct resource *res)
-{
-	region->start = res->start;
-	region->end = res->end;
-}
-
-static inline void
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
-			struct pci_bus_region *region)
-{
-	res->start = region->start;
-	res->end = region->end;
-}
-
 static inline struct resource *
 pcibios_select_root(struct pci_dev *pdev, struct resource *res)
 {
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 76bff2bff15e..125c54e98517 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -5,6 +5,7 @@
 #ifdef CONFIG_MMU
 
 #include <linux/mm_types.h>
+#include <linux/bug.h>
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
@@ -425,6 +426,8 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size);
 #endif
 
+#ifdef CONFIG_MMU
+
 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
 {
@@ -441,7 +444,66 @@ static inline int pmd_write(pmd_t pmd)
 	return 0;
 }
 #endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * This function is meant to be used by sites walking pagetables with
+ * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
+ * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd
+ * into a null pmd and the transhuge page fault can convert a null pmd
+ * into an hugepmd or into a regular pmd (if the hugepage allocation
+ * fails). While holding the mmap_sem in read mode the pmd becomes
+ * stable and stops changing under us only if it's not null and not a
+ * transhuge pmd. When those races occurs and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined so behaving like if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+	/* depend on compiler for an atomic pmd read */
+	pmd_t pmdval = *pmd;
+	/*
+	 * The barrier will stabilize the pmdval in a register or on
+	 * the stack so that it will stop changing under the code.
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	barrier();
+#endif
+	if (pmd_none(pmdval))
+		return 1;
+	if (unlikely(pmd_bad(pmdval))) {
+		if (!pmd_trans_huge(pmdval))
+			pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and they want to
+ * walk ptes while holding the mmap sem in read mode (write mode don't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run a pmd_trans_unstable before walking the ptes after
+ * split_huge_page_pmd returns (because it may have run when the pmd
+ * become null, but then a page fault can map in a THP and not a
+ * regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+	return 0;
 #endif
+}
+
+#endif /* CONFIG_MMU */
 
 #endif /* !__ASSEMBLY__ */
 
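
Note: a sketch of the intended caller pattern (not from this commit): a pagetable walker holding mmap_sem for read bails out of pmds that are none, huge, or being split before it maps the pte level. Names and structure here are illustrative only:

	#include <linux/mm.h>

	static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
	{
		pte_t *pte;

		if (pmd_trans_unstable(pmd))
			return 0;		/* behave as if the pmd were none */

		pte = pte_offset_map(pmd, addr);
		do {
			/* ... inspect *pte here ... */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		pte_unmap(pte - 1);
		return 0;
	}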
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
index 3dab00860e71..91d44bd4dde3 100644
--- a/include/asm-generic/posix_types.h
+++ b/include/asm-generic/posix_types.h
@@ -10,8 +10,13 @@
  * architectures, so that you can override them.
  */
 
+#ifndef __kernel_long_t
+typedef long		__kernel_long_t;
+typedef unsigned long	__kernel_ulong_t;
+#endif
+
 #ifndef __kernel_ino_t
-typedef unsigned long	__kernel_ino_t;
+typedef __kernel_ulong_t __kernel_ino_t;
 #endif
 
 #ifndef __kernel_mode_t
@@ -19,7 +24,7 @@ typedef unsigned int __kernel_mode_t;
 #endif
 
 #ifndef __kernel_nlink_t
-typedef unsigned long	__kernel_nlink_t;
+typedef __kernel_ulong_t __kernel_nlink_t;
 #endif
 
 #ifndef __kernel_pid_t
@@ -36,7 +41,7 @@ typedef unsigned int __kernel_gid_t;
 #endif
 
 #ifndef __kernel_suseconds_t
-typedef long		__kernel_suseconds_t;
+typedef __kernel_long_t	__kernel_suseconds_t;
 #endif
 
 #ifndef __kernel_daddr_t
@@ -44,8 +49,8 @@ typedef int __kernel_daddr_t;
 #endif
 
 #ifndef __kernel_uid32_t
-typedef __kernel_uid_t __kernel_uid32_t;
-typedef __kernel_gid_t __kernel_gid32_t;
+typedef unsigned int __kernel_uid32_t;
+typedef unsigned int __kernel_gid32_t;
 #endif
 
 #ifndef __kernel_old_uid_t
@@ -67,99 +72,29 @@ typedef unsigned int __kernel_size_t;
 typedef int		__kernel_ssize_t;
 typedef int		__kernel_ptrdiff_t;
 #else
-typedef unsigned long	__kernel_size_t;
-typedef long		__kernel_ssize_t;
-typedef long		__kernel_ptrdiff_t;
+typedef __kernel_ulong_t __kernel_size_t;
+typedef __kernel_long_t	__kernel_ssize_t;
+typedef __kernel_long_t	__kernel_ptrdiff_t;
 #endif
 #endif
 
+#ifndef __kernel_fsid_t
+typedef struct {
+	int	val[2];
+} __kernel_fsid_t;
+#endif
+
 /*
  * anything below here should be completely generic
  */
-typedef long		__kernel_off_t;
+typedef __kernel_long_t	__kernel_off_t;
 typedef long long	__kernel_loff_t;
-typedef long		__kernel_time_t;
-typedef long		__kernel_clock_t;
+typedef __kernel_long_t	__kernel_time_t;
+typedef __kernel_long_t	__kernel_clock_t;
 typedef int		__kernel_timer_t;
 typedef int		__kernel_clockid_t;
 typedef char *		__kernel_caddr_t;
 typedef unsigned short	__kernel_uid16_t;
 typedef unsigned short	__kernel_gid16_t;
 
-typedef struct {
-	int	val[2];
-} __kernel_fsid_t;
-
-#ifdef __KERNEL__
-
-#undef __FD_SET
-static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
-}
-
-#undef __FD_CLR
-static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
-}
-
-#undef __FD_ISSET
-static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
-{
-	unsigned long __tmp = __fd / __NFDBITS;
-	unsigned long __rem = __fd % __NFDBITS;
-	return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
-}
-
-/*
- * This will unroll the loop for the normal constant case (8 ints,
- * for a 256-bit fd_set)
- */
-#undef __FD_ZERO
-static inline void __FD_ZERO(__kernel_fd_set *__p)
-{
-	unsigned long *__tmp = __p->fds_bits;
-	int __i;
-
-	if (__builtin_constant_p(__FDSET_LONGS)) {
-		switch (__FDSET_LONGS) {
-		case 16:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			__tmp[ 4] = 0; __tmp[ 5] = 0;
-			__tmp[ 6] = 0; __tmp[ 7] = 0;
-			__tmp[ 8] = 0; __tmp[ 9] = 0;
-			__tmp[10] = 0; __tmp[11] = 0;
-			__tmp[12] = 0; __tmp[13] = 0;
-			__tmp[14] = 0; __tmp[15] = 0;
-			return;
-
-		case 8:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			__tmp[ 4] = 0; __tmp[ 5] = 0;
-			__tmp[ 6] = 0; __tmp[ 7] = 0;
-			return;
-
-		case 4:
-			__tmp[ 0] = 0; __tmp[ 1] = 0;
-			__tmp[ 2] = 0; __tmp[ 3] = 0;
-			return;
-		}
-	}
-	__i = __FDSET_LONGS;
-	while (__i) {
-		__i--;
-		*__tmp = 0;
-		__tmp++;
-	}
-}
-
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_GENERIC_POSIX_TYPES_H */
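
Note: with the new override hooks, an architecture whose user ABI differs from the kernel's native long (for example a 32-bit ABI on a 64-bit kernel) can retarget every derived type in one place. A hypothetical arch posix_types.h (sketch, not part of this commit):

	typedef long long		__kernel_long_t;
	typedef unsigned long long	__kernel_ulong_t;
	#define __kernel_long_t		__kernel_long_t

	#include <asm-generic/posix_types.h>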
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
index 49c1704173e7..b1bea03274d5 100644
--- a/include/asm-generic/socket.h
+++ b/include/asm-generic/socket.h
@@ -67,4 +67,9 @@
 
 #define SO_WIFI_STATUS		41
 #define SCM_WIFI_STATUS	SO_WIFI_STATUS
+#define SO_PEEK_OFF		42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS		43
+
 #endif /* __ASM_GENERIC_SOCKET_H */
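
Note: a userspace sketch of SO_PEEK_OFF (assumes a libc exporting the value): once set to a non-negative offset, MSG_PEEK reads advance that offset instead of always returning the head of the queue.

	#include <sys/socket.h>

	static int enable_peek_offset(int fd)
	{
		int off = 0;	/* start peeking at the queue head */

		return setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
	}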
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
new file mode 100644
index 000000000000..052c4ac04fd5
--- /dev/null
+++ b/include/asm-generic/switch_to.h
@@ -0,0 +1,30 @@
+/* Generic task switch macro wrapper, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_SWITCH_TO_H
+#define __ASM_GENERIC_SWITCH_TO_H
+
+#include <linux/thread_info.h>
+
+/*
+ * Context switching is now performed out-of-line in switch_to.S
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+				       struct task_struct *);
+
+#define switch_to(prev, next, last)					\
+	do {								\
+		((last) = __switch_to((prev), (next)));			\
+	} while (0)
+
+#endif /* __ASM_GENERIC_SWITCH_TO_H */
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
deleted file mode 100644
index 215efa74f5a2..000000000000
--- a/include/asm-generic/system.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Generic system definitions, based on MN10300 definitions.
- *
- * It should be possible to use these on really simple architectures,
- * but it serves more as a starting point for new ports.
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#ifndef __ASM_GENERIC_SYSTEM_H
-#define __ASM_GENERIC_SYSTEM_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-#include <linux/irqflags.h>
-
-#include <asm/cmpxchg-local.h>
-#include <asm/cmpxchg.h>
-
-struct task_struct;
-
-/* context switching is now performed out-of-line in switch_to.S */
-extern struct task_struct *__switch_to(struct task_struct *,
-		struct task_struct *);
-#define switch_to(prev, next, last)					\
-	do {								\
-		((last) = __switch_to((prev), (next)));		\
-	} while (0)
-
-#define arch_align_stack(x) (x)
-
-#define nop() asm volatile ("nop")
-
-#endif /* !__ASSEMBLY__ */
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- *
- * This implementation only contains a compiler barrier.
- */
-
-#define mb()	asm volatile ("": : :"memory")
-#define rmb()	mb()
-#define wmb()	asm volatile ("": : :"memory")
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#endif
-
-#define set_mb(var, value)  do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-#define read_barrier_depends()		do {} while (0)
-#define smp_read_barrier_depends()	do {} while (0)
-
-/*
- * we make sure local_irq_enable() doesn't cause priority inversion
- */
-#ifndef __ASSEMBLY__
-
-/* This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg(). */
-extern void __xchg_called_with_bad_pointer(void);
-
-static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-{
-	unsigned long ret, flags;
-
-	switch (size) {
-	case 1:
-#ifdef __xchg_u8
-		return __xchg_u8(x, ptr);
-#else
-		local_irq_save(flags);
-		ret = *(volatile u8 *)ptr;
-		*(volatile u8 *)ptr = x;
-		local_irq_restore(flags);
-		return ret;
-#endif /* __xchg_u8 */
-
-	case 2:
-#ifdef __xchg_u16
-		return __xchg_u16(x, ptr);
-#else
-		local_irq_save(flags);
-		ret = *(volatile u16 *)ptr;
-		*(volatile u16 *)ptr = x;
-		local_irq_restore(flags);
-		return ret;
-#endif /* __xchg_u16 */
-
-	case 4:
-#ifdef __xchg_u32
-		return __xchg_u32(x, ptr);
-#else
-		local_irq_save(flags);
-		ret = *(volatile u32 *)ptr;
-		*(volatile u32 *)ptr = x;
-		local_irq_restore(flags);
-		return ret;
-#endif /* __xchg_u32 */
-
-#ifdef CONFIG_64BIT
-	case 8:
-#ifdef __xchg_u64
-		return __xchg_u64(x, ptr);
-#else
-		local_irq_save(flags);
-		ret = *(volatile u64 *)ptr;
-		*(volatile u64 *)ptr = x;
-		local_irq_restore(flags);
-		return ret;
-#endif /* __xchg_u64 */
-#endif /* CONFIG_64BIT */
-
-	default:
-		__xchg_called_with_bad_pointer();
-		return x;
-	}
-}
-
-#define xchg(ptr, x) \
-	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_GENERIC_SYSTEM_H */
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
index c7af037024c7..d6d0a88430fe 100644
--- a/include/asm-generic/tlbflush.h
+++ b/include/asm-generic/tlbflush.h
@@ -9,6 +9,8 @@
 #error need to implement an architecture specific asm/tlbflush.h
 #endif
 
+#include <linux/bug.h>
+
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
 	BUG();
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 2292d1af9d70..991ef01cd77e 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
 
 /* fs/sendfile.c */
 #define __NR3264_sendfile 71
-__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
+__SYSCALL(__NR3264_sendfile, sys_sendfile64)
 
 /* fs/select.c */
 #define __NR_pselect6 72
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b5e2e4c6b017..8aeadf6b553a 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -167,6 +167,7 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
+	*(.data.unlikely)						\
 	STRUCT_ALIGN();							\
 	*(__tracepoints)						\
 	/* implement dynamic printk debug */				\
@@ -615,30 +616,23 @@
 	*(.init.setup)							\
 	VMLINUX_SYMBOL(__setup_end) = .;
 
-#define INITCALLS							\
-	*(.initcallearly.init)						\
-	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
-	*(.initcall0.init)						\
-	*(.initcall0s.init)						\
-	*(.initcall1.init)						\
-	*(.initcall1s.init)						\
-	*(.initcall2.init)						\
-	*(.initcall2s.init)						\
-	*(.initcall3.init)						\
-	*(.initcall3s.init)						\
-	*(.initcall4.init)						\
-	*(.initcall4s.init)						\
-	*(.initcall5.init)						\
-	*(.initcall5s.init)						\
-	*(.initcallrootfs.init)						\
-	*(.initcall6.init)						\
-	*(.initcall6s.init)						\
-	*(.initcall7.init)						\
-	*(.initcall7s.init)
+#define INIT_CALLS_LEVEL(level)						\
+	VMLINUX_SYMBOL(__initcall##level##_start) = .;			\
+	*(.initcall##level##.init)					\
+	*(.initcall##level##s.init)					\
 
 #define INIT_CALLS							\
 	VMLINUX_SYMBOL(__initcall_start) = .;				\
-	INITCALLS							\
+	*(.initcallearly.init)						\
+	INIT_CALLS_LEVEL(0)						\
+	INIT_CALLS_LEVEL(1)						\
+	INIT_CALLS_LEVEL(2)						\
+	INIT_CALLS_LEVEL(3)						\
+	INIT_CALLS_LEVEL(4)						\
+	INIT_CALLS_LEVEL(5)						\
+	INIT_CALLS_LEVEL(rootfs)					\
+	INIT_CALLS_LEVEL(6)						\
+	INIT_CALLS_LEVEL(7)						\
 	VMLINUX_SYMBOL(__initcall_end) = .;
 
 #define CON_INITCALL
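
Note: each INIT_CALLS_LEVEL(n) stamps out a per-level start symbol plus the two input-section patterns for that level, so INIT_CALLS_LEVEL(4), for example, expands roughly to the following linker-script fragment (illustration only):

	VMLINUX_SYMBOL(__initcall4_start) = .;
	*(.initcall4.init)
	*(.initcall4s.init)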