author     Ingo Molnar <mingo@elte.hu>    2008-10-28 11:26:12 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-10-28 11:26:12 -0400
commit     7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree       e730a4565e0318140d2fbd2f0415d18a339d7336 /include/asm-generic
parent     41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent     0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild.asm               6
-rw-r--r--  include/asm-generic/bug.h                   37
-rw-r--r--  include/asm-generic/dma-coherent.h          32
-rw-r--r--  include/asm-generic/dma-mapping-broken.h     2
-rw-r--r--  include/asm-generic/dma-mapping.h            4
-rw-r--r--  include/asm-generic/gpio.h                  56
-rw-r--r--  include/asm-generic/int-ll64.h               2
-rw-r--r--  include/asm-generic/ioctl.h                  4
-rw-r--r--  include/asm-generic/mutex-dec.h             26
-rw-r--r--  include/asm-generic/mutex-xchg.h             9
-rw-r--r--  include/asm-generic/pci-dma-compat.h         4
-rw-r--r--  include/asm-generic/pgtable-nopmd.h          6
-rw-r--r--  include/asm-generic/rtc.h                   36
-rw-r--r--  include/asm-generic/sections.h               6
-rw-r--r--  include/asm-generic/siginfo.h                2
-rw-r--r--  include/asm-generic/statfs.h                65
-rw-r--r--  include/asm-generic/syscall.h              141
-rw-r--r--  include/asm-generic/vmlinux.lds.h           43
18 files changed, 389 insertions, 92 deletions
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 7cd25b8e7c9a..1870d5e05f1c 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,6 +1,10 @@
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
+		  $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
 header-y += kvm.h
+endif
 
-ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
+		  $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
 unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2632328d8646..12c07c1866b2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -22,7 +22,7 @@ struct bug_entry {
 
 #ifndef HAVE_ARCH_BUG
 #define BUG() do { \
-	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \
+	printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
 	panic("BUG!"); \
 } while (0)
 #endif
@@ -34,9 +34,14 @@ struct bug_entry {
 #ifndef __WARN
 #ifndef __ASSEMBLY__
 extern void warn_on_slowpath(const char *file, const int line);
+extern void warn_slowpath(const char *file, const int line,
+		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
 #define WANT_WARN_ON_SLOWPATH
 #endif
 #define __WARN() warn_on_slowpath(__FILE__, __LINE__)
+#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#else
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
 #endif
 
 #ifndef WARN_ON
@@ -48,6 +53,15 @@ extern void warn_on_slowpath(const char *file, const int line);
 })
 #endif
 
+#ifndef WARN
+#define WARN(condition, format...) ({		\
+	int __ret_warn_on = !!(condition);	\
+	if (unlikely(__ret_warn_on))		\
+		__WARN_printf(format);		\
+	unlikely(__ret_warn_on);		\
+})
+#endif
+
 #else /* !CONFIG_BUG */
 #ifndef HAVE_ARCH_BUG
 #define BUG()
@@ -63,6 +77,14 @@ extern void warn_on_slowpath(const char *file, const int line);
 	unlikely(__ret_warn_on); \
 })
 #endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({		\
+	int __ret_warn_on = !!(condition);	\
+	unlikely(__ret_warn_on);		\
+})
+#endif
+
 #endif
 
 #define WARN_ON_ONCE(condition) ({ \
@@ -75,6 +97,19 @@ extern void warn_on_slowpath(const char *file, const int line);
 	unlikely(__ret_warn_once); \
 })
 
+#define WARN_ONCE(condition, format...) ({	\
+	static int __warned;			\
+	int __ret_warn_once = !!(condition);	\
+						\
+	if (unlikely(__ret_warn_once))		\
+		if (WARN(!__warned, format))	\
+			__warned = 1;		\
+	unlikely(__ret_warn_once);		\
+})
+
+#define WARN_ON_RATELIMIT(condition, state)	\
+		WARN_ON((condition) && __ratelimit(state))
+
 #ifdef CONFIG_SMP
 # define WARN_ON_SMP(x)	WARN_ON(x)
 #else
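Note on the new macros: WARN(condition, format...) prints the given printk-style message when the condition is true and evaluates to the (boolean) condition, while WARN_ONCE() fires at most once. A minimal, hedged usage sketch follows; the example_check_status() helper and its status bits are invented for illustration and are not part of this patch.

/* hypothetical helper, illustrating the new macros only */
static int example_check_status(u32 status)
{
	/* WARN() evaluates to the condition, so it can gate an early exit */
	if (WARN(status & 0x1, "example: fault bit set, status %#x\n", status))
		return -EIO;

	/* WARN_ONCE() prints only the first time its condition is seen true */
	WARN_ONCE(status & 0x2, "example: running degraded, status %#x\n", status);

	return 0;
}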
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
new file mode 100644
index 000000000000..85a3ffaa0242
--- /dev/null
+++ b/include/asm-generic/dma-coherent.h
@@ -0,0 +1,32 @@
1#ifndef DMA_COHERENT_H
2#define DMA_COHERENT_H
3
4#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
5/*
6 * These two functions are only for dma allocator.
7 * Don't use them in device drivers.
8 */
9int dma_alloc_from_coherent(struct device *dev, ssize_t size,
10 dma_addr_t *dma_handle, void **ret);
11int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
12
13/*
14 * Standard interface
15 */
16#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
17extern int
18dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
19 dma_addr_t device_addr, size_t size, int flags);
20
21extern void
22dma_release_declared_memory(struct device *dev);
23
24extern void *
25dma_mark_declared_memory_occupied(struct device *dev,
26 dma_addr_t device_addr, size_t size);
27#else
28#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
29#define dma_release_from_coherent(dev, order, vaddr) (0)
30#endif
31
32#endif
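For context, dma_declare_coherent_memory() is what platform or bus code uses to dedicate a memory region to a device so that later dma_alloc_coherent() calls for that device are satisfied from it (internally via dma_alloc_from_coherent()). A rough sketch under stated assumptions follows: the example_ name, addresses, and size are invented, and the success check assumes the long-standing convention that the call returns DMA_MEMORY_MAP on success and 0 on failure.

/* hypothetical platform setup code */
static int example_declare_sram(struct device *dev)
{
	int ret = dma_declare_coherent_memory(dev,
					      0x40000000,	/* bus address (made up) */
					      0x40000000,	/* device-visible address */
					      0x10000,		/* 64 KiB */
					      DMA_MEMORY_MAP);
	if (!(ret & DMA_MEMORY_MAP))
		return -ENOMEM;

	/* on teardown: dma_release_declared_memory(dev); */
	return 0;
}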
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index e2468f894d2a..82cd0cb1c3fe 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 #define dma_sync_sg_for_device dma_sync_sg_for_cpu
 
 extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern int
 dma_supported(struct device *dev, u64 mask);
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index 783ab9944d70..189486c3f92e 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return pci_dma_mapping_error(dma_addr);
+	return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
 }
 
 
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 6be061d09da9..81797ec9ab29 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -2,8 +2,9 @@
 #define _ASM_GENERIC_GPIO_H
 
 #include <linux/types.h>
+#include <linux/errno.h>
 
-#ifdef CONFIG_HAVE_GPIO_LIB
+#ifdef CONFIG_GPIOLIB
 
 #include <linux/compiler.h>
 
@@ -13,7 +14,7 @@
  *
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
- * smaller range 0..ARCH_NR_GPIOS.
+ * smaller range 0..ARCH_NR_GPIOS-1.
  */
 
 #ifndef ARCH_NR_GPIOS
@@ -32,11 +33,19 @@ struct module;
 /**
  * struct gpio_chip - abstract a GPIO controller
  * @label: for diagnostics
+ * @dev: optional device providing the GPIOs
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @request: optional hook for chip-specific activation, such as
+ *	enabling module power and clock; may sleep
+ * @free: optional hook for chip-specific deactivation, such as
+ *	disabling module power and clock; may sleep
  * @direction_input: configures signal "offset" as input, or returns error
  * @get: returns value for signal "offset"; for output signals this
  *	returns either the value actually sensed, or zero
  * @direction_output: configures signal "offset" as output, or returns error
  * @set: assigns output value for signal "offset"
+ * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ *	implementation may not sleep
  * @dbg_show: optional routine to show contents in debugfs; default code
  *	will be used when this is omitted, but custom code can show extra
  *	state (such as pullup/pulldown configuration).
@@ -58,9 +67,15 @@ struct module;
  * is calculated by subtracting @base from the gpio number.
  */
 struct gpio_chip {
-	char *label;
+	const char *label;
+	struct device *dev;
 	struct module *owner;
 
+	int (*request)(struct gpio_chip *chip,
+						unsigned offset);
+	void (*free)(struct gpio_chip *chip,
+						unsigned offset);
+
 	int (*direction_input)(struct gpio_chip *chip,
 						unsigned offset);
 	int (*get)(struct gpio_chip *chip,
@@ -69,11 +84,16 @@ struct gpio_chip {
 						unsigned offset, int value);
 	void (*set)(struct gpio_chip *chip,
 						unsigned offset, int value);
+
+	int (*to_irq)(struct gpio_chip *chip,
+						unsigned offset);
+
 	void (*dbg_show)(struct seq_file *s,
 						struct gpio_chip *chip);
 	int base;
 	u16 ngpio;
 	unsigned can_sleep:1;
+	unsigned exported:1;
 };
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
@@ -107,8 +127,20 @@ extern void __gpio_set_value(unsigned gpio, int value);
 
 extern int __gpio_cansleep(unsigned gpio);
 
+extern int __gpio_to_irq(unsigned gpio);
+
+#ifdef CONFIG_GPIO_SYSFS
+
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+extern int gpio_export(unsigned gpio, bool direction_may_change);
+extern void gpio_unexport(unsigned gpio);
+
+#endif /* CONFIG_GPIO_SYSFS */
 
-#else
+#else /* !CONFIG_HAVE_GPIO_LIB */
 
 static inline int gpio_is_valid(int number)
 {
@@ -137,6 +169,20 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
 	gpio_set_value(gpio, value);
 }
 
-#endif
+#endif /* !CONFIG_HAVE_GPIO_LIB */
+
+#ifndef CONFIG_GPIO_SYSFS
+
+/* sysfs support is only available with gpiolib, where it's optional */
+
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+	return -ENOSYS;
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+}
+#endif /* CONFIG_GPIO_SYSFS */
 
 #endif /* _ASM_GENERIC_GPIO_H */
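A sketch of how a gpiolib driver might fill in the fields this hunk adds to struct gpio_chip (dev, request/free, to_irq). Everything named mychip_* plus MYCHIP_IRQ_BASE is hypothetical, and the callbacks other than to_irq are only declared to keep the sketch short.

static int mychip_request(struct gpio_chip *chip, unsigned offset);
static void mychip_free(struct gpio_chip *chip, unsigned offset);
static int mychip_dir_in(struct gpio_chip *chip, unsigned offset);
static int mychip_get(struct gpio_chip *chip, unsigned offset);
static int mychip_dir_out(struct gpio_chip *chip, unsigned offset, int value);
static void mychip_set(struct gpio_chip *chip, unsigned offset, int value);

static int mychip_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return MYCHIP_IRQ_BASE + offset;	/* hypothetical static mapping */
}

static struct gpio_chip mychip_gpio = {
	.label			= "mychip",
	.owner			= THIS_MODULE,
	.request		= mychip_request,	/* e.g. enable a clock; may sleep */
	.free			= mychip_free,
	.direction_input	= mychip_dir_in,
	.get			= mychip_get,
	.direction_output	= mychip_dir_out,
	.set			= mychip_set,
	.to_irq			= mychip_to_irq,
	.base			= -1,			/* let gpiolib pick the GPIO base */
	.ngpio			= 16,
};

/*
 * In the driver's probe() one would typically set mychip_gpio.dev = &pdev->dev
 * and call gpiochip_add(&mychip_gpio); with CONFIG_GPIO_SYSFS enabled,
 * individual lines can then be made visible to userspace via
 * gpio_export(gpio, false).
 */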
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index 260948905e4e..f9bc9ac29b36 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -26,7 +26,7 @@ typedef unsigned int __u32;
 #ifdef __GNUC__
 __extension__ typedef __signed__ long long __s64;
 __extension__ typedef unsigned long long __u64;
-#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+#else
 typedef __signed__ long long __s64;
 typedef unsigned long long __u64;
 #endif
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h
index 864181385579..15828b2d663c 100644
--- a/include/asm-generic/ioctl.h
+++ b/include/asm-generic/ioctl.h
@@ -68,12 +68,16 @@
 	 ((nr)   << _IOC_NRSHIFT) | \
 	 ((size) << _IOC_SIZESHIFT))
 
+#ifdef __KERNEL__
 /* provoke compile error for invalid uses of size argument */
 extern unsigned int __invalid_size_argument_for_IOC;
 #define _IOC_TYPECHECK(t) \
 	((sizeof(t) == sizeof(t[1]) && \
 	  sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
 	  sizeof(t) : __invalid_size_argument_for_IOC)
+#else
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#endif
 
 /* used to create numbers */
 #define _IO(type,nr)		_IOC(_IOC_NONE,(type),(nr),0)
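The new __KERNEL__ guard means kernel builds keep the link-time size check (_IOC_TYPECHECK resolves a bad argument to the undefined __invalid_size_argument_for_IOC symbol), while userspace now just gets plain sizeof(). Defining ioctl numbers is unchanged; an illustrative header, with a made-up magic byte, command numbers, and structure, might look like:

#include <linux/ioctl.h>

struct ex_config {			/* hypothetical ioctl payload */
	int	threshold;
	int	flags;
};

#define EX_IOC_MAGIC		'E'	/* hypothetical magic number */
#define EX_IOC_SET_CONFIG	_IOW(EX_IOC_MAGIC, 0x01, struct ex_config)
#define EX_IOC_GET_CONFIG	_IOR(EX_IOC_MAGIC, 0x02, struct ex_config)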
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be6743f..f104af7cf437 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_dec_return(count) < 0))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_inc_return(count) <= 0))
 		fail_fn(count);
 }
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-	/*
-	 * We have two variants here. The cmpxchg based one is the best one
-	 * because it never induce a false contention state. It is included
-	 * here because architectures using the inc/dec algorithms over the
-	 * xchg ones are much more likely to support cmpxchg natively.
-	 *
-	 * If not we fall back to the spinlock based variant - that is
-	 * just as efficient (and simpler) as a 'destructive' probing of
-	 * the mutex state would be.
-	 */
-#ifdef __HAVE_ARCH_CMPXCHG
-	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
-		smp_mb();
+	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
 		return 1;
-	}
 	return 0;
-#else
-	return fail_fn(count);
-#endif
 }
 
 #endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2cbfebe..580a6d35c700 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		fail_fn(count);
-	else
-		smp_mb();
 }
 
 /**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		return fail_fn(count);
-	else {
-		smp_mb();
-		return 0;
-	}
+	return 0;
 }
 
 /**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 static inline void
 __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 {
-	smp_mb();
 	if (unlikely(atomic_xchg(count, 1) != 0))
 		fail_fn(count);
 }
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 		if (prev < 0)
 			prev = 0;
 	}
-	smp_mb();
 
 	return prev;
 }
diff --git a/include/asm-generic/pci-dma-compat.h b/include/asm-generic/pci-dma-compat.h
index 25c10e96b2b7..37b3706226e7 100644
--- a/include/asm-generic/pci-dma-compat.h
+++ b/include/asm-generic/pci-dma-compat.h
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
 }
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
-	return dma_mapping_error(dma_addr);
+	return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #endif
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 087325ede76c..a7cdc48e8b78 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -5,6 +5,8 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
+struct mm_struct;
+
 #define __PAGETABLE_PMD_FOLDED
 
 /*
@@ -54,7 +56,9 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
  * inside the pud, so has no extra memory associated with it.
  */
 #define pmd_alloc_one(mm, address)	NULL
-#define pmd_free(mm, x)			do { } while (0)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
 #define __pmd_free_tlb(tlb, x)		do { } while (0)
 
 #undef pmd_addr_end
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index be4af0029ac0..89061c1a67d4 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -15,6 +15,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
+#include <linux/delay.h>
 
 #define RTC_PIE 0x40		/* periodic interrupt enable */
 #define RTC_AIE 0x20		/* alarm interrupt enable */
@@ -43,7 +44,6 @@ static inline unsigned char rtc_is_updating(void)
 
 static inline unsigned int get_rtc_time(struct rtc_time *time)
 {
-	unsigned long uip_watchdog = jiffies;
 	unsigned char ctrl;
 	unsigned long flags;
 
@@ -53,19 +53,15 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
 
 	/*
 	 * read RTC once any update in progress is done. The update
-	 * can take just over 2ms. We wait 10 to 20ms. There is no need to
+	 * can take just over 2ms. We wait 20ms. There is no need to
 	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
 	 * If you need to know *exactly* when a second has started, enable
 	 * periodic update complete interrupts, (via ioctl) and then
 	 * immediately read /dev/rtc which will block until you get the IRQ.
 	 * Once the read clears, read the RTC time (again via ioctl). Easy.
 	 */
-
-	if (rtc_is_updating() != 0)
-		while (jiffies - uip_watchdog < 2*HZ/100) {
-			barrier();
-			cpu_relax();
-		}
+	if (rtc_is_updating())
+		mdelay(20);
 
 	/*
 	 * Only the values that we read from the RTC are set. We leave
@@ -88,12 +84,12 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
 
 	if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
 	{
-		BCD_TO_BIN(time->tm_sec);
-		BCD_TO_BIN(time->tm_min);
-		BCD_TO_BIN(time->tm_hour);
-		BCD_TO_BIN(time->tm_mday);
-		BCD_TO_BIN(time->tm_mon);
-		BCD_TO_BIN(time->tm_year);
+		time->tm_sec = bcd2bin(time->tm_sec);
+		time->tm_min = bcd2bin(time->tm_min);
+		time->tm_hour = bcd2bin(time->tm_hour);
+		time->tm_mday = bcd2bin(time->tm_mday);
+		time->tm_mon = bcd2bin(time->tm_mon);
+		time->tm_year = bcd2bin(time->tm_year);
 	}
 
 #ifdef CONFIG_MACH_DECSTATION
@@ -163,12 +159,12 @@ static inline int set_rtc_time(struct rtc_time *time)
 
 	if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
 	    || RTC_ALWAYS_BCD) {
-		BIN_TO_BCD(sec);
-		BIN_TO_BCD(min);
-		BIN_TO_BCD(hrs);
-		BIN_TO_BCD(day);
-		BIN_TO_BCD(mon);
-		BIN_TO_BCD(yrs);
+		sec = bin2bcd(sec);
+		min = bin2bcd(min);
+		hrs = bin2bcd(hrs);
+		day = bin2bcd(day);
+		mon = bin2bcd(mon);
+		yrs = bin2bcd(yrs);
 	}
 
 	save_control = CMOS_READ(RTC_CONTROL);
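bcd2bin()/bin2bcd() from <linux/bcd.h> replace the old BCD_TO_BIN()/BIN_TO_BCD() macros: they take and return the value instead of rewriting their argument in place. The arithmetic involved is just two-digit BCD packing, shown here as a standalone sketch (not the kernel's own implementation):

static inline unsigned int example_bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;		/* 0x59 -> 59 */
}

static inline unsigned char example_bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);		/* 59 -> 0x59 */
}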
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 8feeae1f2369..79a7ff925bf8 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -14,4 +14,10 @@ extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
 
+/* function descriptor handling (if any).  Override
+ * in asm/sections.h */
+#ifndef dereference_function_descriptor
+#define dereference_function_descriptor(p) (p)
+#endif
+
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
index 8786e01e0db8..969570167e9e 100644
--- a/include/asm-generic/siginfo.h
+++ b/include/asm-generic/siginfo.h
@@ -199,6 +199,8 @@ typedef struct siginfo {
  */
 #define TRAP_BRKPT	(__SI_FAULT|1)	/* process breakpoint */
 #define TRAP_TRACE	(__SI_FAULT|2)	/* process trace trap */
+#define TRAP_BRANCH	(__SI_FAULT|3)	/* process taken branch trap */
+#define TRAP_HWBKPT	(__SI_FAULT|4)	/* hardware breakpoint/watchpoint */
 #define NSIGTRAP	2
 
 /*
diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h
index 1d01043e797d..6129d6802149 100644
--- a/include/asm-generic/statfs.h
+++ b/include/asm-generic/statfs.h
@@ -6,33 +6,64 @@
 typedef __kernel_fsid_t	fsid_t;
 #endif
 
+/*
+ * Most 64-bit platforms use 'long', while most 32-bit platforms use '__u32'.
+ * Yes, they differ in signedness as well as size.
+ * Special cases can override it for themselves -- except for S390x, which
+ * is just a little too special for us. And MIPS, which I'm not touching
+ * with a 10' pole.
+ */
+#ifndef __statfs_word
+#if BITS_PER_LONG == 64
+#define __statfs_word long
+#else
+#define __statfs_word __u32
+#endif
+#endif
+
 struct statfs {
-	__u32 f_type;
-	__u32 f_bsize;
-	__u32 f_blocks;
-	__u32 f_bfree;
-	__u32 f_bavail;
-	__u32 f_files;
-	__u32 f_ffree;
+	__statfs_word f_type;
+	__statfs_word f_bsize;
+	__statfs_word f_blocks;
+	__statfs_word f_bfree;
+	__statfs_word f_bavail;
+	__statfs_word f_files;
+	__statfs_word f_ffree;
 	__kernel_fsid_t f_fsid;
-	__u32 f_namelen;
-	__u32 f_frsize;
-	__u32 f_spare[5];
+	__statfs_word f_namelen;
+	__statfs_word f_frsize;
+	__statfs_word f_spare[5];
 };
 
+/*
+ * ARM needs to avoid the 32-bit padding at the end, for consistency
+ * between EABI and OABI
+ */
+#ifndef ARCH_PACK_STATFS64
+#define ARCH_PACK_STATFS64
+#endif
+
 struct statfs64 {
-	__u32 f_type;
-	__u32 f_bsize;
+	__statfs_word f_type;
+	__statfs_word f_bsize;
 	__u64 f_blocks;
 	__u64 f_bfree;
 	__u64 f_bavail;
 	__u64 f_files;
 	__u64 f_ffree;
 	__kernel_fsid_t f_fsid;
-	__u32 f_namelen;
-	__u32 f_frsize;
-	__u32 f_spare[5];
-};
+	__statfs_word f_namelen;
+	__statfs_word f_frsize;
+	__statfs_word f_spare[5];
+} ARCH_PACK_STATFS64;
+
+/*
+ * IA64 and x86_64 need to avoid the 32-bit padding at the end,
+ * to be compatible with the i386 ABI
+ */
+#ifndef ARCH_PACK_COMPAT_STATFS64
+#define ARCH_PACK_COMPAT_STATFS64
+#endif
 
 struct compat_statfs64 {
 	__u32 f_type;
@@ -46,6 +77,6 @@ struct compat_statfs64 {
 	__u32 f_namelen;
 	__u32 f_frsize;
 	__u32 f_spare[5];
-};
+} ARCH_PACK_COMPAT_STATFS64;
 
 #endif
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
new file mode 100644
index 000000000000..ea8087b55ffc
--- /dev/null
+++ b/include/asm-generic/syscall.h
@@ -0,0 +1,141 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc.  All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define.  Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H	1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+long syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op.  The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction.  This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val.  If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			      int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+			   unsigned int i, unsigned int n, unsigned long *args);
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+			   unsigned int i, unsigned int n,
+			   const unsigned long *args);
+
+#endif	/* _ASM_SYSCALL_H */
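A sketch of how a tracing engine might consume this interface once an architecture provides asm/syscall.h; the my_trace_entry() function and its printk are illustrative only, not part of this header.

#include <linux/sched.h>
#include <asm/syscall.h>

static void my_trace_entry(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr < 0)
		return;				/* task is not in a system call */

	/* fetch all six possible arguments, starting at index 0 */
	syscall_get_arguments(task, regs, 0, 6, args);

	printk(KERN_DEBUG "pid %d: syscall %ld(%lx, %lx, %lx)\n",
	       task->pid, nr, args[0], args[1], args[2]);
}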
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 729f6b0a60e9..80744606bad1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -37,6 +37,13 @@
 #define MEM_DISCARD(sec)	*(.mem##sec)
 #endif
 
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+			*(__mcount_loc)				\
+			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
 
 /* .data section */
 #define DATA_DATA							\
@@ -52,7 +59,10 @@
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___markers) = .;				\
 	*(__markers)							\
-	VMLINUX_SYMBOL(__stop___markers) = .;
+	VMLINUX_SYMBOL(__stop___markers) = .;				\
+	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
+	*(__tracepoints)						\
+	VMLINUX_SYMBOL(__stop___tracepoints) = .;
 
 #define RO_DATA(align)							\
 	. = ALIGN((align));						\
@@ -61,6 +71,7 @@
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
 		*(__markers_strings)	/* Markers: strings */		\
+		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
 									\
 	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
@@ -188,6 +199,7 @@
 	/* __*init sections */						\
 	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
 		*(.ref.rodata)						\
+		MCOUNT_REC()						\
 		DEV_KEEP(init.rodata)					\
 		DEV_KEEP(exit.rodata)					\
 		CPU_KEEP(init.rodata)					\
@@ -221,6 +233,7 @@
  * during second ld run in second ld pass when generating System.map */
 #define TEXT_TEXT							\
 		ALIGN_FUNCTION();					\
+		*(.text.hot)						\
 		*(.text)						\
 		*(.ref.text)						\
 		*(.text.init.refok)					\
@@ -230,7 +243,8 @@
 		CPU_KEEP(init.text)					\
 		CPU_KEEP(exit.text)					\
 		MEM_KEEP(init.text)					\
-		MEM_KEEP(exit.text)
+		MEM_KEEP(exit.text)					\
+		*(.text.unlikely)
 
 
 /* sched.text is aling to function alignment to secure we have same
@@ -266,7 +280,15 @@
 		CPU_DISCARD(init.data)					\
 		CPU_DISCARD(init.rodata)				\
 		MEM_DISCARD(init.data)					\
-		MEM_DISCARD(init.rodata)
+		MEM_DISCARD(init.rodata)				\
+		/* implement dynamic printk debug */			\
+		VMLINUX_SYMBOL(__start___verbose_strings) = .;		\
+		*(__verbose_strings)					\
+		VMLINUX_SYMBOL(__stop___verbose_strings) = .;		\
+		. = ALIGN(8);						\
+		VMLINUX_SYMBOL(__start___verbose) = .;			\
+		*(__verbose)						\
+		VMLINUX_SYMBOL(__stop___verbose) = .;
 
 #define INIT_TEXT							\
 	*(.init.text)							\
@@ -331,9 +353,9 @@
 #define BUG_TABLE							\
 	. = ALIGN(8);							\
 	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
-		__start___bug_table = .;				\
+		VMLINUX_SYMBOL(__start___bug_table) = .;		\
 		*(__bug_table)						\
-		__stop___bug_table = .;					\
+		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
 	}
 #else
 #define BUG_TABLE
@@ -343,9 +365,9 @@
 #define TRACEDATA							\
 	. = ALIGN(4);							\
 	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
-		__tracedata_start = .;					\
+		VMLINUX_SYMBOL(__tracedata_start) = .;			\
 		*(.tracedata)						\
-		__tracedata_end = .;					\
+		VMLINUX_SYMBOL(__tracedata_end) = .;			\
 	}
 #else
 #define TRACEDATA
@@ -359,6 +381,8 @@
 	}
 
 #define INITCALLS							\
+	*(.initcallearly.init)						\
+	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
 	*(.initcall0.init)						\
 	*(.initcall0s.init)						\
 	*(.initcall1.init)						\
@@ -379,9 +403,10 @@
 
 #define PERCPU(align)							\
 	. = ALIGN(align);						\
-	__per_cpu_start = .;						\
+	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	.data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu.page_aligned)				\
 		*(.data.percpu)						\
 		*(.data.percpu.shared_aligned)				\
 	}								\
-	__per_cpu_end = .;
+	VMLINUX_SYMBOL(__per_cpu_end) = .;