Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/Kbuild.asm    |  2 +-
-rw-r--r--  include/asm-generic/atomic.h      |  2 +-
-rw-r--r--  include/asm-generic/gpio.h        |  6 +++++-
-rw-r--r--  include/asm-generic/pgtable.h     | 57 ++++++++++++++++++++++++++++++
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 14 ++++++++
5 files changed, 78 insertions(+), 3 deletions(-)
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 92a6d91d0c1a..7cd25b8e7c9a 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -1,6 +1,6 @@
 header-y += kvm.h
 
-ifeq ($(wildcard include/asm-$(SRCARCH)/a.out.h),include/asm-$(SRCARCH)/a.out.h)
+ifneq ($(wildcard $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
 unifdef-y += a.out.h
 endif
 unifdef-y += auxvec.h
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 85fd0aa27a8c..4ec0a296bdec 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -2,7 +2,7 @@
 #define _ASM_GENERIC_ATOMIC_H
 /*
  * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  *
  * Allows to provide arch independent atomic definitions without the need to
  * edit all arch specific atomic.h files.
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ecf675a59d21..6be061d09da9 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -1,8 +1,12 @@
 #ifndef _ASM_GENERIC_GPIO_H
 #define _ASM_GENERIC_GPIO_H
 
+#include <linux/types.h>
+
 #ifdef CONFIG_HAVE_GPIO_LIB
 
+#include <linux/compiler.h>
+
 /* Platforms may implement their GPIO interface with library code,
  * at a small performance cost for non-inlined operations and some
  * extra memory (for code and for per-GPIO table entries).
@@ -74,7 +78,7 @@ struct gpio_chip {
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
 			unsigned offset);
-extern int __init __must_check gpiochip_reserve(int start, int ngpio);
+extern int __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
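
For context only (not part of the patch): the gpio.h hunks above just adjust declarations, so the sketch below shows how board-setup code might typically use gpiochip_reserve() and gpiochip_add(). The chip name, GPIO range, and callbacks ("example_*") are invented for illustration, and only the input-side hooks are filled in.

#include <linux/kernel.h>
#include <asm/gpio.h>

static int example_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return 0;	/* configure the line as an input */
}

static int example_get(struct gpio_chip *chip, unsigned offset)
{
	return 0;	/* read the current line level */
}

static struct gpio_chip example_chip = {
	.label			= "example",
	.base			= 160,	/* first GPIO number handled */
	.ngpio			= 16,	/* number of GPIOs handled */
	.direction_input	= example_direction_input,
	.get			= example_get,
};

static int example_setup(void)
{
	int err;

	/* Optionally reserve the range early so nothing else claims it. */
	err = gpiochip_reserve(example_chip.base, example_chip.ngpio);
	if (err)
		return err;

	/* Register the controller; its GPIOs become usable via gpiolib. */
	return gpiochip_add(&example_chip);
}
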
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 44ef329531c3..4fce3db2cecc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -197,6 +197,63 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 }
 #endif /* CONFIG_MMU */
 
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time.  The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(mm, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t pte)
+{
+	__ptep_modify_prot_commit(mm, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
 /*
  * A facility to provide lazy MMU batching. This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
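
Not part of the patch: the comment in the hunk above describes a read-modify-write transaction, and the sketch below shows the calling pattern it implies for a single pte, with the pte lock held across the pair. It assumes the usual pagetable helpers (pte_offset_map_lock()/pte_unmap_unlock(), pte_modify()) and a hypothetical caller name.

static void example_change_prot(struct mm_struct *mm, unsigned long addr,
				pgprot_t newprot, pmd_t *pmd)
{
	spinlock_t *ptl;
	pte_t *ptep, ptent;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);

	/* Take the pte out of service so hardware A/D updates are not lost. */
	ptent = ptep_modify_prot_start(mm, addr, ptep);
	ptent = pte_modify(ptent, newprot);
	/* Install the new pte; the commit may be batched until the lock drops. */
	ptep_modify_prot_commit(mm, addr, ptep, ptent);

	pte_unmap_unlock(ptep, ptl);
}
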
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f054778e916c..f1992dc5c424 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -93,6 +93,8 @@
 	VMLINUX_SYMBOL(__end_rio_route_ops) = .;			\
   }									\
 									\
+  TRACEDATA								\
+									\
   /* Kernel symbol table: Normal symbols */				\
   __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) {			\
 	VMLINUX_SYMBOL(__start___ksymtab) = .;				\
@@ -318,6 +320,18 @@
 	__stop___bug_table = .;						\
  }
 
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA							\
+	. = ALIGN(4);							\
+	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
+		__tracedata_start = .;					\
+		*(.tracedata)						\
+		__tracedata_end = .;					\
+	}
+#else
+#define TRACEDATA
+#endif
+
 #define NOTES								\
 	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__start_notes) = .;			\
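
Not part of the patch: the TRACEDATA macro added above defines an output section bounded by __tracedata_start/__tracedata_end, and the entries themselves come from the PM resume-trace code when CONFIG_PM_TRACE is set. The hypothetical example below only illustrates the underlying linker-section idiom: placing an object into .tracedata and walking the section through the boundary symbols. The entry layout and names ("example_*") are invented for illustration.

#include <linux/kernel.h>

struct example_trace_entry {
	unsigned int line;
};

/* Placed into the .tracedata output section collected by TRACEDATA. */
static const struct example_trace_entry example_entry
	__attribute__((section(".tracedata"), used)) = {
	.line = __LINE__,
};

/* Boundary symbols defined by the TRACEDATA macro in the linker script. */
extern const struct example_trace_entry __tracedata_start[], __tracedata_end[];

static void example_dump_tracedata(void)
{
	const struct example_trace_entry *p;

	for (p = __tracedata_start; p < __tracedata_end; p++)
		printk(KERN_DEBUG "tracedata line %u\n", p->line);
}
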