Diffstat (limited to 'include/asm-generic')
-rw-r--r--   include/asm-generic/atomic.h         |  5
-rw-r--r--   include/asm-generic/cmpxchg-local.h  |  1
-rw-r--r--   include/asm-generic/hardirq.h        |  2
-rw-r--r--   include/asm-generic/irqflags.h       | 52
-rw-r--r--   include/asm-generic/pgtable.h        |  4
-rw-r--r--   include/asm-generic/vmlinux.lds.h    | 14
6 files changed, 46 insertions, 32 deletions
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index a6cc019a41e0..e994197f84b7 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -43,6 +43,7 @@
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#include <linux/irqflags.h>
 #include <asm/system.h>
 
 /**
@@ -57,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
 	temp = v->counter;
 	temp += i;
 	v->counter = temp;
@@ -78,7 +79,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
 	temp = v->counter;
 	temp -= i;
 	v->counter = temp;
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index b2ba2fc8829a..2533fddd34a6 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -2,6 +2,7 @@
 #define __ASM_GENERIC_CMPXCHG_LOCAL_H
 
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
 
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f59080e5cc..04d0a977cd43 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -3,13 +3,13 @@
 
 #include <linux/cache.h>
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
 	unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
 
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
index 9aebf618275a..1f40d0024cf3 100644
--- a/include/asm-generic/irqflags.h
+++ b/include/asm-generic/irqflags.h
@@ -5,68 +5,62 @@
  * All architectures should implement at least the first two functions,
  * usually inline assembly will be the best way.
  */
-#ifndef RAW_IRQ_DISABLED
-#define RAW_IRQ_DISABLED 0
-#define RAW_IRQ_ENABLED 1
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
 #endif
 
 /* read interrupt enabled status */
-#ifndef __raw_local_save_flags
-unsigned long __raw_local_save_flags(void);
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
 #endif
 
 /* set interrupt enabled status */
-#ifndef raw_local_irq_restore
-void raw_local_irq_restore(unsigned long flags);
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
 #endif
 
 /* get status and disable interrupts */
-#ifndef __raw_local_irq_save
-static inline unsigned long __raw_local_irq_save(void)
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	flags = __raw_local_save_flags();
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	flags = arch_local_save_flags();
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 	return flags;
 }
 #endif
 
 /* test flags */
-#ifndef raw_irqs_disabled_flags
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags == RAW_IRQ_DISABLED;
+	return flags == ARCH_IRQ_DISABLED;
 }
 #endif
 
 /* unconditionally enable interrupts */
-#ifndef raw_local_irq_enable
-static inline void raw_local_irq_enable(void)
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_ENABLED);
+	arch_local_irq_restore(ARCH_IRQ_ENABLED);
 }
 #endif
 
 /* unconditionally disable interrupts */
-#ifndef raw_local_irq_disable
-static inline void raw_local_irq_disable(void)
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 }
 #endif
 
 /* test hardware interrupt enable bit */
-#ifndef raw_irqs_disabled
-static inline int raw_irqs_disabled(void)
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
 {
-	return raw_irqs_disabled_flags(__raw_local_save_flags());
+	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 #endif
 
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags) \
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
 #endif /* __ASM_GENERIC_IRQFLAGS_H */
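
Context for the renaming above: as the header's own comment notes, an architecture only has to supply arch_local_save_flags() and arch_local_irq_restore(); every other helper falls back to the generic versions. Below is a minimal sketch of an architecture header built on these fallbacks; the __MYARCH guard and the my_cpu_*() low-level helpers are made-up names for illustration, not kernel APIs.

/*
 * Hypothetical <asm/irqflags.h> for an architecture that implements only
 * the two required primitives and takes everything else from asm-generic.
 */
#ifndef __MYARCH_IRQFLAGS_H
#define __MYARCH_IRQFLAGS_H

/* keep the same flag encoding the generic helpers default to */
#define ARCH_IRQ_DISABLED	0
#define ARCH_IRQ_ENABLED	1

extern int my_cpu_get_int_enable(void);		/* assumed helper: returns 0 or 1 */
extern void my_cpu_set_int_enable(int enable);	/* assumed helper: writes the enable bit */

static inline unsigned long arch_local_save_flags(void)
{
	return my_cpu_get_int_enable() ? ARCH_IRQ_ENABLED : ARCH_IRQ_DISABLED;
}
#define arch_local_save_flags arch_local_save_flags

static inline void arch_local_irq_restore(unsigned long flags)
{
	my_cpu_set_int_enable(flags == ARCH_IRQ_ENABLED);
}
#define arch_local_irq_restore arch_local_irq_restore

/*
 * arch_local_irq_save(), arch_local_irq_enable(), arch_local_irq_disable(),
 * arch_irqs_disabled() and arch_irqs_disabled_flags() all come from the
 * generic fallbacks.
 */
#include <asm-generic/irqflags.h>

#endif /* __MYARCH_IRQFLAGS_H */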
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e2bd73e8f9c0..f4d4120e5128 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot)	(prot)
 #endif
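
The flush_tlb_fix_spurious_fault() default added above follows the usual asm-generic override pattern: the fallback (a full flush_tlb_page()) is used only when the architecture has not defined the macro itself. A hedged sketch of how an architecture that resolves stale TLB entries in hardware might override it in its own asm/pgtable.h follows; making the hook a no-op is an assumption for illustration, not something this diff does for any architecture.

/* hypothetical excerpt from an architecture's asm/pgtable.h */

/*
 * Assumed: this CPU re-walks the page tables on a spurious protection
 * fault, so no explicit per-page TLB flush is needed.
 */
#define flush_tlb_fix_spurious_fault(vma, address)	do { } while (0)

/* ...arch-specific page table definitions... */

/* the #ifndef in asm-generic/pgtable.h now leaves the override in place */
#include <asm-generic/pgtable.h>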
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a170fb7d..f4229fb315e1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -220,6 +220,8 @@
 									\
 	BUG_TABLE							\
 									\
+	JUMP_TABLE							\
+									\
 	/* PCI quirks */						\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
@@ -563,6 +565,14 @@
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE							\
+	. = ALIGN(8);							\
+	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start___jump_table) = .;		\
+		*(__jump_table)						\
+		VMLINUX_SYMBOL(__stop___jump_table) = .;		\
+	}
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA							\
 	. = ALIGN(4);							\
@@ -677,7 +687,9 @@
 				- LOAD_OFFSET) {			\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	*(.data..percpu..first)						\
+	. = ALIGN(PAGE_SIZE);						\
 	*(.data..percpu..page_aligned)					\
+	*(.data..percpu..readmostly)					\
 	*(.data..percpu)						\
 	*(.data..percpu..shared_aligned)				\
 	VMLINUX_SYMBOL(__per_cpu_end) = .;				\
@@ -703,7 +715,9 @@
 	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;				\
 	*(.data..percpu..first)						\
+	. = ALIGN(PAGE_SIZE);						\
 	*(.data..percpu..page_aligned)					\
+	*(.data..percpu..readmostly)					\
 	*(.data..percpu)						\
 	*(.data..percpu..shared_aligned)				\
 	VMLINUX_SYMBOL(__per_cpu_end) = .;				\
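
The JUMP_TABLE macro added above collects every __jump_table entry emitted across the kernel into one 8-byte-aligned block bounded by __start___jump_table and __stop___jump_table. Below is a short sketch of how such linker-provided boundary symbols are typically walked from C; the three-word jump_entry layout shown is an assumption for illustration and may not match the kernel's actual structure.

/* assumed layout of one __jump_table record, for illustration only */
struct jump_entry {
	unsigned long code;	/* address of the patchable jump/nop instruction */
	unsigned long target;	/* destination to branch to when enabled */
	unsigned long key;	/* identifies which static branch this entry belongs to */
};

/* boundary symbols provided by the JUMP_TABLE linker macro above */
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

static void walk_jump_table(void)
{
	struct jump_entry *entry;

	for (entry = __start___jump_table; entry < __stop___jump_table; entry++) {
		/* e.g. sort the table or patch the instruction at entry->code */
	}
}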