Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/atomic.h          |  9
-rw-r--r--  arch/arm/include/asm/atomic.h            |  3
-rw-r--r--  arch/arm/kernel/kprobes.c                |  2
-rw-r--r--  arch/arm/mach-s3c2410/include/mach/spi.h |  2
-rw-r--r--  arch/avr32/include/asm/atomic.h          |  2
-rw-r--r--  arch/avr32/kernel/traps.c                |  1
-rw-r--r--  arch/blackfin/include/asm/atomic.h       |  4
-rw-r--r--  arch/cris/include/asm/atomic.h           |  4
-rw-r--r--  arch/h8300/include/asm/atomic.h          |  3
-rw-r--r--  arch/ia64/include/asm/atomic.h           |  6
-rw-r--r--  arch/ia64/kernel/kprobes.c               |  8
-rw-r--r--  arch/ia64/mm/init.c                      |  2
-rw-r--r--  arch/m68knommu/include/asm/atomic.h      |  2
-rw-r--r--  arch/mips/include/asm/atomic.h           |  5
-rw-r--r--  arch/parisc/include/asm/atomic.h         | 11
-rw-r--r--  arch/powerpc/include/asm/atomic.h        |  4
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h       |  6
-rw-r--r--  arch/powerpc/kernel/kprobes.c            |  7
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c            |  7
-rw-r--r--  arch/powerpc/mm/mem.c                    |  2
-rw-r--r--  arch/s390/include/asm/atomic.h           |  7
-rw-r--r--  arch/s390/kernel/kprobes.c               |  7
-rw-r--r--  arch/s390/mm/init.c                      |  2
-rw-r--r--  arch/sh/include/asm/atomic.h             |  7
-rw-r--r--  arch/sh/kernel/traps_32.c                |  1
-rw-r--r--  arch/sh/mm/init.c                        |  3
-rw-r--r--  arch/sparc/include/asm/atomic_32.h       |  2
-rw-r--r--  arch/sparc/include/asm/atomic_64.h       |  3
-rw-r--r--  arch/um/kernel/trap.c                    | 24
-rw-r--r--  arch/x86/include/asm/atomic_32.h         | 10
-rw-r--r--  arch/x86/include/asm/atomic_64.h         | 18
-rw-r--r--  arch/x86/include/asm/unwind.h            | 13
-rw-r--r--  arch/x86/kernel/kprobes.c                |  7
-rw-r--r--  arch/x86/kernel/traps.c                  |  2
-rw-r--r--  arch/x86/mm/fault.c                      | 24
-rw-r--r--  arch/x86/mm/init_32.c                    |  2
-rw-r--r--  arch/x86/mm/init_64.c                    |  2
37 files changed, 74 insertions(+), 150 deletions(-)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ca88e54dec93..62b363584b2b 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -1,6 +1,7 @@
 #ifndef _ALPHA_ATOMIC_H
 #define _ALPHA_ATOMIC_H
 
+#include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/system.h>
 
@@ -13,14 +14,6 @@
  */
 
 
-/*
- * Counter is volatile to make sure gcc doesn't try to be clever
- * and move things around on us. We need to use _exactly_ the address
- * the user gave us, not some alias that contains the same information.
- */
-typedef struct { volatile int counter; } atomic_t;
-typedef struct { volatile long counter; } atomic64_t;
-
 #define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
 #define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )
 
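[Every per-arch atomic typedef removed in this series is replaced by a bare #include <linux/types.h>, so the unified definition is presumably centralized there, along the lines of the sketch below. The exact placement and the CONFIG_64BIT guard are assumptions inferred from the removals; the linux/types.h side is not part of this diff.]

	/* sketch: single shared definition, assumed to live in linux/types.h */
	typedef struct {
		int counter;
	} atomic_t;

	#ifdef CONFIG_64BIT
	typedef struct {
		long counter;
	} atomic64_t;
	#endif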
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 325f881ccb50..ee99723b3a6c 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,10 +12,9 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/types.h>
 #include <asm/system.h>
 
-typedef struct { volatile int counter; } atomic_t;
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 #ifdef __KERNEL__
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 3f9abe0e9aff..f692efddd449 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -92,9 +92,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
-		mutex_lock(&kprobe_mutex);
 		free_insn_slot(p->ainsn.insn, 0);
-		mutex_unlock(&kprobe_mutex);
 		p->ainsn.insn = NULL;
 	}
 }
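[This same transformation repeats for ia64, powerpc, s390, and x86 below: the kprobe_mutex lock/unlock pair around free_insn_slot() is dropped and the slot pointer is cleared behind an if (p->ainsn.insn) check. That is only safe if the serialization moved into the generic slot allocator, roughly as sketched here; the internal lock and helper names are assumptions, since kernel/kprobes.c is not part of this diff.]

	/* hypothetical sketch: locking assumed to move inside the generic helper */
	void free_insn_slot(kprobe_opcode_t *slot, int dirty)
	{
		mutex_lock(&kprobe_insn_mutex);		/* assumed internal lock */
		__free_insn_slot(slot, dirty);		/* hypothetical unlocked worker */
		mutex_unlock(&kprobe_insn_mutex);
	}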
diff --git a/arch/arm/mach-s3c2410/include/mach/spi.h b/arch/arm/mach-s3c2410/include/mach/spi.h
index 774f3adfe8ad..1d300fb112b1 100644
--- a/arch/arm/mach-s3c2410/include/mach/spi.h
+++ b/arch/arm/mach-s3c2410/include/mach/spi.h
@@ -14,7 +14,7 @@
 #define __ASM_ARCH_SPI_H __FILE__
 
 struct s3c2410_spi_info {
-	unsigned long		 pin_cs;	/* simple gpio cs */
+	int			 pin_cs;	/* simple gpio cs */
 	unsigned int		 num_cs;	/* total chipselects */
 	int			 bus_num;       /* bus number to use. */
 
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 7ef3862a73d0..318815107748 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -14,9 +14,9 @@
 #ifndef __ASM_AVR32_ATOMIC_H
 #define __ASM_AVR32_ATOMIC_H
 
+#include <linux/types.h>
 #include <asm/system.h>
 
-typedef struct { volatile int counter; } atomic_t;
 #define ATOMIC_INIT(i)  { (i) }
 
 #define atomic_read(v)		((v)->counter)
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index 0d987373bc01..d547c8df157d 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bug.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/kdebug.h>
diff --git a/arch/blackfin/include/asm/atomic.h b/arch/blackfin/include/asm/atomic.h
index 7cf508718605..25776c19064b 100644
--- a/arch/blackfin/include/asm/atomic.h
+++ b/arch/blackfin/include/asm/atomic.h
@@ -1,6 +1,7 @@
 #ifndef __ARCH_BLACKFIN_ATOMIC__
 #define __ARCH_BLACKFIN_ATOMIC__
 
+#include <linux/types.h>
 #include <asm/system.h>	/* local_irq_XXX() */
 
 /*
@@ -13,9 +14,6 @@
  * Tony Kou (tonyko@lineo.ca)   Lineo Inc.   2001
  */
 
-typedef struct {
-	int counter;
-} atomic_t;
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		((v)->counter)
diff --git a/arch/cris/include/asm/atomic.h b/arch/cris/include/asm/atomic.h
index f71ea686a2ea..5718dd8902a1 100644
--- a/arch/cris/include/asm/atomic.h
+++ b/arch/cris/include/asm/atomic.h
@@ -4,7 +4,7 @@
 #define __ASM_CRIS_ATOMIC__
 
 #include <linux/compiler.h>
-
+#include <linux/types.h>
 #include <asm/system.h>
 #include <arch/atomic.h>
 
@@ -13,8 +13,6 @@
  * resource counting etc..
  */
 
-typedef struct { volatile int counter; } atomic_t;
-
 #define ATOMIC_INIT(i)  { (i) }
 
 #define atomic_read(v) ((v)->counter)
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index b4cf0ea97ede..833186c8dc3b 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -1,12 +1,13 @@
 #ifndef __ARCH_H8300_ATOMIC__
 #define __ARCH_H8300_ATOMIC__
 
+#include <linux/types.h>
+
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-typedef struct { int counter; } atomic_t;
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		((v)->counter)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 50c2b83fd5a0..d37292bd9875 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -17,12 +17,6 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-/*
- * On IA-64, counter must always be volatile to ensure that that the
- * memory accesses are ordered.
- */
-typedef struct { volatile __s32 counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
 
 #define ATOMIC_INIT(i)		((atomic_t) { (i) })
 #define ATOMIC64_INIT(i)	((atomic64_t) { (i) })
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f07688da947c..097b84d54e73 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -670,9 +670,11 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn,
+			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
+		p->ainsn.insn = NULL;
+	}
 }
 /*
  * We are resuming execution after a single step fault, so the pt_regs
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 054bcd9439aa..56e12903973c 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -692,7 +692,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones + ZONE_NORMAL;
-	ret = __add_pages(zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
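[Each arch_add_memory() in this patch now forwards the node id, so the generic prototype evidently grew a leading nid parameter. A sketch of the assumed declaration; the real one lives in include/linux/memory_hotplug.h, which is not part of this diff:]

	/* assumed updated prototype, inferred from the call sites in this patch */
	extern int __add_pages(int nid, struct zone *zone,
			       unsigned long start_pfn, unsigned long nr_pages);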
diff --git a/arch/m68knommu/include/asm/atomic.h b/arch/m68knommu/include/asm/atomic.h
index d5632a305dae..6bb674855a3f 100644
--- a/arch/m68knommu/include/asm/atomic.h
+++ b/arch/m68knommu/include/asm/atomic.h
@@ -1,6 +1,7 @@
 #ifndef __ARCH_M68KNOMMU_ATOMIC__
 #define __ARCH_M68KNOMMU_ATOMIC__
 
+#include <linux/types.h>
 #include <asm/system.h>
 
 /*
@@ -12,7 +13,6 @@
  * We do not have SMP m68k systems, so we don't have to deal with that.
  */
 
-typedef struct { int counter; } atomic_t;
 #define ATOMIC_INIT(i)	{ (i) }
 
 #define atomic_read(v)		((v)->counter)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 1232be3885b0..c996c3b4d074 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -15,13 +15,12 @@
 #define _ASM_ATOMIC_H
 
 #include <linux/irqflags.h>
+#include <linux/types.h>
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
 #include <asm/system.h>
 
-typedef struct { volatile int counter; } atomic_t;
-
 #define ATOMIC_INIT(i)    { (i) }
 
 /*
@@ -404,8 +403,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile long counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i)    { (i) }
 
 /*
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 57fcc4a5ebb4..edbfe25c5fc1 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -155,14 +155,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 #endif
 
-/* Note that we need not lock read accesses - aligned word writes/reads
- * are atomic, so a reader never sees unconsistent values.
- *
- * Cache-line alignment would conflict with, for example, linux/module.h
+/*
+ * Note that we need not lock read accesses - aligned word writes/reads
+ * are atomic, so a reader never sees inconsistent values.
  */
 
-typedef struct { volatile int counter; } atomic_t;
-
 /* It's possible to reduce all atomic operations to either
  * __atomic_add_return, atomic_set and atomic_read (the latter
  * is there only for consistency).
@@ -260,8 +257,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifdef CONFIG_64BIT
 
-typedef struct { volatile s64 counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
 
 static __inline__ int
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 499be5bdd6fa..b401950f5259 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -5,7 +5,7 @@
  * PowerPC atomic operations
  */
 
-typedef struct { int counter; } atomic_t;
+#include <linux/types.h>
 
 #ifdef __KERNEL__
 #include <linux/compiler.h>
@@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 
 #ifdef __powerpc64__
 
-typedef struct { long counter; } atomic64_t;
-
 #define ATOMIC64_INIT(i)	{ (i) }
 
 static __inline__ long atomic64_read(const atomic64_t *v)
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 26f0d0ab27a5..b1dafb6a9743 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -18,6 +18,12 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep);
 
 /*
+ * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
+ * to override the version in mm/hugetlb.c
+ */
+#define vma_mmu_pagesize vma_mmu_pagesize
+
+/*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
  */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index de79915452c8..989edcdf0297 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -96,9 +96,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 201c7a5486cb..9920d6a7cf29 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -512,6 +512,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
 }
 
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
+
+	return 1UL << mmu_psize_to_shift(psize);
+}
+
 /*
  * Called by asm hashtable.S for doing lazy icache flush
  */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 53b06ebb3f2f..f00f09a77f12 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -132,7 +132,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	/* this should work for most non-highmem platforms */
 	zone = pgdata->node_zones;
 
-	return __add_pages(zone, start_pfn, nr_pages);
+	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2d184655bc5d..de432f2de2d2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -2,6 +2,7 @@
 #define __ARCH_S390_ATOMIC__
 
 #include <linux/compiler.h>
+#include <linux/types.h>
 
 /*
  * include/asm-s390/atomic.h
@@ -23,9 +24,6 @@
  * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
  */
 
-typedef struct {
-	int counter;
-} __attribute__ ((aligned (4))) atomic_t;
 #define ATOMIC_INIT(i)  { (i) }
 
 #ifdef __KERNEL__
@@ -149,9 +147,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 #undef __CS_LOOP
 
 #ifdef __s390x__
-typedef struct {
-	long long counter;
-} __attribute__ ((aligned (8))) atomic64_t;
 #define ATOMIC64_INIT(i)  { (i) }
 
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 569079ec4ff0..9b92856632cf 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -218,9 +218,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, 0);
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, 0);
+		p->ainsn.insn = NULL;
+	}
 }
 
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 158b0d6d7046..f0258ca3b17e 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -183,7 +183,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
+	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index c043ef003028..6327ffbb1992 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -7,16 +7,15 @@
  *
  */
 
-typedef struct { volatile int counter; } atomic_t;
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/system.h>
 
 #define ATOMIC_INIT(i)	( (atomic_t) { (i) } )
 
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		((v)->counter = (i))
 
-#include <linux/compiler.h>
-#include <asm/system.h>
-
 #if defined(CONFIG_GUSA_RB)
 #include <asm/atomic-grb.h>
 #elif defined(CONFIG_CPU_SH4A)
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 88807a2aacc3..c0aa3d83ec0e 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -13,6 +13,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
+#include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 6cbef8caeb56..3edf297c829b 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -311,7 +311,8 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	pgdat = NODE_DATA(nid);
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
-	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
+	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
+				start_pfn, nr_pages);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
 
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 5c944b5a8040..ce465975a6a5 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -13,8 +13,6 @@
 
 #include <linux/types.h>
 
-typedef struct { volatile int counter; } atomic_t;
-
 #ifdef __KERNEL__
 
 #define ATOMIC_INIT(i)  { (i) }
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 5982c5ae7f07..a0a706492696 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -10,9 +10,6 @@
 #include <linux/types.h>
 #include <asm/system.h>
 
-typedef struct { volatile int counter; } atomic_t;
-typedef struct { volatile __s64 counter; } atomic64_t;
-
 #define ATOMIC_INIT(i)		{ (i) }
 #define ATOMIC64_INIT(i)	{ (i) }
 
18 15
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 44e490419495..7384d8accfe7 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -64,11 +64,10 @@ good_area:
 
 	do {
 		int fault;
-survive:
+
 		fault = handle_mm_fault(mm, vma, address, is_write);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
-				err = -ENOMEM;
 				goto out_of_memory;
 			} else if (fault & VM_FAULT_SIGBUS) {
 				err = -EACCES;
@@ -104,18 +103,14 @@ out:
 out_nosemaphore:
 	return err;
 
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
 out_of_memory:
-	if (is_global_init(current)) {
-		up_read(&mm->mmap_sem);
-		yield();
-		down_read(&mm->mmap_sem);
-		goto survive;
-	}
-	goto out;
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
+	up_read(&mm->mmap_sem);
+	pagefault_out_of_memory();
+	return 0;
 }
 
 static void bad_segv(struct faultinfo fi, unsigned long ip)
@@ -214,9 +209,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
 		si.si_addr = (void __user *)address;
 		current->thread.arch.faultinfo = fi;
 		force_sig_info(SIGBUS, &si, current);
-	} else if (err == -ENOMEM) {
-		printk(KERN_INFO "VM: killing process %s\n", current->comm);
-		do_exit(SIGKILL);
 	} else {
 		BUG_ON(err != -EFAULT);
 		si.si_signo = SIGSEGV;
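[This change and the matching one in arch/x86/mm/fault.c below share one contract: the arch handler drops mmap_sem, calls pagefault_out_of_memory() so the core VM can run the OOM killer, and simply returns, letting the faulting instruction retry (or never run again if the task was chosen as the victim). A minimal sketch of the expected caller shape; anything beyond what the diff shows is an assumption:]

	out_of_memory:
		up_read(&mm->mmap_sem);		/* must not hold mmap_sem across the OOM killer */
		pagefault_out_of_memory();	/* core VM may reclaim, or kill current */
		return 0;			/* returning lets the fault retry */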
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index ad5b9f6ecddf..85b46fba4229 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_ATOMIC_32_H
 
 #include <linux/compiler.h>
+#include <linux/types.h>
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
 
@@ -10,15 +11,6 @@
  * resource counting etc..
  */
 
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct {
-	int counter;
-} atomic_t;
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
24/** 16/**
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
index 279d2a731f3f..8c21731984da 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -1,25 +1,15 @@
 #ifndef _ASM_X86_ATOMIC_64_H
 #define _ASM_X86_ATOMIC_64_H
 
+#include <linux/types.h>
 #include <asm/alternative.h>
 #include <asm/cmpxchg.h>
 
-/* atomic_t should be 32 bit signed type */
-
 /*
  * Atomic operations that C can't guarantee us.  Useful for
  * resource counting etc..
  */
 
-/*
- * Make sure gcc doesn't try to be clever and move things around
- * on us. We need to use _exactly_ the address the user gave us,
- * not some alias that contains the same information.
- */
-typedef struct {
-	int counter;
-} atomic_t;
-
 #define ATOMIC_INIT(i)	{ (i) }
 
 /**
@@ -191,11 +181,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-/* An 64bit atomic type */
-
-typedef struct {
-	long counter;
-} atomic64_t;
+/* The 64-bit atomic type */
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
201 187
diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h
deleted file mode 100644
index 8b064bd9c553..000000000000
--- a/arch/x86/include/asm/unwind.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef _ASM_X86_UNWIND_H
-#define _ASM_X86_UNWIND_H
-
-#define UNW_PC(frame) ((void)(frame), 0UL)
-#define UNW_SP(frame) ((void)(frame), 0UL)
-#define UNW_FP(frame) ((void)(frame), 0UL)
-
-static inline int arch_unw_user_mode(const void *info)
-{
-	return 0;
-}
-
-#endif /* _ASM_X86_UNWIND_H */
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 6c27679ec6aa..eead6f8f9218 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -376,9 +376,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
-	mutex_lock(&kprobe_mutex);
-	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
-	mutex_unlock(&kprobe_mutex);
+	if (p->ainsn.insn) {
+		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+		p->ainsn.insn = NULL;
+	}
 }
 
 static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ce6650eb64e9..c9a666cdd3db 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -20,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/ptrace.h>
 #include <linux/string.h>
-#include <linux/unwind.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/kexec.h>
@@ -51,7 +50,6 @@
 #include <asm/debugreg.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
-#include <asm/unwind.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/i387.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 57ec8c86a877..9e268b6b204e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -667,7 +667,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
-again:
 	/*
 	 * When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
@@ -859,25 +858,14 @@ no_context:
 	oops_end(flags, regs, sig);
 #endif
 
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
 out_of_memory:
+	/*
+	 * We ran out of memory, call the OOM killer, and return the userspace
+	 * (which will retry the fault, or kill us if we got oom-killed).
+	 */
 	up_read(&mm->mmap_sem);
-	if (is_global_init(tsk)) {
-		yield();
-		/*
-		 * Re-lookup the vma - in theory the vma tree might
-		 * have changed:
-		 */
-		goto again;
-	}
-
-	printk("VM: killing process %s\n", tsk->comm);
-	if (error_code & PF_USER)
-		do_group_exit(SIGKILL);
-	goto no_context;
+	pagefault_out_of_memory();
+	return;
 
 do_sigbus:
 	up_read(&mm->mmap_sem);
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f99a6c6c432e..544d724caeee 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -1079,7 +1079,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	return __add_pages(zone, start_pfn, nr_pages);
+	return __add_pages(nid, zone, start_pfn, nr_pages);
 }
 #endif
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9f7a0d24d42a..54c437e96541 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -857,7 +857,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;
 
-	ret = __add_pages(zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 	WARN_ON_ONCE(ret);
 
 	return ret;