Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/include/asm/tlb.h | 4
-rw-r--r--  arch/arm/include/asm/tlb.h | 4
-rw-r--r--  arch/avr32/include/asm/pgalloc.h | 2
-rw-r--r--  arch/cris/include/asm/pgalloc.h | 2
-rw-r--r--  arch/frv/include/asm/pgalloc.h | 4
-rw-r--r--  arch/frv/include/asm/pgtable.h | 2
-rw-r--r--  arch/ia64/include/asm/pgalloc.h | 6
-rw-r--r--  arch/ia64/include/asm/tlb.h | 12
-rw-r--r--  arch/m32r/include/asm/pgalloc.h | 4
-rw-r--r--  arch/m68k/include/asm/motorola_pgalloc.h | 6
-rw-r--r--  arch/m68k/include/asm/sun3_pgalloc.h | 4
-rw-r--r--  arch/microblaze/Makefile | 35
-rw-r--r--  arch/microblaze/include/asm/io.h | 1
-rw-r--r--  arch/microblaze/include/asm/pgalloc.h | 4
-rw-r--r--  arch/microblaze/include/asm/pgtable.h | 6
-rw-r--r--  arch/microblaze/include/asm/prom.h | 23
-rw-r--r--  arch/microblaze/include/asm/tlb.h | 2
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 2
-rw-r--r--  arch/microblaze/kernel/Makefile | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-static.c | 2
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c | 2
-rw-r--r--  arch/microblaze/kernel/head.S | 17
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 109
-rw-r--r--  arch/microblaze/kernel/module.c | 19
-rw-r--r--  arch/microblaze/kernel/setup.c | 8
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c | 99
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 2
-rw-r--r--  arch/microblaze/mm/fault.c | 15
-rw-r--r--  arch/mips/include/asm/pgalloc.h | 15
-rw-r--r--  arch/mn10300/include/asm/pgalloc.h | 2
-rw-r--r--  arch/parisc/include/asm/tlb.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc-32.h | 2
-rw-r--r--  arch/powerpc/include/asm/pgalloc-64.h | 4
-rw-r--r--  arch/powerpc/include/asm/pgalloc.h | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 4
-rw-r--r--  arch/s390/include/asm/tlb.h | 9
-rw-r--r--  arch/s390/kernel/early.c | 4
-rw-r--r--  arch/s390/kernel/smp.c | 7
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S | 11
-rw-r--r--  arch/s390/power/swsusp.c | 36
-rw-r--r--  arch/s390/power/swsusp_asm64.S | 35
-rw-r--r--  arch/sh/include/asm/pgalloc.h | 10
-rw-r--r--  arch/sh/include/asm/tlb.h | 6
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h | 8
-rw-r--r--  arch/sparc/include/asm/tlb_64.h | 6
-rw-r--r--  arch/um/include/asm/pgalloc.h | 4
-rw-r--r--  arch/um/include/asm/tlb.h | 6
-rw-r--r--  arch/x86/include/asm/pgalloc.h | 25
-rw-r--r--  arch/x86/include/asm/uaccess.h | 4
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 10
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 253
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/mfgpt_32.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 8
-rw-r--r--  arch/x86/kernel/setup.c | 13
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 7
-rw-r--r--  arch/x86/mm/pgtable.c | 6
-rw-r--r--  arch/x86/mm/srat_64.c | 6
-rw-r--r--  arch/xtensa/include/asm/tlb.h | 2
62 files changed, 533 insertions, 392 deletions
diff --git a/arch/alpha/include/asm/tlb.h b/arch/alpha/include/asm/tlb.h
index c13636575fba..42866759f3fa 100644
--- a/arch/alpha/include/asm/tlb.h
+++ b/arch/alpha/include/asm/tlb.h
@@ -9,7 +9,7 @@
9 9
10#include <asm-generic/tlb.h> 10#include <asm-generic/tlb.h>
11 11
12#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 12#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
13#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 13#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
14 14
15#endif 15#endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 321c83e43a1e..f41a6f57cd12 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -102,8 +102,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
102} 102}
103 103
104#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) 104#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
105#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep) 105#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
106#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp) 106#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
107 107
108#define tlb_migrate_finish(mm) do { } while (0) 108#define tlb_migrate_finish(mm) do { } while (0)
109 109
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 640821323943..92ecd8446ef8 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -83,7 +83,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
83 quicklist_free_page(QUICK_PT, NULL, pte); 83 quicklist_free_page(QUICK_PT, NULL, pte);
84} 84}
85 85
86#define __pte_free_tlb(tlb,pte) \ 86#define __pte_free_tlb(tlb,pte,addr) \
87do { \ 87do { \
88 pgtable_page_dtor(pte); \ 88 pgtable_page_dtor(pte); \
89 tlb_remove_page((tlb), pte); \ 89 tlb_remove_page((tlb), pte); \
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h
index a1ba761d0573..6da975db112f 100644
--- a/arch/cris/include/asm/pgalloc.h
+++ b/arch/cris/include/asm/pgalloc.h
@@ -47,7 +47,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
47 __free_page(pte); 47 __free_page(pte);
48} 48}
49 49
50#define __pte_free_tlb(tlb,pte) \ 50#define __pte_free_tlb(tlb,pte,address) \
51do { \ 51do { \
52 pgtable_page_dtor(pte); \ 52 pgtable_page_dtor(pte); \
53 tlb_remove_page((tlb), pte); \ 53 tlb_remove_page((tlb), pte); \
diff --git a/arch/frv/include/asm/pgalloc.h b/arch/frv/include/asm/pgalloc.h
index 971e6addb009..416d19a632f2 100644
--- a/arch/frv/include/asm/pgalloc.h
+++ b/arch/frv/include/asm/pgalloc.h
@@ -49,7 +49,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
49 __free_page(pte); 49 __free_page(pte);
50} 50}
51 51
52#define __pte_free_tlb(tlb,pte) \ 52#define __pte_free_tlb(tlb,pte,address) \
53do { \ 53do { \
54 pgtable_page_dtor(pte); \ 54 pgtable_page_dtor(pte); \
55 tlb_remove_page((tlb),(pte)); \ 55 tlb_remove_page((tlb),(pte)); \
@@ -62,7 +62,7 @@ do { \
62 */ 62 */
63#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); }) 63#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
64#define pmd_free(mm, x) do { } while (0) 64#define pmd_free(mm, x) do { } while (0)
65#define __pmd_free_tlb(tlb,x) do { } while (0) 65#define __pmd_free_tlb(tlb,x,a) do { } while (0)
66 66
67#endif /* CONFIG_MMU */ 67#endif /* CONFIG_MMU */
68 68
diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h
index 33233011b1c1..22c60692b551 100644
--- a/arch/frv/include/asm/pgtable.h
+++ b/arch/frv/include/asm/pgtable.h
@@ -225,7 +225,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
225 */ 225 */
226#define pud_alloc_one(mm, address) NULL 226#define pud_alloc_one(mm, address) NULL
227#define pud_free(mm, x) do { } while (0) 227#define pud_free(mm, x) do { } while (0)
228#define __pud_free_tlb(tlb, x) do { } while (0) 228#define __pud_free_tlb(tlb, x, address) do { } while (0)
229 229
230/* 230/*
231 * The "pud_xxx()" functions here are trivial for a folded two-level 231 * The "pud_xxx()" functions here are trivial for a folded two-level
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index b9ac1a6fc216..96a8d927db28 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
48{ 48{
49 quicklist_free(0, NULL, pud); 49 quicklist_free(0, NULL, pud);
50} 50}
51#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud) 51#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
52#endif /* CONFIG_PGTABLE_4 */ 52#endif /* CONFIG_PGTABLE_4 */
53 53
54static inline void 54static inline void
@@ -67,7 +67,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
67 quicklist_free(0, NULL, pmd); 67 quicklist_free(0, NULL, pmd);
68} 68}
69 69
70#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 70#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
71 71
72static inline void 72static inline void
73pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte) 73pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
@@ -117,6 +117,6 @@ static inline void check_pgt_cache(void)
117 quicklist_trim(0, NULL, 25, 16); 117 quicklist_trim(0, NULL, 25, 16);
118} 118}
119 119
120#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 120#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
121 121
122#endif /* _ASM_IA64_PGALLOC_H */ 122#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 20d8a39680c2..85d965cb19a0 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -236,22 +236,22 @@ do { \
236 __tlb_remove_tlb_entry(tlb, ptep, addr); \ 236 __tlb_remove_tlb_entry(tlb, ptep, addr); \
237} while (0) 237} while (0)
238 238
239#define pte_free_tlb(tlb, ptep) \ 239#define pte_free_tlb(tlb, ptep, address) \
240do { \ 240do { \
241 tlb->need_flush = 1; \ 241 tlb->need_flush = 1; \
242 __pte_free_tlb(tlb, ptep); \ 242 __pte_free_tlb(tlb, ptep, address); \
243} while (0) 243} while (0)
244 244
245#define pmd_free_tlb(tlb, ptep) \ 245#define pmd_free_tlb(tlb, ptep, address) \
246do { \ 246do { \
247 tlb->need_flush = 1; \ 247 tlb->need_flush = 1; \
248 __pmd_free_tlb(tlb, ptep); \ 248 __pmd_free_tlb(tlb, ptep, address); \
249} while (0) 249} while (0)
250 250
251#define pud_free_tlb(tlb, pudp) \ 251#define pud_free_tlb(tlb, pudp, address) \
252do { \ 252do { \
253 tlb->need_flush = 1; \ 253 tlb->need_flush = 1; \
254 __pud_free_tlb(tlb, pudp); \ 254 __pud_free_tlb(tlb, pudp, address); \
255} while (0) 255} while (0)
256 256
257#endif /* _ASM_IA64_TLB_H */ 257#endif /* _ASM_IA64_TLB_H */
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index f11a2b909cdb..0fc736198979 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -58,7 +58,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
58 __free_page(pte); 58 __free_page(pte);
59} 59}
60 60
61#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 61#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
62 62
63/* 63/*
64 * allocating and freeing a pmd is trivial: the 1-entry pmd is 64 * allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -68,7 +68,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
68 68
69#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); }) 69#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
70#define pmd_free(mm, x) do { } while (0) 70#define pmd_free(mm, x) do { } while (0)
71#define __pmd_free_tlb(tlb, x) do { } while (0) 71#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
72#define pgd_populate(mm, pmd, pte) BUG() 72#define pgd_populate(mm, pmd, pte) BUG()
73 73
74#define check_pgt_cache() do { } while (0) 74#define check_pgt_cache() do { } while (0)
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index d08bf6261df8..15ee4c74a9f0 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -54,7 +54,8 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
54 __free_page(page); 54 __free_page(page);
55} 55}
56 56
57static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page) 57static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
58 unsigned long address)
58{ 59{
59 pgtable_page_dtor(page); 60 pgtable_page_dtor(page);
60 cache_page(kmap(page)); 61 cache_page(kmap(page));
@@ -73,7 +74,8 @@ static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
73 return free_pointer_table(pmd); 74 return free_pointer_table(pmd);
74} 75}
75 76
76static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 77static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
78 unsigned long address)
77{ 79{
78 return free_pointer_table(pmd); 80 return free_pointer_table(pmd);
79} 81}
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index d4c83f143816..48d80d5a666f 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t page)
32 __free_page(page); 32 __free_page(page);
33} 33}
34 34
35#define __pte_free_tlb(tlb,pte) \ 35#define __pte_free_tlb(tlb,pte,addr) \
36do { \ 36do { \
37 pgtable_page_dtor(pte); \ 37 pgtable_page_dtor(pte); \
38 tlb_remove_page((tlb), pte); \ 38 tlb_remove_page((tlb), pte); \
@@ -80,7 +80,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
80 * inside the pgd, so has no extra memory associated with it. 80 * inside the pgd, so has no extra memory associated with it.
81 */ 81 */
82#define pmd_free(mm, x) do { } while (0) 82#define pmd_free(mm, x) do { } while (0)
83#define __pmd_free_tlb(tlb, x) do { } while (0) 83#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
84 84
85static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 85static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
86{ 86{
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index d0bcf80a1136..8439598d4655 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -6,14 +6,16 @@ endif
6 6
7# What CPU vesion are we building for, and crack it open 7# What CPU vesion are we building for, and crack it open
8# as major.minor.rev 8# as major.minor.rev
9CPU_VER=$(subst ",,$(CONFIG_XILINX_MICROBLAZE0_HW_VER) ) 9CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
10CPU_MAJOR=$(shell echo $(CPU_VER) | cut -d '.' -f 1) 10CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
11CPU_MINOR=$(shell echo $(CPU_VER) | cut -d '.' -f 2) 11CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2)
12CPU_REV=$(shell echo $(CPU_VER) | cut -d '.' -f 3) 12CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3)
13 13
14export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV 14export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
15 15
16# Use cpu-related CONFIG_ vars to set compile options. 16# Use cpu-related CONFIG_ vars to set compile options.
17# The various CONFIG_XILINX cpu features options are integers 0/1/2...
18# rather than bools y/n
17 19
18# Work out HW multipler support. This is icky. 20# Work out HW multipler support. This is icky.
19# 1. Spartan2 has no HW multiplers. 21# 1. Spartan2 has no HW multiplers.
@@ -34,30 +36,29 @@ CPUFLAGS-$(CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR) += -mxl-pattern-compare
34 36
35CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER)) 37CPUFLAGS-1 += $(call cc-option,-mcpu=v$(CPU_VER))
36 38
37# The various CONFIG_XILINX cpu features options are integers 0/1/2...
38# rather than bools y/n
39
40# r31 holds current when in kernel mode 39# r31 holds current when in kernel mode
41CFLAGS_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2) 40KBUILD_KERNEL += -ffixed-r31 $(CPUFLAGS-1) $(CPUFLAGS-2)
42 41
43LDFLAGS := 42LDFLAGS :=
44LDFLAGS_vmlinux := 43LDFLAGS_vmlinux :=
45LDFLAGS_BLOB := --format binary --oformat elf32-microblaze
46 44
47LIBGCC := $(shell $(CC) $(CFLAGS_KERNEL) -print-libgcc-file-name) 45LIBGCC := $(shell $(CC) $(KBUILD_KERNEL) -print-libgcc-file-name)
48 46
49head-y := arch/microblaze/kernel/head.o 47head-y := arch/microblaze/kernel/head.o
50libs-y += arch/microblaze/lib/ $(LIBGCC) 48libs-y += arch/microblaze/lib/
51core-y += arch/microblaze/kernel/ arch/microblaze/mm/ \ 49libs-y += $(LIBGCC)
52 arch/microblaze/platform/ 50core-y += arch/microblaze/kernel/
51core-y += arch/microblaze/mm/
52core-y += arch/microblaze/platform/
53 53
54boot := arch/$(ARCH)/boot 54boot := arch/microblaze/boot
55 55
56# defines filename extension depending memory management type 56# defines filename extension depending memory management type
57ifeq ($(CONFIG_MMU),) 57ifeq ($(CONFIG_MMU),)
58MMUEXT := -nommu 58MMU := -nommu
59endif 59endif
60export MMUEXT 60
61export MMU
61 62
62all: linux.bin 63all: linux.bin
63 64
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 5c173424d074..7c3ec13b44d8 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -14,7 +14,6 @@
14#include <asm/byteorder.h> 14#include <asm/byteorder.h>
15#include <asm/page.h> 15#include <asm/page.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <asm/byteorder.h>
18#include <linux/mm.h> /* Get struct page {...} */ 17#include <linux/mm.h> /* Get struct page {...} */
19 18
20 19
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index 59a757e46ba5..b0131da1387b 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -180,7 +180,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
180 __free_page(ptepage); 180 __free_page(ptepage);
181} 181}
182 182
183#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 183#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
184 184
185#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte)) 185#define pmd_populate(mm, pmd, pte) (pmd_val(*(pmd)) = page_address(pte))
186 186
@@ -193,7 +193,7 @@ extern inline void pte_free(struct mm_struct *mm, struct page *ptepage)
193 */ 193 */
194#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); }) 194#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
195/*#define pmd_free(mm, x) do { } while (0)*/ 195/*#define pmd_free(mm, x) do { } while (0)*/
196#define __pmd_free_tlb(tlb, x) do { } while (0) 196#define __pmd_free_tlb(tlb, x, addr) do { } while (0)
197#define pgd_populate(mm, pmd, pte) BUG() 197#define pgd_populate(mm, pmd, pte) BUG()
198 198
199extern int do_check_pgt_cache(int, int); 199extern int do_check_pgt_cache(int, int);
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 4c57a586a989..cc3a4dfc3eaa 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -185,6 +185,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
185 185
186/* Definitions for MicroBlaze. */ 186/* Definitions for MicroBlaze. */
187#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ 187#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */
188#define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */
188#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ 189#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */
189#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ 190#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */
190#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ 191#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
@@ -320,8 +321,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
320static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } 321static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
321static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 322static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
322static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 323static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
323/* FIXME */ 324static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
324static inline int pte_file(pte_t pte) { return 0; }
325 325
326static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 326static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
327static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 327static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -488,7 +488,7 @@ static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
488/* Encode and decode a nonlinear file mapping entry */ 488/* Encode and decode a nonlinear file mapping entry */
489#define PTE_FILE_MAX_BITS 29 489#define PTE_FILE_MAX_BITS 29
490#define pte_to_pgoff(pte) (pte_val(pte) >> 3) 490#define pte_to_pgoff(pte) (pte_val(pte) >> 3)
491#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) }) 491#define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE })
492 492
493extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 493extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
494 494
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 20f7b3a926e8..37e6f305a68e 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -16,6 +16,18 @@
16#define _ASM_MICROBLAZE_PROM_H 16#define _ASM_MICROBLAZE_PROM_H
17#ifdef __KERNEL__ 17#ifdef __KERNEL__
18 18
19/* Definitions used by the flattened device tree */
20#define OF_DT_HEADER 0xd00dfeed /* marker */
21#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
22#define OF_DT_END_NODE 0x2 /* End node */
23#define OF_DT_PROP 0x3 /* Property: name off, size, content */
24#define OF_DT_NOP 0x4 /* nop */
25#define OF_DT_END 0x9
26
27#define OF_DT_VERSION 0x10
28
29#ifndef __ASSEMBLY__
30
19#include <linux/types.h> 31#include <linux/types.h>
20#include <linux/proc_fs.h> 32#include <linux/proc_fs.h>
21#include <linux/platform_device.h> 33#include <linux/platform_device.h>
@@ -29,16 +41,6 @@
29#define of_prop_cmp(s1, s2) strcmp((s1), (s2)) 41#define of_prop_cmp(s1, s2) strcmp((s1), (s2))
30#define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) 42#define of_node_cmp(s1, s2) strcasecmp((s1), (s2))
31 43
32/* Definitions used by the flattened device tree */
33#define OF_DT_HEADER 0xd00dfeed /* marker */
34#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
35#define OF_DT_END_NODE 0x2 /* End node */
36#define OF_DT_PROP 0x3 /* Property: name off, size, content */
37#define OF_DT_NOP 0x4 /* nop */
38#define OF_DT_END 0x9
39
40#define OF_DT_VERSION 0x10
41
42/* 44/*
43 * This is what gets passed to the kernel by prom_init or kexec 45 * This is what gets passed to the kernel by prom_init or kexec
44 * 46 *
@@ -309,5 +311,6 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
309 */ 311 */
310#include <linux/of.h> 312#include <linux/of.h>
311 313
314#endif /* __ASSEMBLY__ */
312#endif /* __KERNEL__ */ 315#endif /* __KERNEL__ */
313#endif /* _ASM_MICROBLAZE_PROM_H */ 316#endif /* _ASM_MICROBLAZE_PROM_H */
diff --git a/arch/microblaze/include/asm/tlb.h b/arch/microblaze/include/asm/tlb.h
index c472d2801132..e8abd4a0349c 100644
--- a/arch/microblaze/include/asm/tlb.h
+++ b/arch/microblaze/include/asm/tlb.h
@@ -11,7 +11,7 @@
11#ifndef _ASM_MICROBLAZE_TLB_H 11#ifndef _ASM_MICROBLAZE_TLB_H
12#define _ASM_MICROBLAZE_TLB_H 12#define _ASM_MICROBLAZE_TLB_H
13 13
14#define tlb_flush(tlb) do {} while (0) 14#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
15 15
16#include <asm-generic/tlb.h> 16#include <asm-generic/tlb.h>
17 17
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 65adad61e7e9..5431b4631a7a 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -189,7 +189,7 @@ extern long strnlen_user(const char *src, long count);
189 189
190#define __put_user(x, ptr) \ 190#define __put_user(x, ptr) \
191({ \ 191({ \
192 __typeof__(*(ptr)) __gu_val = x; \ 192 __typeof__(*(ptr)) volatile __gu_val = (x); \
193 long __gu_err = 0; \ 193 long __gu_err = 0; \
194 switch (sizeof(__gu_val)) { \ 194 switch (sizeof(__gu_val)) { \
195 case 1: \ 195 case 1: \
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index f4a5e19a20eb..d487729683de 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -17,4 +17,4 @@ obj-$(CONFIG_HEART_BEAT) += heartbeat.o
17obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o 17obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
18obj-$(CONFIG_MMU) += misc.o 18obj-$(CONFIG_MMU) += misc.o
19 19
20obj-y += entry$(MMUEXT).o 20obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
index 153f57c57b6d..c259786e7faa 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -22,7 +22,7 @@
22 22
23#define CI(c, p) { ci->c = PVR_##p(pvr); } 23#define CI(c, p) { ci->c = PVR_##p(pvr); }
24#define err_printk(x) \ 24#define err_printk(x) \
25 early_printk("ERROR: Microblaze " x " - different for PVR and DTS\n"); 25 early_printk("ERROR: Microblaze " x "-different for PVR and DTS\n");
26 26
27void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu) 27void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
28{ 28{
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 450ca6bb828d..adb448f93d5f 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
18static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER; 18static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
19 19
20#define err_printk(x) \ 20#define err_printk(x) \
21 early_printk("ERROR: Microblaze " x "- different for kernel and DTS\n"); 21 early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
22 22
23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu) 23void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
24{ 24{
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index a10bea119b94..c411c6757deb 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -26,6 +26,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
26 {"7.10.b", 0x09}, 26 {"7.10.b", 0x09},
27 {"7.10.c", 0x0a}, 27 {"7.10.c", 0x0a},
28 {"7.10.d", 0x0b}, 28 {"7.10.d", 0x0b},
29 {"7.20.a", 0x0c},
30 {"7.20.b", 0x0d},
29 /* FIXME There is no keycode defined in MBV for these versions */ 31 /* FIXME There is no keycode defined in MBV for these versions */
30 {"2.10.a", 0x10}, 32 {"2.10.a", 0x10},
31 {"3.00.a", 0x20}, 33 {"3.00.a", 0x20},
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index e568d6ec621b..e41c6ce2a7be 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -31,6 +31,7 @@
31#include <linux/linkage.h> 31#include <linux/linkage.h>
32#include <asm/thread_info.h> 32#include <asm/thread_info.h>
33#include <asm/page.h> 33#include <asm/page.h>
34#include <asm/prom.h> /* for OF_DT_HEADER */
34 35
35#ifdef CONFIG_MMU 36#ifdef CONFIG_MMU
36#include <asm/setup.h> /* COMMAND_LINE_SIZE */ 37#include <asm/setup.h> /* COMMAND_LINE_SIZE */
@@ -54,11 +55,19 @@ ENTRY(_start)
54 andi r1, r1, ~2 55 andi r1, r1, ~2
55 mts rmsr, r1 56 mts rmsr, r1
56 57
57/* save fdt to kernel location */ 58/* r7 may point to an FDT, or there may be one linked in.
58/* r7 stores pointer to fdt blob */ 59 if it's in r7, we've got to save it away ASAP.
59 beqi r7, no_fdt_arg 60 We ensure r7 points to a valid FDT, just in case the bootloader
61 is broken or non-existent */
62 beqi r7, no_fdt_arg /* NULL pointer? don't copy */
63 lw r11, r0, r7 /* Does r7 point to a */
64 rsubi r11, r11, OF_DT_HEADER /* valid FDT? */
65 beqi r11, _prepare_copy_fdt
66 or r7, r0, r0 /* clear R7 when not valid DTB */
67 bnei r11, no_fdt_arg /* No - get out of here */
68_prepare_copy_fdt:
60 or r11, r0, r0 /* incremment */ 69 or r11, r0, r0 /* incremment */
61 ori r4, r0, TOPHYS(_fdt_start) /* save bram context */ 70 ori r4, r0, TOPHYS(_fdt_start)
62 ori r3, r0, (0x4000 - 4) 71 ori r3, r0, (0x4000 - 4)
63_copy_fdt: 72_copy_fdt:
64 lw r12, r7, r11 /* r12 = r7 + r11 */ 73 lw r12, r7, r11 /* r12 = r7 + r11 */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 9d591cd74fc2..3288c9737671 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -74,6 +74,7 @@
74 74
75#include <asm/mmu.h> 75#include <asm/mmu.h>
76#include <asm/pgtable.h> 76#include <asm/pgtable.h>
77#include <asm/signal.h>
77#include <asm/asm-offsets.h> 78#include <asm/asm-offsets.h>
78 79
79/* Helpful Macros */ 80/* Helpful Macros */
@@ -428,19 +429,9 @@ handle_unaligned_ex:
428 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 429 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
429 nop 430 nop
430_no_delayslot: 431_no_delayslot:
431#endif 432 /* jump to high level unaligned handler */
432 433 RESTORE_STATE;
433#ifdef CONFIG_MMU 434 bri unaligned_data_trap
434 /* Check if unaligned address is last on a 4k page */
435 andi r5, r4, 0xffc
436 xori r5, r5, 0xffc
437 bnei r5, _unaligned_ex2
438 _unaligned_ex1:
439 RESTORE_STATE;
440/* Another page must be accessed or physical address not in page table */
441 bri unaligned_data_trap
442
443 _unaligned_ex2:
444#endif 435#endif
445 andi r6, r3, 0x3E0; /* Mask and extract the register operand */ 436 andi r6, r3, 0x3E0; /* Mask and extract the register operand */
446 srl r6, r6; /* r6 >> 5 */ 437 srl r6, r6; /* r6 >> 5 */
@@ -450,45 +441,6 @@ _no_delayslot:
450 srl r6, r6; 441 srl r6, r6;
451 /* Store the register operand in a temporary location */ 442 /* Store the register operand in a temporary location */
452 sbi r6, r0, TOPHYS(ex_reg_op); 443 sbi r6, r0, TOPHYS(ex_reg_op);
453#ifdef CONFIG_MMU
454 /* Get physical address */
455 /* If we are faulting a kernel address, we have to use the
456 * kernel page tables.
457 */
458 ori r5, r0, CONFIG_KERNEL_START
459 cmpu r5, r4, r5
460 bgti r5, _unaligned_ex3
461 ori r5, r0, swapper_pg_dir
462 bri _unaligned_ex4
463
464 /* Get the PGD for the current thread. */
465_unaligned_ex3: /* user thread */
466 addi r5 ,CURRENT_TASK, TOPHYS(0); /* get current task address */
467 lwi r5, r5, TASK_THREAD + PGDIR
468_unaligned_ex4:
469 tophys(r5,r5)
470 BSRLI(r6,r4,20) /* Create L1 (pgdir/pmd) address */
471 andi r6, r6, 0xffc
472/* Assume pgdir aligned on 4K boundary, no need for "andi r5,r5,0xfffff003" */
473 or r5, r5, r6
474 lwi r6, r5, 0 /* Get L1 entry */
475 andi r5, r6, 0xfffff000 /* Extract L2 (pte) base address. */
476 beqi r5, _unaligned_ex1 /* Bail if no table */
477
478 tophys(r5,r5)
479 BSRLI(r6,r4,10) /* Compute PTE address */
480 andi r6, r6, 0xffc
481 andi r5, r5, 0xfffff003
482 or r5, r5, r6
483 lwi r5, r5, 0 /* Get Linux PTE */
484
485 andi r6, r5, _PAGE_PRESENT
486 beqi r6, _unaligned_ex1 /* Bail if no page */
487
488 andi r5, r5, 0xfffff000 /* Extract RPN */
489 andi r4, r4, 0x00000fff /* Extract offset */
490 or r4, r4, r5 /* Create physical address */
491#endif /* CONFIG_MMU */
492 444
493 andi r6, r3, 0x400; /* Extract ESR[S] */ 445 andi r6, r3, 0x400; /* Extract ESR[S] */
494 bnei r6, ex_sw; 446 bnei r6, ex_sw;
@@ -959,15 +911,15 @@ _unaligned_data_exception:
959 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */ 911 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
960ex_lw_vm: 912ex_lw_vm:
961 beqid r6, ex_lhw_vm; 913 beqid r6, ex_lhw_vm;
962 lbui r5, r4, 0; /* Exception address in r4 - delay slot */ 914load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
963/* Load a word, byte-by-byte from destination address and save it in tmp space*/ 915/* Load a word, byte-by-byte from destination address and save it in tmp space*/
964 la r6, r0, ex_tmp_data_loc_0; 916 la r6, r0, ex_tmp_data_loc_0;
965 sbi r5, r6, 0; 917 sbi r5, r6, 0;
966 lbui r5, r4, 1; 918load2: lbui r5, r4, 1;
967 sbi r5, r6, 1; 919 sbi r5, r6, 1;
968 lbui r5, r4, 2; 920load3: lbui r5, r4, 2;
969 sbi r5, r6, 2; 921 sbi r5, r6, 2;
970 lbui r5, r4, 3; 922load4: lbui r5, r4, 3;
971 sbi r5, r6, 3; 923 sbi r5, r6, 3;
972 brid ex_lw_tail_vm; 924 brid ex_lw_tail_vm;
973/* Get the destination register value into r3 - delay slot */ 925/* Get the destination register value into r3 - delay slot */
@@ -977,7 +929,7 @@ ex_lhw_vm:
977 * save it in tmp space */ 929 * save it in tmp space */
978 la r6, r0, ex_tmp_data_loc_0; 930 la r6, r0, ex_tmp_data_loc_0;
979 sbi r5, r6, 0; 931 sbi r5, r6, 0;
980 lbui r5, r4, 1; 932load5: lbui r5, r4, 1;
981 sbi r5, r6, 1; 933 sbi r5, r6, 1;
982 lhui r3, r6, 0; /* Get the destination register value into r3 */ 934 lhui r3, r6, 0; /* Get the destination register value into r3 */
983ex_lw_tail_vm: 935ex_lw_tail_vm:
@@ -996,22 +948,53 @@ ex_sw_tail_vm:
996 swi r3, r5, 0; /* Get the word - delay slot */ 948 swi r3, r5, 0; /* Get the word - delay slot */
997 /* Store the word, byte-by-byte into destination address */ 949 /* Store the word, byte-by-byte into destination address */
998 lbui r3, r5, 0; 950 lbui r3, r5, 0;
999 sbi r3, r4, 0; 951store1: sbi r3, r4, 0;
1000 lbui r3, r5, 1; 952 lbui r3, r5, 1;
1001 sbi r3, r4, 1; 953store2: sbi r3, r4, 1;
1002 lbui r3, r5, 2; 954 lbui r3, r5, 2;
1003 sbi r3, r4, 2; 955store3: sbi r3, r4, 2;
1004 lbui r3, r5, 3; 956 lbui r3, r5, 3;
1005 brid ret_from_exc; 957 brid ret_from_exc;
1006 sbi r3, r4, 3; /* Delay slot */ 958store4: sbi r3, r4, 3; /* Delay slot */
1007ex_shw_vm: 959ex_shw_vm:
1008 /* Store the lower half-word, byte-by-byte into destination address */ 960 /* Store the lower half-word, byte-by-byte into destination address */
1009 lbui r3, r5, 2; 961 lbui r3, r5, 2;
1010 sbi r3, r4, 0; 962store5: sbi r3, r4, 0;
1011 lbui r3, r5, 3; 963 lbui r3, r5, 3;
1012 brid ret_from_exc; 964 brid ret_from_exc;
1013 sbi r3, r4, 1; /* Delay slot */ 965store6: sbi r3, r4, 1; /* Delay slot */
1014ex_sw_end_vm: /* Exception handling of store word, ends. */ 966ex_sw_end_vm: /* Exception handling of store word, ends. */
967
968/* We have to prevent cases that get/put_user macros get unaligned pointer
969 * to bad page area. We have to find out which origin instruction caused it
970 * and called fixup for that origin instruction not instruction in unaligned
971 * handler */
972ex_unaligned_fixup:
973 ori r5, r7, 0 /* setup pointer to pt_regs */
974 lwi r6, r7, PT_PC; /* faulting address is one instruction above */
975 addik r6, r6, -4 /* for finding proper fixup */
976 swi r6, r7, PT_PC; /* a save back it to PT_PC */
977 addik r7, r0, SIGSEGV
978 /* call bad_page_fault for finding aligned fixup, fixup address is saved
979 * in PT_PC which is used as return address from exception */
980 la r15, r0, ret_from_exc-8 /* setup return address */
981 brid bad_page_fault
982 nop
983
984/* We prevent all load/store because it could failed any attempt to access */
985.section __ex_table,"a";
986 .word load1,ex_unaligned_fixup;
987 .word load2,ex_unaligned_fixup;
988 .word load3,ex_unaligned_fixup;
989 .word load4,ex_unaligned_fixup;
990 .word load5,ex_unaligned_fixup;
991 .word store1,ex_unaligned_fixup;
992 .word store2,ex_unaligned_fixup;
993 .word store3,ex_unaligned_fixup;
994 .word store4,ex_unaligned_fixup;
995 .word store5,ex_unaligned_fixup;
996 .word store6,ex_unaligned_fixup;
997.previous;
1015.end _unaligned_data_exception 998.end _unaligned_data_exception
1016#endif /* CONFIG_MMU */ 999#endif /* CONFIG_MMU */
1017 1000
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
index 51414171326f..5a45b1adfef1 100644
--- a/arch/microblaze/kernel/module.c
+++ b/arch/microblaze/kernel/module.c
@@ -57,7 +57,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
57 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; 57 Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
58 Elf32_Sym *sym; 58 Elf32_Sym *sym;
59 unsigned long int *location; 59 unsigned long int *location;
60 unsigned long int locoffs;
61 unsigned long int value; 60 unsigned long int value;
62#if __GNUC__ < 4 61#if __GNUC__ < 4
63 unsigned long int old_value; 62 unsigned long int old_value;
@@ -113,10 +112,12 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
113 break; 112 break;
114 113
115 case R_MICROBLAZE_64_PCREL: 114 case R_MICROBLAZE_64_PCREL:
116 locoffs = (location[0] & 0xFFFF) << 16 | 115#if __GNUC__ < 4
116 old_value = (location[0] & 0xFFFF) << 16 |
117 (location[1] & 0xFFFF); 117 (location[1] & 0xFFFF);
118 value -= (unsigned long int)(location) + 4 + 118 value -= old_value;
119 locoffs; 119#endif
120 value -= (unsigned long int)(location) + 4;
120 location[0] = (location[0] & 0xFFFF0000) | 121 location[0] = (location[0] & 0xFFFF0000) |
121 (value >> 16); 122 (value >> 16);
122 location[1] = (location[1] & 0xFFFF0000) | 123 location[1] = (location[1] & 0xFFFF0000) |
@@ -125,6 +126,14 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
125 value); 126 value);
126 break; 127 break;
127 128
129 case R_MICROBLAZE_32_PCREL_LO:
130 pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
131 break;
132
133 case R_MICROBLAZE_64_NONE:
134 pr_debug("R_MICROBLAZE_NONE\n");
135 break;
136
128 case R_MICROBLAZE_NONE: 137 case R_MICROBLAZE_NONE:
129 pr_debug("R_MICROBLAZE_NONE\n"); 138 pr_debug("R_MICROBLAZE_NONE\n");
130 break; 139 break;
@@ -133,7 +142,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
133 printk(KERN_ERR "module %s: " 142 printk(KERN_ERR "module %s: "
134 "Unknown relocation: %u\n", 143 "Unknown relocation: %u\n",
135 module->name, 144 module->name,
136 ELF32_R_TYPE(rela->r_info)); 145 ELF32_R_TYPE(rela[i].r_info));
137 return -ENOEXEC; 146 return -ENOEXEC;
138 } 147 }
139 } 148 }
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 8709bea09604..2a97bf513b64 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -138,8 +138,12 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
138 setup_early_printk(NULL); 138 setup_early_printk(NULL);
139#endif 139#endif
140 140
141 early_printk("Ramdisk addr 0x%08x, FDT 0x%08x\n", ram, fdt); 141 early_printk("Ramdisk addr 0x%08x, ", ram);
142 printk(KERN_NOTICE "Found FDT at 0x%08x\n", fdt); 142 if (fdt)
143 early_printk("FDT at 0x%08x\n", fdt);
144 else
145 early_printk("Compiled-in FDT at 0x%08x\n",
146 (unsigned int)_fdt_start);
143 147
144#ifdef CONFIG_MTD_UCLINUX 148#ifdef CONFIG_MTD_UCLINUX
145 early_printk("Found romfs @ 0x%08x (0x%08x)\n", 149 early_printk("Found romfs @ 0x%08x (0x%08x)\n",
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index e000bce09b2b..b96f1682bb24 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -33,105 +33,6 @@
33#include <linux/unistd.h> 33#include <linux/unistd.h>
34 34
35#include <asm/syscalls.h> 35#include <asm/syscalls.h>
36/*
37 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
38 *
39 * This is really horribly ugly. This will be remove with new toolchain.
40 */
41asmlinkage long
42sys_ipc(uint call, int first, int second, int third, void *ptr, long fifth)
43{
44 int version, ret;
45
46 version = call >> 16; /* hack for backward compatibility */
47 call &= 0xffff;
48
49 ret = -EINVAL;
50 switch (call) {
51 case SEMOP:
52 ret = sys_semop(first, (struct sembuf *)ptr, second);
53 break;
54 case SEMGET:
55 ret = sys_semget(first, second, third);
56 break;
57 case SEMCTL:
58 {
59 union semun fourth;
60
61 if (!ptr)
62 break;
63 ret = (access_ok(VERIFY_READ, ptr, sizeof(long)) ? 0 : -EFAULT)
64 || (get_user(fourth.__pad, (void **)ptr)) ;
65 if (ret)
66 break;
67 ret = sys_semctl(first, second, third, fourth);
68 break;
69 }
70 case MSGSND:
71 ret = sys_msgsnd(first, (struct msgbuf *) ptr, second, third);
72 break;
73 case MSGRCV:
74 switch (version) {
75 case 0: {
76 struct ipc_kludge tmp;
77
78 if (!ptr)
79 break;
80 ret = (access_ok(VERIFY_READ, ptr, sizeof(tmp))
81 ? 0 : -EFAULT) || copy_from_user(&tmp,
82 (struct ipc_kludge *) ptr, sizeof(tmp));
83 if (ret)
84 break;
85 ret = sys_msgrcv(first, tmp.msgp, second, tmp.msgtyp,
86 third);
87 break;
88 }
89 default:
90 ret = sys_msgrcv(first, (struct msgbuf *) ptr,
91 second, fifth, third);
92 break;
93 }
94 break;
95 case MSGGET:
96 ret = sys_msgget((key_t) first, second);
97 break;
98 case MSGCTL:
99 ret = sys_msgctl(first, second, (struct msqid_ds *) ptr);
100 break;
101 case SHMAT:
102 switch (version) {
103 default: {
104 ulong raddr;
105 ret = access_ok(VERIFY_WRITE, (ulong *) third,
106 sizeof(ulong)) ? 0 : -EFAULT;
107 if (ret)
108 break;
109 ret = do_shmat(first, (char *) ptr, second, &raddr);
110 if (ret)
111 break;
112 ret = put_user(raddr, (ulong *) third);
113 break;
114 }
115 case 1: /* iBCS2 emulator entry point */
116 if (!segment_eq(get_fs(), get_ds()))
117 break;
118 ret = do_shmat(first, (char *) ptr, second,
119 (ulong *) third);
120 break;
121 }
122 break;
123 case SHMDT:
124 ret = sys_shmdt((char *)ptr);
125 break;
126 case SHMGET:
127 ret = sys_shmget(first, second, third);
128 break;
129 case SHMCTL:
130 ret = sys_shmctl(first, second, (struct shmid_ds *) ptr);
131 break;
132 }
133 return ret;
134}
135 36
136asmlinkage long microblaze_vfork(struct pt_regs *regs) 37asmlinkage long microblaze_vfork(struct pt_regs *regs)
137{ 38{
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 31b32a6c5f4e..216db817beb6 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -121,7 +121,7 @@ ENTRY(sys_call_table)
121 .long sys_wait4 121 .long sys_wait4
122 .long sys_swapoff /* 115 */ 122 .long sys_swapoff /* 115 */
123 .long sys_sysinfo 123 .long sys_sysinfo
124 .long sys_ipc 124 .long sys_ni_syscall /* old sys_ipc */
125 .long sys_fsync 125 .long sys_fsync
126 .long sys_ni_syscall /* sys_sigreturn_wrapper */ 126 .long sys_ni_syscall /* sys_sigreturn_wrapper */
127 .long sys_clone /* 120 */ 127 .long sys_clone /* 120 */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index 956607a63f4c..d9d249a66ff2 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -69,7 +69,7 @@ static int store_updates_sp(struct pt_regs *regs)
69 * It is called from do_page_fault above and from some of the procedures 69 * It is called from do_page_fault above and from some of the procedures
70 * in traps.c. 70 * in traps.c.
71 */ 71 */
72static void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 72void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
73{ 73{
74 const struct exception_table_entry *fixup; 74 const struct exception_table_entry *fixup;
75/* MS: no context */ 75/* MS: no context */
@@ -122,15 +122,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
122 } 122 }
123#endif /* CONFIG_KGDB */ 123#endif /* CONFIG_KGDB */
124 124
125 if (in_atomic() || mm == NULL) { 125 if (in_atomic() || !mm) {
126 /* FIXME */ 126 if (kernel_mode(regs))
127 if (kernel_mode(regs)) { 127 goto bad_area_nosemaphore;
128 printk(KERN_EMERG 128
129 "Page fault in kernel mode - Oooou!!! pid %d\n",
130 current->pid);
131 _exception(SIGSEGV, regs, code, address);
132 return;
133 }
134 /* in_atomic() in user mode is really bad, 129 /* in_atomic() in user mode is really bad,
135 as is current->mm == NULL. */ 130 as is current->mm == NULL. */
136 printk(KERN_EMERG "Page fault in user mode with " 131 printk(KERN_EMERG "Page fault in user mode with "
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 1275831dda29..3738f4b48cbd 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -98,23 +98,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
98 __free_pages(pte, PTE_ORDER); 98 __free_pages(pte, PTE_ORDER);
99} 99}
100 100
101#define __pte_free_tlb(tlb,pte) \ 101#define __pte_free_tlb(tlb,pte,address) \
102do { \ 102do { \
103 pgtable_page_dtor(pte); \ 103 pgtable_page_dtor(pte); \
104 tlb_remove_page((tlb), pte); \ 104 tlb_remove_page((tlb), pte); \
105} while (0) 105} while (0)
106 106
107#ifdef CONFIG_32BIT
108
109/*
110 * allocating and freeing a pmd is trivial: the 1-entry pmd is
111 * inside the pgd, so has no extra memory associated with it.
112 */
113#define pmd_free(mm, x) do { } while (0)
114#define __pmd_free_tlb(tlb, x) do { } while (0)
115
116#endif
117
118#ifdef CONFIG_64BIT 107#ifdef CONFIG_64BIT
119 108
120static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) 109static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -132,7 +121,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
132 free_pages((unsigned long)pmd, PMD_ORDER); 121 free_pages((unsigned long)pmd, PMD_ORDER);
133} 122}
134 123
135#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x) 124#define __pmd_free_tlb(tlb, x, addr) pmd_free((tlb)->mm, x)
136 125
137#endif 126#endif
138 127
diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h
index ec057e1bd4cf..a19f11327cd8 100644
--- a/arch/mn10300/include/asm/pgalloc.h
+++ b/arch/mn10300/include/asm/pgalloc.h
@@ -51,6 +51,6 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
51} 51}
52 52
53 53
54#define __pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) 54#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
55 55
56#endif /* _ASM_PGALLOC_H */ 56#endif /* _ASM_PGALLOC_H */
diff --git a/arch/parisc/include/asm/tlb.h b/arch/parisc/include/asm/tlb.h
index 383b1db310ee..07924903989e 100644
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \
21 21
22#include <asm-generic/tlb.h> 22#include <asm-generic/tlb.h>
23 23
24#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd) 24#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
25#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte) 25#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
26 26
27#endif 27#endif
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 0815eb40acae..c9500d666a1d 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -16,7 +16,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
16 */ 16 */
17/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ 17/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
18#define pmd_free(mm, x) do { } while (0) 18#define pmd_free(mm, x) do { } while (0)
19#define __pmd_free_tlb(tlb,x) do { } while (0) 19#define __pmd_free_tlb(tlb,x,a) do { } while (0)
20/* #define pgd_populate(mm, pmd, pte) BUG() */ 20/* #define pgd_populate(mm, pmd, pte) BUG() */
21 21
22#ifndef CONFIG_BOOKE 22#ifndef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index afda2bdd860f..e6f069c4f713 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -118,11 +118,11 @@ static inline void pgtable_free(pgtable_free_t pgf)
118 kmem_cache_free(pgtable_cache[cachenum], p); 118 kmem_cache_free(pgtable_cache[cachenum], p);
119} 119}
120 120
121#define __pmd_free_tlb(tlb, pmd) \ 121#define __pmd_free_tlb(tlb, pmd,addr) \
122 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \ 122 pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
123 PMD_CACHE_NUM, PMD_TABLE_SIZE-1)) 123 PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
124#ifndef CONFIG_PPC_64K_PAGES 124#ifndef CONFIG_PPC_64K_PAGES
125#define __pud_free_tlb(tlb, pud) \ 125#define __pud_free_tlb(tlb, pud, addr) \
126 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \ 126 pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
127 PUD_CACHE_NUM, PUD_TABLE_SIZE-1)) 127 PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
128#endif /* CONFIG_PPC_64K_PAGES */ 128#endif /* CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 5d8480265a77..1730e5e298d6 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -38,14 +38,14 @@ static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
38extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf); 38extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
39 39
40#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
41#define __pte_free_tlb(tlb,ptepage) \ 41#define __pte_free_tlb(tlb,ptepage,address) \
42do { \ 42do { \
43 pgtable_page_dtor(ptepage); \ 43 pgtable_page_dtor(ptepage); \
44 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \ 44 pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
45 PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \ 45 PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
46} while (0) 46} while (0)
47#else 47#else
48#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte)) 48#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, (pte))
49#endif 49#endif
50 50
51 51
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9920d6a7cf29..c46ef2ffa3d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -305,7 +305,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
305 305
306 pmd = pmd_offset(pud, start); 306 pmd = pmd_offset(pud, start);
307 pud_clear(pud); 307 pud_clear(pud);
308 pmd_free_tlb(tlb, pmd); 308 pmd_free_tlb(tlb, pmd, start);
309} 309}
310 310
311static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, 311static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -348,7 +348,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
348 348
349 pud = pud_offset(pgd, start); 349 pud = pud_offset(pgd, start);
350 pgd_clear(pgd); 350 pgd_clear(pgd);
351 pud_free_tlb(tlb, pud); 351 pud_free_tlb(tlb, pud, start);
352} 352}
353 353
354/* 354/*
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 3d8a96d39d9d..81150b053689 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -96,7 +96,8 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
96 * pte_free_tlb frees a pte table and clears the CRSTE for the 96 * pte_free_tlb frees a pte table and clears the CRSTE for the
97 * page table from the tlb. 97 * page table from the tlb.
98 */ 98 */
99static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte) 99static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
100 unsigned long address)
100{ 101{
101 if (!tlb->fullmm) { 102 if (!tlb->fullmm) {
102 tlb->array[tlb->nr_ptes++] = pte; 103 tlb->array[tlb->nr_ptes++] = pte;
@@ -113,7 +114,8 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte)
113 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB 114 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
114 * to avoid the double free of the pmd in this case. 115 * to avoid the double free of the pmd in this case.
115 */ 116 */
116static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) 117static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
118 unsigned long address)
117{ 119{
118#ifdef __s390x__ 120#ifdef __s390x__
119 if (tlb->mm->context.asce_limit <= (1UL << 31)) 121 if (tlb->mm->context.asce_limit <= (1UL << 31))
@@ -134,7 +136,8 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
134 * as the pgd. pud_free_tlb checks the asce_limit against 4TB 136 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
135 * to avoid the double free of the pud in this case. 137 * to avoid the double free of the pud in this case.
136 */ 138 */
137static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) 139static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
140 unsigned long address)
138{ 141{
139#ifdef __s390x__ 142#ifdef __s390x__
140 if (tlb->mm->context.asce_limit <= (1UL << 42)) 143 if (tlb->mm->context.asce_limit <= (1UL << 42))
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index f9b144049dc9..8d15314381e0 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void)
210 machine_flags |= MACHINE_FLAG_VM; 210 machine_flags |= MACHINE_FLAG_VM;
211} 211}
212 212
213static void early_pgm_check_handler(void) 213static __init void early_pgm_check_handler(void)
214{ 214{
215 unsigned long addr; 215 unsigned long addr;
216 const struct exception_table_entry *fixup; 216 const struct exception_table_entry *fixup;
@@ -222,7 +222,7 @@ static void early_pgm_check_handler(void)
222 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; 222 S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
223} 223}
224 224
225void setup_lowcore_early(void) 225static noinline __init void setup_lowcore_early(void)
226{ 226{
227 psw_t psw; 227 psw_t psw;
228 228
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 2270730f5354..be2cae083406 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -687,13 +687,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
687#ifndef CONFIG_64BIT 687#ifndef CONFIG_64BIT
688 if (MACHINE_HAS_IEEE) 688 if (MACHINE_HAS_IEEE)
689 lowcore->extended_save_area_addr = (u32) save_area; 689 lowcore->extended_save_area_addr = (u32) save_area;
690#else
691 if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
692 BUG();
693#endif 690#endif
694 set_prefix((u32)(unsigned long) lowcore); 691 set_prefix((u32)(unsigned long) lowcore);
695 local_mcck_enable(); 692 local_mcck_enable();
696 local_irq_enable(); 693 local_irq_enable();
694#ifdef CONFIG_64BIT
695 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
696 BUG();
697#endif
697 for_each_possible_cpu(cpu) 698 for_each_possible_cpu(cpu)
698 if (cpu != smp_processor_id()) 699 if (cpu != smp_processor_id())
699 smp_create_idle(cpu); 700 smp_create_idle(cpu);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 79dbfee831ec..49106c6e6f88 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -88,10 +88,17 @@ __kernel_clock_gettime:
88 llilh %r4,0x0100 88 llilh %r4,0x0100
89 sar %a4,%r4 89 sar %a4,%r4
90 lghi %r4,0 90 lghi %r4,0
91 epsw %r5,0
91 sacf 512 /* Magic ectg instruction */ 92 sacf 512 /* Magic ectg instruction */
92 .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4 93 .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
93 sacf 0 94 tml %r5,0x4000
94 sar %a4,%r2 95 jo 11f
96 tml %r5,0x8000
97 jno 10f
98 sacf 256
99 j 11f
10010: sacf 0
10111: sar %a4,%r2
95 algr %r1,%r0 /* r1 = cputime as TOD value */ 102 algr %r1,%r0 /* r1 = cputime as TOD value */
96 mghi %r1,1000 /* convert to nanoseconds */ 103 mghi %r1,1000 /* convert to nanoseconds */
97 srlg %r1,%r1,12 /* r1 = cputime in nanosec */ 104 srlg %r1,%r1,12 /* r1 = cputime in nanosec */
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
index e6a4fe9f5f24..bd1f5c6b0b8c 100644
--- a/arch/s390/power/swsusp.c
+++ b/arch/s390/power/swsusp.c
@@ -7,24 +7,36 @@
7 * 7 *
8 */ 8 */
9 9
10#include <asm/system.h>
10 11
11/*
12 * save CPU registers before creating a hibernation image and before
13 * restoring the memory state from it
14 */
15void save_processor_state(void) 12void save_processor_state(void)
16{ 13{
17 /* implentation contained in the 14 /* swsusp_arch_suspend() actually saves all cpu register contents.
18 * swsusp_arch_suspend function 15 * Machine checks must be disabled since swsusp_arch_suspend() stores
16 * register contents to their lowcore save areas. That's the same
17 * place where register contents on machine checks would be saved.
18 * To avoid register corruption disable machine checks.
19 * We must also disable machine checks in the new psw mask for
20 * program checks, since swsusp_arch_suspend() may generate program
21 * checks. Disabling machine checks for all other new psw masks is
22 * just paranoia.
19 */ 23 */
24 local_mcck_disable();
25 /* Disable lowcore protection */
26 __ctl_clear_bit(0,28);
27 S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK;
28 S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK;
29 S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK;
30 S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK;
20} 31}
21 32
22/*
23 * restore the contents of CPU registers
24 */
25void restore_processor_state(void) 33void restore_processor_state(void)
26{ 34{
27 /* implentation contained in the 35 S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK;
28 * swsusp_arch_resume function 36 S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK;
29 */ 37 S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK;
38 S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK;
39 /* Enable lowcore protection */
40 __ctl_set_bit(0,28);
41 local_mcck_enable();
30} 42}
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 76d688da32fa..b26df5c5933e 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -32,19 +32,14 @@ swsusp_arch_suspend:
 	/* Deactivate DAT */
 	stnsm	__SF_EMPTY(%r15),0xfb
 
-	/* Switch off lowcore protection */
-	stctg	%c0,%c0,__SF_EMPTY(%r15)
-	ni	__SF_EMPTY+4(%r15),0xef
-	lctlg	%c0,%c0,__SF_EMPTY(%r15)
-
 	/* Store prefix register on stack */
 	stpx	__SF_EMPTY(%r15)
 
-	/* Setup base register for lowcore (absolute 0) */
-	llgf	%r1,__SF_EMPTY(%r15)
+	/* Save prefix register contents for lowcore */
+	llgf	%r4,__SF_EMPTY(%r15)
 
 	/* Get pointer to save area */
-	aghi	%r1,0x1000
+	lghi	%r1,0x1000
 
 	/* Store registers */
 	mvc	0x318(4,%r1),__SF_EMPTY(%r15)	/* move prefix to lowcore */
@@ -79,17 +74,15 @@ swsusp_arch_suspend:
 	xc	__SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
 	spx	__SF_EMPTY(%r15)
 
-	/* Setup lowcore */
-	brasl	%r14,setup_lowcore_early
+	lghi	%r2,0
+	lghi	%r3,2*PAGE_SIZE
+	lghi	%r5,2*PAGE_SIZE
+1:	mvcle	%r2,%r4,0
+	jo	1b
 
 	/* Save image */
 	brasl	%r14,swsusp_save
 
-	/* Switch on lowcore protection */
-	stctg	%c0,%c0,__SF_EMPTY(%r15)
-	oi	__SF_EMPTY+4(%r15),0x10
-	lctlg	%c0,%c0,__SF_EMPTY(%r15)
-
 	/* Restore prefix register and return */
 	lghi	%r1,0x1000
 	spx	0x318(%r1)
@@ -117,11 +110,6 @@ swsusp_arch_resume:
 	/* Deactivate DAT */
 	stnsm	__SF_EMPTY(%r15),0xfb
 
-	/* Switch off lowcore protection */
-	stctg	%c0,%c0,__SF_EMPTY(%r15)
-	ni	__SF_EMPTY+4(%r15),0xef
-	lctlg	%c0,%c0,__SF_EMPTY(%r15)
-
 	/* Set prefix page to zero */
 	xc	__SF_EMPTY(4,%r15),__SF_EMPTY(%r15)
 	spx	__SF_EMPTY(%r15)
@@ -175,7 +163,7 @@ swsusp_arch_resume:
 	/* Load old stack */
 	lg	%r15,0x2f8(%r13)
 
-	/* Pointer to save arae */
+	/* Pointer to save area */
 	lghi	%r13,0x1000
 
 #ifdef CONFIG_SMP
@@ -187,11 +175,6 @@ swsusp_arch_resume:
 	/* Restore prefix register */
 	spx	0x318(%r13)
 
-	/* Switch on lowcore protection */
-	stctg	%c0,%c0,__SF_EMPTY(%r15)
-	oi	__SF_EMPTY+4(%r15),0x10
-	lctlg	%c0,%c0,__SF_EMPTY(%r15)
-
 	/* Activate DAT */
 	stosm	__SF_EMPTY(%r15),0x04
 
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 84dd2db7104c..63ca37bd9a95 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -73,20 +73,12 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	quicklist_free_page(QUICK_PT, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb,pte)				\
+#define __pte_free_tlb(tlb,pte,addr)			\
 do {							\
 	pgtable_page_dtor(pte);				\
 	tlb_remove_page((tlb), (pte));			\
 } while (0)
 
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-
-#define pmd_free(mm, x)			do { } while (0)
-#define __pmd_free_tlb(tlb,x)		do { } while (0)
-
 static inline void check_pgt_cache(void)
 {
 	quicklist_trim(QUICK_PGD, NULL, 25, 16);
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 9c16f737074a..da8fe7ab8728 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -91,9 +91,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 }
 
 #define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep)		pte_free((tlb)->mm, ptep)
-#define pmd_free_tlb(tlb, pmdp)		pmd_free((tlb)->mm, pmdp)
-#define pud_free_tlb(tlb, pudp)		pud_free((tlb)->mm, pudp)
+#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
+#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
 
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 681582d26969..ca2b34456c4b 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -44,8 +44,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
 BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
 #define free_pmd_fast(pmd)	BTFIXUP_CALL(free_pmd_fast)(pmd)
 
 #define pmd_free(mm, pmd)		free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd)	pmd_free((tlb)->mm, pmd)
+#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
 
 BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
 #define pmd_populate(MM, PMD, PTE)	BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@@ -62,7 +62,7 @@ BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
 #define pte_free_kernel(mm, pte)	BTFIXUP_CALL(free_pte_fast)(pte)
 
 BTFIXUPDEF_CALL(void, pte_free, pgtable_t )
 #define pte_free(mm, pte)		BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte)	pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
 
 #endif /* _SPARC_PGALLOC_H */
diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
index ee38e731bfa6..dca406b9b6fc 100644
--- a/arch/sparc/include/asm/tlb_64.h
+++ b/arch/sparc/include/asm/tlb_64.h
@@ -100,9 +100,9 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
 }
 
 #define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
+#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
+#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 #define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 718984359f8c..32c8ce4e1515 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }
 
-#define __pte_free_tlb(tlb,pte)				\
+#define __pte_free_tlb(tlb,pte, address)		\
 do {							\
 	pgtable_page_dtor(pte);				\
 	tlb_remove_page((tlb),(pte));			\
@@ -53,7 +53,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
-#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
+#define __pmd_free_tlb(tlb,x, address)   tlb_remove_page((tlb),virt_to_page(x))
 #endif
 
 #define check_pgt_cache() do { } while (0)
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 5240fa1c5e08..660caedac9eb 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -116,11 +116,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
-#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
 
-#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
 
-#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index dd14c54ac718..0e8c2a0fd922 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
 	__free_page(pte);
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	___pte_free_tlb(tlb, pte);
+}
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long adddress)
+{
+	___pmd_free_tlb(tlb, pmd);
+}
 
 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 	free_page((unsigned long)pud);
 }
 
-extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	___pud_free_tlb(tlb, pud);
+}
+
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
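The three x86 wrappers above illustrate the pattern this series applies across every architecture in the diffstat: the page-table freeing hooks gain the virtual address of the range being torn down, and architectures that have no use for it accept the argument and drop it. A minimal sketch of a caller on the generic-mm side, assuming a free_pte_range()-style helper (the function name and shape are illustrative, not taken from this diff):

	/* sketch: generic page-table teardown forwarding the virtual address */
	static void example_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
					   unsigned long addr)
	{
		pgtable_t token = pmd_pgtable(*pmd);	/* page backing the pte table */

		pmd_clear(pmd);
		/* addr is threaded through so architectures that key their TLB
		 * bookkeeping on the virtual address can use it; the x86 inline
		 * wrappers above simply discard it. */
		pte_free_tlb(tlb, token, addr);
	}

The extra underscore on the ___p*_free_tlb() externs lets the out-of-line bodies in arch/x86/mm/pgtable.c (renamed further down in this diff) stay address-free while the address-taking entry points remain inline.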
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 20e6a795e160..d2c6c930b491 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -212,9 +212,9 @@ extern int __get_user_bad(void);
212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") 212 : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
213#else 213#else
214#define __put_user_asm_u64(x, ptr, retval, errret) \ 214#define __put_user_asm_u64(x, ptr, retval, errret) \
215 __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) 215 __put_user_asm(x, ptr, retval, "q", "", "er", errret)
216#define __put_user_asm_ex_u64(x, addr) \ 216#define __put_user_asm_ex_u64(x, addr) \
217 __put_user_asm_ex(x, addr, "q", "", "Zr") 217 __put_user_asm_ex(x, addr, "q", "", "er")
218#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) 218#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
219#endif 219#endif
220 220
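The substance of this fix, and of the uaccess_64.h change that follows, is the operand constraint. A 64-bit mov only encodes a sign-extended 32-bit immediate: "i" places no bound on the constant, and "Z" (unsigned 32-bit) still admits values that a movq would sign-extend to something else, while gcc's x86 "e" constraint limits constants to exactly the range the instruction accepts; "r" keeps the register alternative. A stand-alone sketch of the idea (not code from this diff):

	/* sketch: "er" accepts a register, or only an immediate that
	 * "movq $imm, mem" can actually encode (signed 32-bit). */
	static inline void put_u64(unsigned long *p, unsigned long v)
	{
		asm volatile("movq %1, %0" : "=m" (*p) : "er" (v));
	}

With "ir" here, a call such as put_u64(p, 0x123456789abcdefUL) could be compiled into a movq with a 64-bit immediate that the assembler rejects; "er" forces such values through a register instead.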
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8cc687326eb8..db24b215fc50 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
88 ret, "l", "k", "ir", 4); 88 ret, "l", "k", "ir", 4);
89 return ret; 89 return ret;
90 case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, 90 case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
91 ret, "q", "", "ir", 8); 91 ret, "q", "", "er", 8);
92 return ret; 92 return ret;
93 case 10: 93 case 10:
94 __put_user_asm(*(u64 *)src, (u64 __user *)dst, 94 __put_user_asm(*(u64 *)src, (u64 __user *)dst,
95 ret, "q", "", "ir", 10); 95 ret, "q", "", "er", 10);
96 if (unlikely(ret)) 96 if (unlikely(ret))
97 return ret; 97 return ret;
98 asm("":::"memory"); 98 asm("":::"memory");
@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 		return ret;
 	case 16:
 		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
-			       ret, "q", "", "ir", 16);
+			       ret, "q", "", "er", 16);
 		if (unlikely(ret))
 			return ret;
 		asm("":::"memory");
 		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
-			       ret, "q", "", "ir", 8);
+			       ret, "q", "", "er", 8);
 		return ret;
 	default:
 		return copy_user_generic((__force void *)dst, src, size);
@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
157 ret, "q", "", "=r", 8); 157 ret, "q", "", "=r", 8);
158 if (likely(!ret)) 158 if (likely(!ret))
159 __put_user_asm(tmp, (u64 __user *)dst, 159 __put_user_asm(tmp, (u64 __user *)dst,
160 ret, "q", "", "ir", 8); 160 ret, "q", "", "er", 8);
161 return ret; 161 return ret;
162 } 162 }
163 default: 163 default:
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 28e5f5956042..e2485b03f1cf 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 	/* check CPU config space for extended APIC ID */
-	if (c->x86 >= 0xf) {
+	if (cpu_has_apic && c->x86 >= 0xf) {
 		unsigned int val;
 		val = read_pci_config(0, 24, 0, 0x68);
 		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 484c1e5f658e..1cfb623ce11c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
 			   const char *buf, size_t siz)
 {
 	char *p;
-	int len;
 
 	strncpy(mce_helper, buf, sizeof(mce_helper));
 	mce_helper[sizeof(mce_helper)-1] = 0;
-	len = strlen(mce_helper);
 	p = strchr(mce_helper, '\n');
 
-	if (*p)
+	if (p)
 		*p = 0;
 
-	return len;
+	return strlen(mce_helper) + !!p;
 }
 
 static ssize_t set_ignore_ce(struct sys_device *s,
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7b8991..a7aa8f900954 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
1400 unsigned int ebx; 1607 unsigned int ebx;
1401 int version; 1608 int version;
1402 1609
1403 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) 1610 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1611 /* check for P6 processor family */
1612 if (boot_cpu_data.x86 == 6) {
1613 return p6_pmu_init();
1614 } else {
1404 return -ENODEV; 1615 return -ENODEV;
1616 }
1617 }
1405 1618
1406 /* 1619 /*
1407 * Check whether the Architectural PerfMon supports 1620 * Check whether the Architectural PerfMon supports
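A note on the counter_bits = 32 setting in the p6_pmu definition above: P6-family counters implement 40 bits, but bits 32-39 mirror bit 31, so only 32 bits are effectively usable. Sampling therefore arms a counter with the two's complement of the period and treats a cleared sign bit as "the counter wrapped", which is the test p6_pmu_handle_irq() applies. A stand-alone sketch of that arithmetic (illustrative only, not code from this diff):

	#include <stdint.h>

	#define COUNTER_BITS	32
	#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

	/* value written to the counter so it overflows after "period" events */
	static uint64_t arm_period(uint64_t period)
	{
		return (0 - period) & COUNTER_MASK;
	}

	/* the armed (negative) value has wrapped past zero once the top
	 * effective bit reads back as clear */
	static int overflowed(uint64_t count)
	{
		return !(count & (1ULL << (COUNTER_BITS - 1)));
	}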
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 696f0e475c2d..92b7703d3d58 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -187,7 +187,7 @@ static void __init apic_intr_init(void)
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
-#ifdef CONFIG_X86_THRESHOLD
+#ifdef CONFIG_X86_MCE_THRESHOLD
 	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 #endif
 #if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
index 846510b78a09..2a62d843f015 100644
--- a/arch/x86/kernel/mfgpt_32.c
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
 
 static struct irqaction mfgptirq  = {
 	.handler = mfgpt_tick,
-	.flags = IRQF_DISABLED | IRQF_NOBALANCING,
+	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
 	.name = "mfgpt-timer"
 };
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index d2d1ce8170f0..508e982dd072 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
 		},
 	},
+	{	/* Handle problems with rebooting on CompuLab SBC-FITPC2 */
+		.callback = set_bios_reboot,
+		.ident = "CompuLab SBC-FITPC2",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index de2cab132844..63f32d220ef2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
 		},
 	},
+	{
+	/*
+	 * AMI BIOS with low memory corruption was found on Intel DG45ID board.
+	 * It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
+	 * match only DMI_BOARD_NAME and see if there is more bad products
+	 * with this vendor.
+	 */
+		.callback = dmi_low_memory_corruption,
+		.ident = "AMI BIOS",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
+		},
+	},
 #endif
 	{}
 };
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 367e87882041..59f31d2dd435 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -112,11 +112,6 @@ SECTIONS
 		_sdata = .;
 		DATA_DATA
 		CONSTRUCTORS
-
-#ifdef CONFIG_X86_64
-		/* End of data section */
-		_edata = .;
-#endif
 	} :data
 
 #ifdef CONFIG_X86_32
@@ -156,10 +151,8 @@ SECTIONS
 	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
 		*(.data.read_mostly)
 
-#ifdef CONFIG_X86_32
 		/* End of data section */
 		_edata = .;
-#endif
 	}
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 8e43bdd45456..af8f9650058c 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,7 +25,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pte;
 }
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
 	pgtable_page_dtor(pte);
 	paravirt_release_pte(page_to_pfn(pte));
@@ -33,14 +33,14 @@ void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 }
 
 #if PAGETABLE_LEVELS > 2
-void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
 	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
 #if PAGETABLE_LEVELS > 3
-void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
+void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
 {
 	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
 	tlb_remove_page(tlb, virt_to_page(pud));
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 2dfcbf9df2ae..dbb5381f7b3b 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -79,8 +79,10 @@ static __init void bad_srat(void)
 	acpi_numa = -1;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
 		apicid_to_node[i] = NUMA_NO_NODE;
-	for (i = 0; i < MAX_NUMNODES; i++)
-		nodes_add[i].start = nodes[i].end = 0;
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		nodes[i].start = nodes[i].end = 0;
+		nodes_add[i].start = nodes_add[i].end = 0;
+	}
 	remove_all_active_ranges();
 }
 
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 31c220faca02..0d766f9c1083 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -42,6 +42,6 @@
 
 #include <asm-generic/tlb.h>
 
-#define __pte_free_tlb(tlb, pte)		pte_free((tlb)->mm, pte)
+#define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
 
 #endif /* _XTENSA_TLB_H */