author	Ingo Molnar <mingo@elte.hu>	2008-11-07 04:29:58 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-07 04:29:58 -0500
commit	258594a138f4ca9adf214f5272592d7f21def610 (patch)
tree	d97ee71c997b0412f79b9ec4150cb52ce838fe13 /arch
parent	a87d091434ed2a34d647979ab12084139ee1fe41 (diff)
parent	ca3273f9646694e0419cfb9d6c12deb1c9aff27c (diff)
Merge branch 'sched/urgent' into sched/core
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/memory.h                      12
-rw-r--r--  arch/arm/include/asm/system.h                       4
-rw-r--r--  arch/arm/kernel/elf.c                               6
-rw-r--r--  arch/arm/kernel/module.c                            8
-rw-r--r--  arch/arm/mach-omap2/gpmc.c                          6
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c                          4
-rw-r--r--  arch/arm/mm/mmu.c                                 111
-rw-r--r--  arch/arm/mm/proc-v7.S                              12
-rw-r--r--  arch/arm/plat-omap/clock.c                         20
-rw-r--r--  arch/arm/plat-omap/include/mach/entry-macro.S       4
-rw-r--r--  arch/arm/plat-omap/include/mach/irqs.h              2
-rw-r--r--  arch/powerpc/platforms/cell/ras.c                   1
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c          1
-rw-r--r--  arch/x86/Kconfig                                    6
-rw-r--r--  arch/x86/include/asm/fixmap.h                       4
-rw-r--r--  arch/x86/include/asm/fixmap_32.h                    4
-rw-r--r--  arch/x86/include/asm/highmem.h                      5
-rw-r--r--  arch/x86/include/asm/irq_vectors.h                 20
-rw-r--r--  arch/x86/include/asm/topology.h                     7
-rw-r--r--  arch/x86/include/asm/voyager.h                      1
-rw-r--r--  arch/x86/kernel/amd_iommu.c                         9
-rw-r--r--  arch/x86/kernel/io_apic.c                           2
-rw-r--r--  arch/x86/kernel/reboot.c                            6
-rw-r--r--  arch/x86/kernel/tlb_32.c                            6
-rw-r--r--  arch/x86/kernel/tlb_64.c                            5
-rw-r--r--  arch/x86/kernel/tsc.c                               8
-rw-r--r--  arch/x86/mach-voyager/setup.c                       2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c                 2
-rw-r--r--  arch/x86/mm/Makefile                                2
-rw-r--r--  arch/x86/mm/init_32.c                               3
-rw-r--r--  arch/x86/mm/iomap_32.c                             59
-rw-r--r--  arch/x86/mm/pageattr.c                              8
32 files changed, 236 insertions, 114 deletions
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 809ff9ab853a..77764301844b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,10 +44,10 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULE_END		(PAGE_OFFSET)
-#define MODULE_START		(MODULE_END - 16*1048576)
+#define MODULES_END		(PAGE_OFFSET)
+#define MODULES_VADDR		(MODULES_END - 16*1048576)
 
-#if TASK_SIZE > MODULE_START
+#if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
 
@@ -56,7 +56,7 @@
  * Since we use sections to map it, this macro replaces the physical address
  * with its virtual address while keeping offset from the base section.
  */
-#define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
+#define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
 /*
  * Allow 16MB-aligned ioremap pages
@@ -94,8 +94,8 @@
 /*
  * The module can be at any place in ram in nommu mode.
  */
-#define MODULE_END		(END_MEM)
-#define MODULE_START		(PHYS_OFFSET)
+#define MODULES_END		(END_MEM)
+#define MODULES_VADDR		(PHYS_OFFSET)
 
 #endif /* !CONFIG_MMU */
 
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 7aad78420f18..568020b34e3e 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -42,6 +42,10 @@
 #define CR_U	(1 << 22)	/* Unaligned access operation */
 #define CR_XP	(1 << 23)	/* Extended page tables */
 #define CR_VE	(1 << 24)	/* Vectored interrupts */
+#define CR_EE	(1 << 25)	/* Exception (Big) Endian */
+#define CR_TRE	(1 << 28)	/* TEX remap enable */
+#define CR_AFE	(1 << 29)	/* Access flag enable */
+#define CR_TE	(1 << 30)	/* Thumb exception enable */
 
 /*
  * This is used to ensure the compiler did actually allocate the register we
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 513f332f040d..84849098c8e8 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -21,12 +21,16 @@ int elf_check_arch(const struct elf32_hdr *x)
 
 	eflags = x->e_flags;
 	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
+		unsigned int flt_fmt;
+
 		/* APCS26 is only allowed if the CPU supports it */
 		if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
 			return 0;
 
+		flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
+
 		/* VFP requires the supporting code */
-		if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP))
+		if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
 			return 0;
 	}
 	return 1;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 9203ba7d58ee..b8d965dcd6fd 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -26,12 +26,12 @@
 /*
  * The XIP kernel text is mapped in the module area for modules and
  * some other stuff to work without any indirect relocations.
- * MODULE_START is redefined here and not in asm/memory.h to avoid
+ * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
  * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
  */
 extern void _etext;
-#undef MODULE_START
-#define MODULE_START	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
+#undef MODULES_VADDR
+#define MODULES_VADDR	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
 #endif
 
 #ifdef CONFIG_MMU
@@ -43,7 +43,7 @@ void *module_alloc(unsigned long size)
 	if (!size)
 		return NULL;
 
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
+	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
 	if (!area)
 		return NULL;
 
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 763bdbeaf681..2249049c1d5a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -429,18 +429,16 @@ void __init gpmc_init(void)
 	gpmc_l3_clk = clk_get(NULL, ck);
 	if (IS_ERR(gpmc_l3_clk)) {
 		printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
-		return -ENODEV;
+		BUG();
 	}
 
 	gpmc_base = ioremap(l, SZ_4K);
 	if (!gpmc_base) {
 		clk_put(gpmc_l3_clk);
 		printk(KERN_ERR "Could not get GPMC register memory\n");
-		return -ENOMEM;
+		BUG();
 	}
 
-	BUG_ON(IS_ERR(gpmc_l3_clk));
-
 	l = gpmc_read_reg(GPMC_REVISION);
 	printk(KERN_INFO "GPMC revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
 	/* Set smart idle mode and automatic L3 clock gating */
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 10b1bae1a258..464de893a988 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Clean and invalidate partial last cache line.
 	 */
-	if (end & (CACHE_LINE_SIZE - 1)) {
+	if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
 		xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
 		xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
 		end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	/*
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
-	while (start != end) {
+	while (start < end) {
 		xsc3_l2_inv_pa(start);
 		start += CACHE_LINE_SIZE;
 	}
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..e63db11f16a8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
 #endif
 
 #define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
@@ -205,7 +205,7 @@ static struct mem_type mem_types[] = {
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -273,22 +273,23 @@ static void __init build_mem_type_table(void)
 #endif
 
 	/*
-	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
-	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
-	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
-	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 * Strip out features not present on earlier architectures.
+	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
+	 * without extended page tables don't have the 'Shared' bit.
 	 */
-	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
-		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
-	}
+	if (cpu_arch < CPU_ARCH_ARMv5)
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+			mem_types[i].prot_sect &= ~PMD_SECT_S;
 
 	/*
-	 * ARMv5 and lower, bit 4 must be set for page tables.
-	 * (was: cache "update-able on write" bit on ARM610)
-	 * However, Xscale cores require this bit to be cleared.
+	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+	 * "update-able on write" bit on ARM610).  However, Xscale and
+	 * Xscale3 require this bit to be cleared.
 	 */
-	if (cpu_is_xscale()) {
+	if (cpu_is_xscale() || cpu_is_xsc3()) {
 		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 			mem_types[i].prot_sect &= ~PMD_BIT4;
 			mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +303,64 @@ static void __init build_mem_type_table(void)
 		}
 	}
 
+	/*
+	 * Mark the device areas according to the CPU/architecture.
+	 */
+	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+		if (!cpu_is_xsc3()) {
+			/*
+			 * Mark device regions on ARMv6+ as execute-never
+			 * to prevent speculative instruction fetches.
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		}
+		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+			/*
+			 * For ARMv7 with TEX remapping,
+			 * - shared device is SXCB=1100
+			 * - nonshared device is SXCB=0100
+			 * - write combine device mem is SXCB=0001
+			 * (Uncached Normal memory)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+		} else if (cpu_is_xsc3()) {
+			/*
+			 * For Xscale3,
+			 * - shared device is TEXCB=00101
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Inner/Outer Uncacheable in xsc3 parlance)
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		} else {
+			/*
+			 * For ARMv6 and ARMv7 without TEX remapping,
+			 * - shared device is TEXCB=00001
+			 * - nonshared device is TEXCB=01000
+			 * - write combine device mem is TEXCB=00100
+			 * (Uncached Normal in ARMv6 parlance).
+			 */
+			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		}
+	} else {
+		/*
+		 * On others, write combining is "Uncached/Buffered"
+		 */
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+	}
+
+	/*
+	 * Now deal with the memory-type mappings
+	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
@@ -317,12 +376,8 @@ static void __init build_mem_type_table(void)
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent()) {
-		if (cpu_is_xsc3()) {
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		}
-	}
+	if (arch_is_coherent() && cpu_is_xsc3())
+		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -336,11 +391,6 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-		/*
-		 * Mark the device area as "shared device"
-		 */
-		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
 #ifdef CONFIG_SMP
 		/*
 		 * Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +410,6 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch < CPU_ARCH_ARMv5)
-		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +701,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	/*
	 * Clear out all the mappings below the kernel image.
	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 #ifdef CONFIG_XIP_KERNEL
@@ -766,7 +813,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
	 */
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
+	map.virtual = MODULES_VADDR;
 	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 07f82db70945..4d3c0a73e7fb 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -115,7 +115,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	orr	r3, r3, r2
 	orr	r3, r3, #PTE_EXT_AP0 | 2
 
-	tst	r2, #1 << 4
+	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
 	tst	r1, #L_PTE_WRITE
@@ -192,11 +192,11 @@ __v7_setup:
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
-	/*
-	 *         V X F   I D LR
-	 * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
-	 * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *         0 110       0011 1.00 .111 1101 < we want
+	/*   AT
+	 *  TFR   EV X F   I D LR
+	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx   < forced
+	 *   1    0 110       0011 1.00 .111 1101    < we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index bf6a10c5fc4f..be6aab9c6834 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -428,23 +428,23 @@ static int clk_debugfs_register_one(struct clk *c)
 	if (c->id != 0)
 		sprintf(p, ":%d", c->id);
 	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
-	if (IS_ERR(d))
-		return PTR_ERR(d);
+	if (!d)
+		return -ENOMEM;
 	c->dent = d;
 
 	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
-	if (IS_ERR(d)) {
-		err = PTR_ERR(d);
+	if (!d) {
+		err = -ENOMEM;
 		goto err_out;
 	}
 	return 0;
@@ -483,8 +483,8 @@ static int __init clk_debugfs_init(void)
 	int err;
 
 	d = debugfs_create_dir("clock", NULL);
-	if (IS_ERR(d))
-		return PTR_ERR(d);
+	if (!d)
+		return -ENOMEM;
 	clk_debugfs_root = d;
 
 	list_for_each_entry(c, &clocks, node) {
diff --git a/arch/arm/plat-omap/include/mach/entry-macro.S b/arch/arm/plat-omap/include/mach/entry-macro.S
index 030118ee204a..2276f89671d8 100644
--- a/arch/arm/plat-omap/include/mach/entry-macro.S
+++ b/arch/arm/plat-omap/include/mach/entry-macro.S
@@ -65,7 +65,8 @@
 #include <mach/omap34xx.h>
 #endif
 
-#define INTCPS_SIR_IRQ_OFFSET	0x0040		/* Active interrupt number */
+#define INTCPS_SIR_IRQ_OFFSET	0x0040		/* Active interrupt offset */
+#define	ACTIVEIRQ_MASK		0x7f		/* Active interrupt bits */
 
 		.macro	disable_fiq
 		.endm
@@ -88,6 +89,7 @@
 		cmp	\irqnr, #0x0
 2222:
 		ldrne	\irqnr, [\base, #INTCPS_SIR_IRQ_OFFSET]
+		and	\irqnr, \irqnr, #ACTIVEIRQ_MASK /* Clear spurious bits */
 
 		.endm
 
diff --git a/arch/arm/plat-omap/include/mach/irqs.h b/arch/arm/plat-omap/include/mach/irqs.h
index a2929ac8c687..bed5274c910a 100644
--- a/arch/arm/plat-omap/include/mach/irqs.h
+++ b/arch/arm/plat-omap/include/mach/irqs.h
@@ -372,7 +372,7 @@
 
 /* External TWL4030 gpio interrupts are optional */
 #define TWL4030_GPIO_IRQ_BASE	TWL4030_PWR_IRQ_END
-#ifdef CONFIG_TWL4030_GPIO
+#ifdef CONFIG_GPIO_TWL4030
 #define TWL4030_GPIO_NR_IRQS	18
 #else
 #define TWL4030_GPIO_NR_IRQS	0
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index fdf088f2430e..7b4cefa2199b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -16,6 +16,7 @@
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
 
+#include <asm/kexec.h>
 #include <asm/reg.h>
 #include <asm/io.h>
 #include <asm/prom.h>
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 31481dc485de..7190493e9bdc 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -189,7 +189,6 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
 {
 	struct pci_controller *phb;
 	int primary;
-	struct pci_bus *b;
 
 	primary = list_empty(&hose_list);
 	phb = pcibios_alloc_controller(dn);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 488a4ecd0b59..1d5550d19b66 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1494,7 +1494,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	def_bool X86_64
 	depends on NUMA
 
-menu "Power management options"
+menu "Power management and ACPI options"
 	depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
@@ -1894,6 +1894,10 @@ config SYSVIPC_COMPAT
 endmenu
 
 
+config HAVE_ATOMIC_IOMAP
+	def_bool y
+	depends on X86_32
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8668a94f850e..23696d44a0af 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -9,6 +9,10 @@
 
 extern int fixmaps_set;
 
+extern pte_t *kmap_pte;
+extern pgprot_t kmap_prot;
+extern pte_t *pkmap_page_table;
+
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
 void native_set_fixmap(enum fixed_addresses idx,
 		       unsigned long phys, pgprot_t flags);
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
index 09f29ab5c139..c7115c1d7217 100644
--- a/arch/x86/include/asm/fixmap_32.h
+++ b/arch/x86/include/asm/fixmap_32.h
@@ -28,10 +28,8 @@ extern unsigned long __FIXADDR_TOP;
 #include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
-#endif
 
 /*
  * Here we define all the compile-time 'special' virtual
@@ -75,10 +73,8 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_CYCLONE_TIMER
 	FIX_CYCLONE_TIMER, /*cyclone timer register*/
 #endif
-#ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
 #ifdef CONFIG_PCI_MMCONFIG
 	FIX_PCIE_MCFG,
 #endif
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index a3b3b7c3027b..bf9276bea660 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -25,14 +25,11 @@
 #include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
+#include <asm/fixmap.h>
 
 /* declarations for highmem.c */
 extern unsigned long highstart_pfn, highend_pfn;
 
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
-extern pte_t *pkmap_page_table;
-
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index d843ed0e9b2e..0005adb0f941 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -101,30 +101,22 @@
 #define LAST_VM86_IRQ		15
 #define invalid_vm86_irq(irq)	((irq) < 3 || (irq) > 15)
 
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
 # else
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 
-#elif !defined(CONFIG_X86_VOYAGER)
+#elif defined(CONFIG_X86_VOYAGER)
 
-# if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT) || defined(CONFIG_X86_VISWS)
-
-#  define NR_IRQS		224
-
-# else /* IO_APIC || PARAVIRT */
-
-#  define NR_IRQS		16
-
-# endif
+# define NR_IRQS		224
 
-#else /* !VISWS && !VOYAGER */
+#else /* IO_APIC || VOYAGER */
 
-# define NR_IRQS		224
+# define NR_IRQS		16
 
-#endif /* VISWS */
+#endif
 
 /* Voyager specific defines */
 /* These define the CPIs we use in linux */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 90ac7718469a..4850e4b02b61 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -154,7 +154,7 @@ extern unsigned long node_remap_size[];
 
 #endif
 
-/* sched_domains SD_NODE_INIT for NUMAQ machines */
+/* sched_domains SD_NODE_INIT for NUMA machines */
 #define SD_NODE_INIT (struct sched_domain) {		\
 	.min_interval		= 8,			\
 	.max_interval		= 32,			\
@@ -169,8 +169,9 @@ extern unsigned long node_remap_size[];
 	.flags			= SD_LOAD_BALANCE	\
 				| SD_BALANCE_EXEC	\
 				| SD_BALANCE_FORK	\
-				| SD_SERIALIZE		\
-				| SD_WAKE_BALANCE,	\
+				| SD_WAKE_AFFINE	\
+				| SD_WAKE_BALANCE	\
+				| SD_SERIALIZE,		\
 	.last_balance		= jiffies,		\
 	.balance_interval	= 1,			\
 }
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
index 9c811d2e6f91..b3e647307625 100644
--- a/arch/x86/include/asm/voyager.h
+++ b/arch/x86/include/asm/voyager.h
@@ -520,6 +520,7 @@ extern void voyager_restart(void);
 extern void voyager_cat_power_off(void);
 extern void voyager_cat_do_common_interrupt(void);
 extern void voyager_handle_nmi(void);
+extern void voyager_smp_intr_init(void);
 /* Commands for the following are */
 #define	VOYAGER_PSI_READ	0
 #define VOYAGER_PSI_WRITE	1
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index a8fd9ebdc8e2..331b318304eb 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -50,7 +50,7 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
 /* returns !0 if the IOMMU is caching non-present entries in its TLB */
 static int iommu_has_npcache(struct amd_iommu *iommu)
 {
-	return iommu->cap & IOMMU_CAP_NPCACHE;
+	return iommu->cap & (1UL << IOMMU_CAP_NPCACHE);
 }
 
 /****************************************************************************
@@ -536,6 +536,9 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 {
 	address >>= PAGE_SHIFT;
 	iommu_area_free(dom->bitmap, address, pages);
+
+	if (address + pages >= dom->next_bit)
+		dom->need_flush = true;
 }
 
 /****************************************************************************
@@ -992,8 +995,10 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (amd_iommu_unmap_flush)
+	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+		dma_dom->need_flush = false;
+	}
 }
 
 /*
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index b764d7429c61..7a3f2028e2eb 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -3611,6 +3611,8 @@ int __init probe_nr_irqs(void)
 	/* something wrong ? */
 	if (nr < nr_min)
 		nr = nr_min;
+	if (WARN_ON(nr > NR_IRQS))
+		nr = NR_IRQS;
 
 	return nr;
 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index f4c93f1cfc19..724adfc63cb9 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -29,11 +29,7 @@ EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
 static int reboot_mode;
-/*
- * Keyboard reset and triple fault may result in INIT, not RESET, which
- * doesn't work when we're in vmx root mode.  Try ACPI first.
- */
-enum reboot_type reboot_type = BOOT_ACPI;
+enum reboot_type reboot_type = BOOT_KBD;
 int reboot_force;
 
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index e00534b33534..f4049f3513b6 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -154,6 +154,12 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 	cpus_or(flush_cpumask, cpumask, flush_cpumask);
+
+	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
 	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index dcbf7a1159ea..8f919ca69494 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -183,6 +183,11 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
 	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
 
 	/*
+	 * Make the above memory operations globally visible before
+	 * sending the IPI.
+	 */
+	smp_mb();
+	/*
 	 * We have to send the IPI only to
 	 * CPUs affected.
 	 */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 62348e4fd8d1..2ef80e301925 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -813,10 +813,6 @@ void __init tsc_init(void)
 		cpu_khz = calibrate_cpu();
 #endif
 
-	lpj = ((u64)tsc_khz * 1000);
-	do_div(lpj, HZ);
-	lpj_fine = lpj;
-
 	printk("Detected %lu.%03lu MHz processor.\n",
 			(unsigned long)cpu_khz / 1000,
 			(unsigned long)cpu_khz % 1000);
@@ -836,6 +832,10 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;
 
+	lpj = ((u64)tsc_khz * 1000);
+	do_div(lpj, HZ);
+	lpj_fine = lpj;
+
 	use_tsc_delay();
 	/* Check and install the TSC clocksource */
 	dmi_check_system(bad_tsc_dmi_table);
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index 6bbdd633864c..a580b9562e76 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -27,7 +27,7 @@ static struct irqaction irq2 = {
 void __init intr_init_hook(void)
 {
 #ifdef CONFIG_SMP
-	smp_intr_init();
+	voyager_smp_intr_init();
 #endif
 
 	setup_irq(2, &irq2);
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 7f4c6af14351..0e331652681e 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -1258,7 +1258,7 @@ static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
 #define QIC_SET_GATE(cpi, vector) \
 	set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
 
-void __init smp_intr_init(void)
+void __init voyager_smp_intr_init(void)
 {
 	int i;
 
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 59f89b434b45..fea4565ff576 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,7 @@
 obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	    pat.o pgtable.o gup.o
 
-obj-$(CONFIG_X86_32)		+= pgtable_32.o
+obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_X86_PTDUMP)	+= dump_pagetables.o
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8396868e82c5..c483f4242079 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -334,7 +334,6 @@ int devmem_is_allowed(unsigned long pagenr)
 	return 0;
 }
 
-#ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;
 
@@ -357,6 +356,7 @@ static void __init kmap_init(void)
 	kmap_prot = PAGE_KERNEL;
 }
 
+#ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
 	unsigned long vaddr;
@@ -436,7 +436,6 @@ static void __init set_highmem_pages_init(void)
 #endif /* !CONFIG_NUMA */
 
 #else
-# define kmap_init() do { } while (0)
 # define permanent_kmaps_init(pgd_base) do { } while (0)
 # define set_highmem_pages_init() do { } while (0)
 #endif /* CONFIG_HIGHMEM */
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
new file mode 100644
index 000000000000..d0151d8ce452
--- /dev/null
+++ b/arch/x86/mm/iomap_32.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/iomap.h>
+#include <linux/module.h>
+
+/* Map 'pfn' using fixed map 'type' and protections 'prot'
+ */
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	/*
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
+	 */
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+
+	arch_flush_lazy_mmu_mode();
+	pagefault_enable();
+}
+EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f1dc1b75d166..e89d24815f26 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -67,18 +67,18 @@ static void split_page_count(int level)
 
 void arch_report_meminfo(struct seq_file *m)
 {
 	seq_printf(m, "DirectMap4k:  %8lu kB\n",
 		   direct_pages_count[PG_LEVEL_4K] << 2);
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 	seq_printf(m, "DirectMap2M:  %8lu kB\n",
 		   direct_pages_count[PG_LEVEL_2M] << 11);
 #else
 	seq_printf(m, "DirectMap4M:  %8lu kB\n",
 		   direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
 #ifdef CONFIG_X86_64
 	if (direct_gbpages)
 		seq_printf(m, "DirectMap1G:  %8lu kB\n",
 			   direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 }