Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/flat.h                |   3
-rw-r--r--  arch/blackfin/include/asm/.gitignore       |   1
-rw-r--r--  arch/blackfin/include/asm/flat.h           |   1
-rw-r--r--  arch/blackfin/include/asm/unistd.h         |   4
-rw-r--r--  arch/blackfin/kernel/.gitignore            |   1
-rw-r--r--  arch/blackfin/lib/strncmp.c                |   3
-rw-r--r--  arch/blackfin/mach-common/entry.S          |   2
-rw-r--r--  arch/h8300/include/asm/flat.h              |   1
-rw-r--r--  arch/m32r/include/asm/flat.h               |   1
-rw-r--r--  arch/m68k/include/asm/flat.h               |   1
-rw-r--r--  arch/powerpc/Kconfig                       |  12
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h     |   6
-rw-r--r--  arch/powerpc/include/asm/fixmap.h          |   4
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h   |  26
-rw-r--r--  arch/powerpc/kernel/dma.c                  |   2
-rw-r--r--  arch/powerpc/lib/Makefile                  |   1
-rw-r--r--  arch/powerpc/lib/dma-noncoherent.c         | 237
-rw-r--r--  arch/powerpc/mm/Makefile                   |   1
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c          | 400
-rw-r--r--  arch/powerpc/mm/init_32.c                  |   8
-rw-r--r--  arch/powerpc/mm/mem.c                      |  17
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c       |   6
-rw-r--r--  arch/powerpc/mm/pgtable_32.c               |   2
-rw-r--r--  arch/sh/include/asm/flat.h                 |   1
-rw-r--r--  arch/x86/boot/compressed/relocs.c          |   7
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |   4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c  |   1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  |  42
-rw-r--r--  arch/x86/mm/hugetlbpage.c                  |   6
-rw-r--r--  arch/x86/mm/pageattr.c                     |  17
31 files changed, 517 insertions(+), 303 deletions(-)
diff --git a/arch/arm/include/asm/flat.h b/arch/arm/include/asm/flat.h
index 1d77e51907f6..59426a4595c9 100644
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
 #ifndef __ARM_FLAT_H__
 #define __ARM_FLAT_H__
 
-/* An odd number of words will be pushed after this alignment, so
-   deliberately misalign the value. */
-#define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4)
 #define flat_argvp_envp_on_stack() 1
 #define flat_old_ram_flag(flags) (flags)
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
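Note: the ARM flat_stack_align() removed above deliberately produced a stack
pointer equal to 4 (mod 8), so that the odd number of words pushed afterwards
left the stack 8-byte aligned. A minimal user-space sketch of the arithmetic
(illustrative only, not part of this patch):

        #include <stdio.h>

        /* the macro removed above, reproduced for illustration */
        #define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4)

        int main(void)
        {
                void *sp = (void *)0x1000;      /* any word-aligned address */

                flat_stack_align(sp);
                /* (0x1000 - 4) | 4 = 0xffc, and 0xffc % 8 == 4 */
                printf("sp = %p, mod 8 = %lu\n", sp, (unsigned long)sp % 8);
                return 0;
        }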
diff --git a/arch/blackfin/include/asm/.gitignore b/arch/blackfin/include/asm/.gitignore
deleted file mode 100644
index 7858564a4466..000000000000
--- a/arch/blackfin/include/asm/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-+mach
diff --git a/arch/blackfin/include/asm/flat.h b/arch/blackfin/include/asm/flat.h
index e70074e05f4e..733a178d782d 100644
--- a/arch/blackfin/include/asm/flat.h
+++ b/arch/blackfin/include/asm/flat.h
@@ -10,7 +10,6 @@
 
 #include <asm/unaligned.h>
 
-#define flat_stack_align(sp) /* nothing needed */
 #define flat_argvp_envp_on_stack() 0
 #define flat_old_ram_flag(flags) (flags)
 
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index 1e57b636e0bc..cf5066d3efd2 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -378,8 +378,10 @@
 #define __NR_dup3		363
 #define __NR_pipe2		364
 #define __NR_inotify_init1	365
+#define __NR_preadv		366
+#define __NR_pwritev		367
 
-#define __NR_syscall		366
+#define __NR_syscall		368
 #define NR_syscalls		__NR_syscall
 
 /* Old optional stuff no one actually uses */
diff --git a/arch/blackfin/kernel/.gitignore b/arch/blackfin/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/blackfin/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/blackfin/lib/strncmp.c b/arch/blackfin/lib/strncmp.c
index 2aaae78a68e0..46518b1d2983 100644
--- a/arch/blackfin/lib/strncmp.c
+++ b/arch/blackfin/lib/strncmp.c
@@ -8,9 +8,8 @@
 
 #define strncmp __inline_strncmp
 #include <asm/string.h>
-#undef strncmp
-
 #include <linux/module.h>
+#undef strncmp
 
 int strncmp(const char *cs, const char *ct, size_t count)
 {
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 21e65a339a22..a063a434f7e3 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1581,6 +1581,8 @@ ENTRY(_sys_call_table)
 	.long _sys_dup3
 	.long _sys_pipe2
 	.long _sys_inotify_init1	/* 365 */
+	.long _sys_preadv
+	.long _sys_pwritev
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
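Note: the two Blackfin additions above must stay in sync: each new __NR_*
value in unistd.h has to match the position of its .long entry in
_sys_call_table, and __NR_syscall must be one past the last assigned number
so the .rept directive pads the remaining slots with _sys_ni_syscall. A
hypothetical compile-time check of that invariant (not part of the patch):

        #define __NR_preadv     366
        #define __NR_pwritev    367
        #define __NR_syscall    368

        /* fails to compile if the numbering ever drifts out of sync */
        typedef char nr_syscall_ok[(__NR_pwritev + 1 == __NR_syscall) ? 1 : -1];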
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
index 2a873508a9a1..bd12b31b90e6 100644
--- a/arch/h8300/include/asm/flat.h
+++ b/arch/h8300/include/asm/flat.h
@@ -5,7 +5,6 @@
 #ifndef __H8300_FLAT_H__
 #define __H8300_FLAT_H__
 
-#define flat_stack_align(sp) /* nothing needed */
 #define flat_argvp_envp_on_stack() 1
 #define flat_old_ram_flag(flags) 1
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/m32r/include/asm/flat.h b/arch/m32r/include/asm/flat.h
index d851cf0c4aa5..5d711c4688fb 100644
--- a/arch/m32r/include/asm/flat.h
+++ b/arch/m32r/include/asm/flat.h
@@ -12,7 +12,6 @@
 #ifndef __ASM_M32R_FLAT_H
 #define __ASM_M32R_FLAT_H
 
-#define flat_stack_align(sp) (*sp += (*sp & 3 ? (4 - (*sp & 3)): 0))
 #define flat_argvp_envp_on_stack() 0
 #define flat_old_ram_flag(flags) (flags)
 #define flat_set_persistent(relval, p) 0
diff --git a/arch/m68k/include/asm/flat.h b/arch/m68k/include/asm/flat.h
index 814b5174a8e0..a0e290793978 100644
--- a/arch/m68k/include/asm/flat.h
+++ b/arch/m68k/include/asm/flat.h
@@ -5,7 +5,6 @@
 #ifndef __M68KNOMMU_FLAT_H__
 #define __M68KNOMMU_FLAT_H__
 
-#define flat_stack_align(sp) /* nothing needed */
 #define flat_argvp_envp_on_stack() 1
 #define flat_old_ram_flag(flags) (flags)
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a0d1146a0578..cdc9a6ff4be8 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -868,6 +868,18 @@ config TASK_SIZE
 	default "0x80000000" if PPC_PREP || PPC_8xx
 	default "0xc0000000"
 
+config CONSISTENT_SIZE_BOOL
+	bool "Set custom consistent memory pool size"
+	depends on ADVANCED_OPTIONS && NOT_COHERENT_CACHE
+	help
+	  This option allows you to set the size of the
+	  consistent memory pool.  This pool of virtual memory
+	  is used to make consistent memory allocations.
+
+config CONSISTENT_SIZE
+	hex "Size of consistent memory pool" if CONSISTENT_SIZE_BOOL
+	default "0x00200000" if NOT_COHERENT_CACHE
+
 config PIN_TLB
 	bool "Pinned Kernel TLBs (860 ONLY)"
 	depends on ADVANCED_OPTIONS && 8xx
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index c69f2b5f0cc4..cb448d68452c 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -26,7 +26,9 @@
  * allocate the space "normally" and use the cache management functions
  * to ensure it is consistent.
  */
-extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
+struct device;
+extern void *__dma_alloc_coherent(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp);
 extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
@@ -37,7 +39,7 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
  * Cache coherent cores.
  */
 
-#define __dma_alloc_coherent(gfp, size, handle)	NULL
+#define __dma_alloc_coherent(dev, gfp, size, handle)	NULL
 #define __dma_free_coherent(size, addr)		((void)0)
 #define __dma_sync(addr, size, rw)		((void)0)
 #define __dma_sync_page(pg, off, sz, rw)	((void)0)
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index d60fd18f428c..f1f4e23a84e9 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -14,8 +14,6 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-extern unsigned long FIXADDR_TOP;
-
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/page.h>
@@ -24,6 +22,8 @@ extern unsigned long FIXADDR_TOP;
 #include <asm/kmap_types.h>
 #endif
 
+#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
+
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index ba45c997830f..c9ff9d75990e 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -10,7 +10,7 @@
 
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
-extern unsigned long ioremap_bot, ioremap_base;
+extern unsigned long ioremap_bot;
 
 #ifdef CONFIG_44x
 extern int icache_44x_need_flush;
@@ -56,8 +56,30 @@ extern int icache_44x_need_flush;
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 /*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP	PKMAP_BASE
+#else
+#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP	KVIRT_TOP
+#endif
+
+/*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 64MB value just means that there will be a 64MB "hole" after the
+ * current 16MB value just means that there will be a 64MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
  * any out-of-bounds memory accesses will hopefully be caught.
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
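Note: with the defaults visible in this series (no HIGHMEM, the new
CONFIG_CONSISTENT_SIZE default of 0x00200000 from the Kconfig hunk above,
4 KiB pages), the constants added here work out as follows (a sketch under
those assumptions):

        #include <stdio.h>

        #define PAGE_MASK       (~0xfffUL)              /* 4 KiB pages assumed */
        #define KVIRT_TOP       0xfe000000UL            /* !CONFIG_HIGHMEM case */
        #define CONSISTENT_SIZE 0x00200000UL            /* Kconfig default */
        #define IOREMAP_TOP     ((KVIRT_TOP - CONSISTENT_SIZE) & PAGE_MASK)

        int main(void)
        {
                printf("IOREMAP_TOP    = 0x%08lx\n", IOREMAP_TOP); /* 0xfde00000 */
                printf("consistent mem = 0x%08lx..0x%08lx\n",
                       IOREMAP_TOP, KVIRT_TOP);  /* 2 MiB pool below KVIRT_TOP */
                return 0;
        }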
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 53c7788cba78..6b02793dc75b 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	ret = __dma_alloc_coherent(size, dma_handle, flag);
+	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
 	if (ret == NULL)
 		return NULL;
 	*dma_handle += get_dma_direct_offset(dev);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 8db35278a4b4..29b742b90f1f 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_XMON)	+= sstep.o
 obj-$(CONFIG_KPROBES)	+= sstep.o
-obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-noncoherent.o
 
 ifeq ($(CONFIG_PPC64),y)
 obj-$(CONFIG_SMP)	+= locks.o
diff --git a/arch/powerpc/lib/dma-noncoherent.c b/arch/powerpc/lib/dma-noncoherent.c
deleted file mode 100644
index 005a28d380af..000000000000
--- a/arch/powerpc/lib/dma-noncoherent.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * PowerPC version derived from arch/arm/mm/consistent.c
- *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
- *
- * Copyright (C) 2000 Russell King
- *
- * Consistent memory allocators.  Used for DMA devices that want to
- * share uncached memory with the processor core.  The function return
- * is the virtual address and 'dma_handle' is the physical address.
- * Mostly stolen from the ARM port, with some changes for PowerPC.
- *		-- Dan
- *
- * Reorganized to get rid of the arch-specific consistent_* functions
- * and provide non-coherent implementations for the DMA API. -Matt
- *
- * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
- * implementation. This is pulled straight from ARM and barely
- * modified. -Matt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <asm/tlbflush.h>
-
-/*
- * Allocate DMA-coherent memory space and return both the kernel remapped
- * virtual and bus address for that space.
- */
-void *
-__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
-{
-	struct page *page;
-	unsigned long order;
-	int i;
-	unsigned int nr_pages = PAGE_ALIGN(size)>>PAGE_SHIFT;
-	unsigned int array_size = nr_pages * sizeof(struct page *);
-	struct page **pages;
-	struct page *end;
-	u64 mask = 0x00ffffff, limit; /* ISA default */
-	struct vm_struct *area;
-
-	BUG_ON(!mem_init_done);
-	size = PAGE_ALIGN(size);
-	limit = (mask + 1) & ~mask;
-	if (limit && size >= limit) {
-		printk(KERN_WARNING "coherent allocation too big (requested "
-		       "%#x mask %#Lx)\n", size, mask);
-		return NULL;
-	}
-
-	order = get_order(size);
-
-	if (mask != 0xffffffff)
-		gfp |= GFP_DMA;
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	end = page + (1 << order);
-
-	/*
-	 * Invalidate any data that might be lurking in the
-	 * kernel direct-mapped region for device DMA.
-	 */
-	{
-		unsigned long kaddr = (unsigned long)page_address(page);
-		memset(page_address(page), 0, size);
-		flush_dcache_range(kaddr, kaddr + size);
-	}
-
-	split_page(page, order);
-
-	/*
-	 * Set the "dma handle"
-	 */
-	*handle = page_to_phys(page);
-
-	area = get_vm_area_caller(size, VM_IOREMAP,
-			__builtin_return_address(1));
-	if (!area)
-		goto out_free_pages;
-
-	if (array_size > PAGE_SIZE) {
-		pages = vmalloc(array_size);
-		area->flags |= VM_VPAGES;
-	} else {
-		pages = kmalloc(array_size, GFP_KERNEL);
-	}
-	if (!pages)
-		goto out_free_area;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-
-	for (i = 0; i < nr_pages; i++)
-		pages[i] = page + i;
-
-	if (map_vm_area(area, pgprot_noncached(PAGE_KERNEL), &pages))
-		goto out_unmap;
-
-	/*
-	 * Free the otherwise unused pages.
-	 */
-	page += nr_pages;
-	while (page < end) {
-		__free_page(page);
-		page++;
-	}
-
-	return area->addr;
-out_unmap:
-	vunmap(area->addr);
-	if (array_size > PAGE_SIZE)
-		vfree(pages);
-	else
-		kfree(pages);
-	goto out_free_pages;
-out_free_area:
-	free_vm_area(area);
-out_free_pages:
-	if (page)
-		__free_pages(page, order);
-no_page:
-	return NULL;
-}
-EXPORT_SYMBOL(__dma_alloc_coherent);
-
-/*
- * free a page as defined by the above mapping.
- */
-void __dma_free_coherent(size_t size, void *vaddr)
-{
-	vfree(vaddr);
-
-}
-EXPORT_SYMBOL(__dma_free_coherent);
-
-/*
- * make an area consistent.
- */
-void __dma_sync(void *vaddr, size_t size, int direction)
-{
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
-
-	switch (direction) {
-	case DMA_NONE:
-		BUG();
-	case DMA_FROM_DEVICE:
-		/*
-		 * invalidate only when cache-line aligned otherwise there is
-		 * the potential for discarding uncommitted data from the cache
-		 */
-		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
-			flush_dcache_range(start, end);
-		else
-			invalidate_dcache_range(start, end);
-		break;
-	case DMA_TO_DEVICE:		/* writeback only */
-		clean_dcache_range(start, end);
-		break;
-	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
-		flush_dcache_range(start, end);
-		break;
-	}
-}
-EXPORT_SYMBOL(__dma_sync);
-
-#ifdef CONFIG_HIGHMEM
-/*
- * __dma_sync_page() implementation for systems using highmem.
- * In this case, each page of a buffer must be kmapped/kunmapped
- * in order to have a virtual address for __dma_sync(). This must
- * not sleep so kmap_atomic()/kunmap_atomic() are used.
- *
- * Note: yes, it is possible and correct to have a buffer extend
- * beyond the first page.
- */
-static inline void __dma_sync_page_highmem(struct page *page,
-		unsigned long offset, size_t size, int direction)
-{
-	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
-	size_t cur_size = seg_size;
-	unsigned long flags, start, seg_offset = offset;
-	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
-	int seg_nr = 0;
-
-	local_irq_save(flags);
-
-	do {
-		start = (unsigned long)kmap_atomic(page + seg_nr,
-				KM_PPC_SYNC_PAGE) + seg_offset;
-
-		/* Sync this buffer segment */
-		__dma_sync((void *)start, seg_size, direction);
-		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
-		seg_nr++;
-
-		/* Calculate next buffer segment size */
-		seg_size = min((size_t)PAGE_SIZE, size - cur_size);
-
-		/* Add the segment size to our running total */
-		cur_size += seg_size;
-		seg_offset = 0;
-	} while (seg_nr < nr_segs);
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HIGHMEM */
-
-/*
- * __dma_sync_page makes memory consistent. identical to __dma_sync, but
- * takes a struct page instead of a virtual address
- */
-void __dma_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction)
-{
-#ifdef CONFIG_HIGHMEM
-	__dma_sync_page_highmem(page, offset, size, direction);
-#else
-	unsigned long start = (unsigned long)page_address(page) + offset;
-	__dma_sync((void *)start, size, direction);
-#endif
-}
-EXPORT_SYMBOL(__dma_sync_page);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 17290bcedc5e..b746f4ca4209 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
 obj-$(CONFIG_PPC_MM_SLICES)	+= slice.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PPC_SUBPAGE_PROT)	+= subpage-prot.o
+obj-$(CONFIG_NOT_COHERENT_CACHE)	+= dma-noncoherent.o
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
new file mode 100644
index 000000000000..36692f5c9a76
--- /dev/null
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -0,0 +1,400 @@
+/*
+ * PowerPC version derived from arch/arm/mm/consistent.c
+ *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ * Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators.  Used for DMA devices that want to
+ * share uncached memory with the processor core.  The function return
+ * is the virtual address and 'dma_handle' is the physical address.
+ * Mostly stolen from the ARM port, with some changes for PowerPC.
+ *		-- Dan
+ *
+ * Reorganized to get rid of the arch-specific consistent_* functions
+ * and provide non-coherent implementations for the DMA API. -Matt
+ *
+ * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
+ * implementation. This is pulled straight from ARM and barely
+ * modified. -Matt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/tlbflush.h>
+
+#include "mmu_decl.h"
+
+/*
+ * This address range defaults to a value that is safe for all
+ * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
+ * can be further configured for specific applications under
+ * the "Advanced Setup" menu. -Matt
+ */
+#define CONSISTENT_BASE		(IOREMAP_TOP)
+#define CONSISTENT_END		(CONSISTENT_BASE + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static DEFINE_SPINLOCK(consistent_lock);
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vm_region	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ *  struct vm_region vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct ppc_vm_region {
+	struct list_head	vm_list;
+	unsigned long		vm_start;
+	unsigned long		vm_end;
+};
+
+static struct ppc_vm_region consistent_head = {
+	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
+	.vm_start	= CONSISTENT_BASE,
+	.vm_end		= CONSISTENT_END,
+};
+
+static struct ppc_vm_region *
+ppc_vm_region_alloc(struct ppc_vm_region *head, size_t size, gfp_t gfp)
+{
+	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long flags;
+	struct ppc_vm_region *c, *new;
+
+	new = kmalloc(sizeof(struct ppc_vm_region), gfp);
+	if (!new)
+		goto out;
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if ((addr + size) < addr)
+			goto nospc;
+		if ((addr + size) <= c->vm_start)
+			goto found;
+		addr = c->vm_end;
+		if (addr > end)
+			goto nospc;
+	}
+
+ found:
+	/*
+	 * Insert this entry _before_ the one we found.
+	 */
+	list_add_tail(&new->vm_list, &c->vm_list);
+	new->vm_start = addr;
+	new->vm_end = addr + size;
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	return new;
+
+ nospc:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	kfree(new);
+ out:
+	return NULL;
+}
+
+static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsigned long addr)
+{
+	struct ppc_vm_region *c;
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if (c->vm_start == addr)
+			goto out;
+	}
+	c = NULL;
+ out:
+	return c;
+}
+
+/*
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+void *
+__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+{
+	struct page *page;
+	struct ppc_vm_region *c;
+	unsigned long order;
+	u64 mask = ISA_DMA_THRESHOLD, limit;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			goto no_page;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			goto no_page;
+		}
+	}
+
+
+	size = PAGE_ALIGN(size);
+	limit = (mask + 1) & ~mask;
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+		printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
+		       size, mask);
+		return NULL;
+	}
+
+	order = get_order(size);
+
+	/* Might be useful if we ever have a real legacy DMA zone... */
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		goto no_page;
+
+	/*
+	 * Invalidate any data that might be lurking in the
+	 * kernel direct-mapped region for device DMA.
+	 */
+	{
+		unsigned long kaddr = (unsigned long)page_address(page);
+		memset(page_address(page), 0, size);
+		flush_dcache_range(kaddr, kaddr + size);
+	}
+
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
+	c = ppc_vm_region_alloc(&consistent_head, size,
+				gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+	if (c) {
+		unsigned long vaddr = c->vm_start;
+		struct page *end = page + (1 << order);
+
+		split_page(page, order);
+
+		/*
+		 * Set the "dma handle"
+		 */
+		*handle = page_to_phys(page);
+
+		do {
+			SetPageReserved(page);
+			map_page(vaddr, page_to_phys(page),
+				 pgprot_noncached(PAGE_KERNEL));
+			page++;
+			vaddr += PAGE_SIZE;
+		} while (size -= PAGE_SIZE);
+
+		/*
+		 * Free the otherwise unused pages.
+		 */
+		while (page < end) {
+			__free_page(page);
+			page++;
+		}
+
+		return (void *)c->vm_start;
+	}
+
+	if (page)
+		__free_pages(page, order);
+ no_page:
+	return NULL;
+}
+EXPORT_SYMBOL(__dma_alloc_coherent);
+
+/*
+ * free a page as defined by the above mapping.
+ */
+void __dma_free_coherent(size_t size, void *vaddr)
+{
+	struct ppc_vm_region *c;
+	unsigned long flags, addr;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irqsave(&consistent_lock, flags);
+
+	c = ppc_vm_region_find(&consistent_head, (unsigned long)vaddr);
+	if (!c)
+		goto no_area;
+
+	if ((c->vm_end - c->vm_start) != size) {
+		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	addr = c->vm_start;
+	do {
+		pte_t *ptep;
+		unsigned long pfn;
+
+		ptep = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(addr),
+							       addr),
+						    addr),
+					 addr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, addr, ptep);
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				ClearPageReserved(page);
+				__free_page(page);
+			}
+		}
+		addr += PAGE_SIZE;
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	list_del(&c->vm_list);
+
+	spin_unlock_irqrestore(&consistent_lock, flags);
+
+	kfree(c);
+	return;
+
+ no_area:
+	spin_unlock_irqrestore(&consistent_lock, flags);
+	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+	       __func__, vaddr);
+	dump_stack();
+}
+EXPORT_SYMBOL(__dma_free_coherent);
+
+/*
+ * make an area consistent.
+ */
+void __dma_sync(void *vaddr, size_t size, int direction)
+{
+	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
+
+	switch (direction) {
+	case DMA_NONE:
+		BUG();
+	case DMA_FROM_DEVICE:
+		/*
+		 * invalidate only when cache-line aligned otherwise there is
+		 * the potential for discarding uncommitted data from the cache
+		 */
+		if ((start & (L1_CACHE_BYTES - 1)) || (size & (L1_CACHE_BYTES - 1)))
+			flush_dcache_range(start, end);
+		else
+			invalidate_dcache_range(start, end);
+		break;
+	case DMA_TO_DEVICE:		/* writeback only */
+		clean_dcache_range(start, end);
+		break;
+	case DMA_BIDIRECTIONAL:	/* writeback and invalidate */
+		flush_dcache_range(start, end);
+		break;
+	}
+}
+EXPORT_SYMBOL(__dma_sync);
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * __dma_sync_page() implementation for systems using highmem.
+ * In this case, each page of a buffer must be kmapped/kunmapped
+ * in order to have a virtual address for __dma_sync(). This must
+ * not sleep so kmap_atomic()/kunmap_atomic() are used.
+ *
+ * Note: yes, it is possible and correct to have a buffer extend
+ * beyond the first page.
+ */
+static inline void __dma_sync_page_highmem(struct page *page,
+		unsigned long offset, size_t size, int direction)
+{
+	size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
+	size_t cur_size = seg_size;
+	unsigned long flags, start, seg_offset = offset;
+	int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
+	int seg_nr = 0;
+
+	local_irq_save(flags);
+
+	do {
+		start = (unsigned long)kmap_atomic(page + seg_nr,
+				KM_PPC_SYNC_PAGE) + seg_offset;
+
+		/* Sync this buffer segment */
+		__dma_sync((void *)start, seg_size, direction);
+		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+		seg_nr++;
+
+		/* Calculate next buffer segment size */
+		seg_size = min((size_t)PAGE_SIZE, size - cur_size);
+
+		/* Add the segment size to our running total */
+		cur_size += seg_size;
+		seg_offset = 0;
+	} while (seg_nr < nr_segs);
+
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_HIGHMEM */
+
+/*
+ * __dma_sync_page makes memory consistent. identical to __dma_sync, but
+ * takes a struct page instead of a virtual address
+ */
+void __dma_sync_page(struct page *page, unsigned long offset,
+	size_t size, int direction)
+{
+#ifdef CONFIG_HIGHMEM
+	__dma_sync_page_highmem(page, offset, size, direction);
+#else
+	unsigned long start = (unsigned long)page_address(page) + offset;
+	__dma_sync((void *)start, size, direction);
+#endif
+}
+EXPORT_SYMBOL(__dma_sync_page);
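Note: the new file above is what dma_alloc_coherent() ends up calling on a
CONFIG_NOT_COHERENT_CACHE platform (see the arch/powerpc/kernel/dma.c hunk
earlier in this diff). A hypothetical driver-side sketch of that path, using
only the standard DMA API of this era:

        #include <linux/dma-mapping.h>

        static int example_setup_ring(struct device *dev)
        {
                dma_addr_t ring_dma;
                void *ring;

                /* On non-coherent PPC32 this carves one page out of the
                 * consistent pool below IOREMAP_TOP and maps it with
                 * pgprot_noncached(), via __dma_alloc_coherent() above. */
                ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
                if (!ring)
                        return -ENOMEM;

                /* ... program ring_dma into the device, use ring from the CPU ... */

                dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
                return 0;
        }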
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 666a5e8a5be1..3de6a0d93824 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -168,12 +168,8 @@ void __init MMU_init(void)
 		ppc_md.progress("MMU:mapin", 0x301);
 	mapin_ram();
 
-#ifdef CONFIG_HIGHMEM
-	ioremap_base = PKMAP_BASE;
-#else
-	ioremap_base = 0xfe000000UL;	/* for now, could be 0xfffff000 */
-#endif /* CONFIG_HIGHMEM */
-	ioremap_bot = ioremap_base;
+	/* Initialize early top-down ioremap allocator */
+	ioremap_bot = IOREMAP_TOP;
 
 	/* Map in I/O resources */
 	if (ppc_md.progress)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d0602a76bf7f..579382c163a9 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -380,6 +380,23 @@ void __init mem_init(void)
 	       bsssize >> 10,
 	       initsize >> 10);
 
+#ifdef CONFIG_PPC32
+	pr_info("Kernel virtual memory layout:\n");
+	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
+		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
+		ioremap_bot, IOREMAP_TOP);
+	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
+		VMALLOC_START, VMALLOC_END);
+#endif /* CONFIG_PPC32 */
+
 	mem_init_done = 1;
 }
 
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index a70e311bd457..030d0005b4d2 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -127,12 +127,12 @@ static unsigned int steal_context_up(unsigned int id)
 
 	pr_debug("[%d] steal context %d from mm @%p\n", cpu, id, mm);
 
-	/* Mark this mm has having no context anymore */
-	mm->context.id = MMU_NO_CONTEXT;
-
 	/* Flush the TLB for that context */
 	local_flush_tlb_mm(mm);
 
+	/* Mark this mm has having no context anymore */
+	mm->context.id = MMU_NO_CONTEXT;
+
 	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
 	__clear_bit(id, stale_map[cpu]);
 
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 430d0908fa50..5422169626ba 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -399,8 +399,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 static int fixmaps;
-unsigned long FIXADDR_TOP = (-PAGE_SIZE);
-EXPORT_SYMBOL(FIXADDR_TOP);
 
 void __set_fixmap (enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
 {
diff --git a/arch/sh/include/asm/flat.h b/arch/sh/include/asm/flat.h
index d3b2b4f109e3..5d84df5e27f6 100644
--- a/arch/sh/include/asm/flat.h
+++ b/arch/sh/include/asm/flat.h
@@ -12,7 +12,6 @@
 #ifndef __ASM_SH_FLAT_H
 #define __ASM_SH_FLAT_H
 
-#define flat_stack_align(sp) /* nothing needed */
 #define flat_argvp_envp_on_stack() 0
 #define flat_old_ram_flag(flags) (flags)
 #define flat_reloc_valid(reloc, size) ((reloc) <= (size))
diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
index 857e492c571e..bbeb0c3fbd90 100644
--- a/arch/x86/boot/compressed/relocs.c
+++ b/arch/x86/boot/compressed/relocs.c
@@ -504,8 +504,11 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
 		if (sym->st_shndx == SHN_ABS) {
 			continue;
 		}
-		if (r_type == R_386_PC32) {
-			/* PC relative relocations don't need to be adjusted */
+		if (r_type == R_386_NONE || r_type == R_386_PC32) {
+			/*
+			 * NONE can be ignored and and PC relative
+			 * relocations don't need to be adjusted.
+			 */
 		}
 		else if (r_type == R_386_32) {
 			/* Visit relocations that need to be adjusted */
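Note: the reason R_386_PC32 (and now R_386_NONE) entries can be skipped is
visible in the ELF relocation formulas: an absolute R_386_32 slot holds
S + A, while a PC-relative R_386_PC32 slot holds S + A - P. Relocating the
whole image by a delta d shifts the symbol S and the place P equally, so only
the absolute form changes. An illustrative snippet (not kernel code):

        /* R_386_32 stores S + A; R_386_PC32 stores S + A - P (SysV i386 ABI) */
        unsigned long reloc_value(unsigned long S, unsigned long A,
                                  unsigned long P, int pc_relative)
        {
                return pc_relative ? S + A - P : S + A;
        }
        /*
         * After a shift by d:  S' = S + d, P' = P + d.
         * PC32:     (S+d) + A - (P+d) == S + A - P    -> unchanged, skip
         * R_386_32: (S+d) + A        == (S + A) + d   -> adjust by d
         */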
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 208ecf6643df..54b6de2cd947 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -693,8 +693,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
 	    policy->cpuinfo.transition_latency > 20 * 1000) {
 		policy->cpuinfo.transition_latency = 20 * 1000;
-		printk_once(KERN_INFO "Capping off P-state tranision"
-			    " latency at 20 uS\n");
+		printk_once(KERN_INFO
+			    "P-state transition latency capped at 20 uS\n");
 	}
 
 	/* table init */
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 6ac55bd341ae..869615193720 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -168,6 +168,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 	case 0x0E: /* Core */
 	case 0x0F: /* Core Duo */
 	case 0x16: /* Celeron Core */
+	case 0x1C: /* Atom */
 		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
 		return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
 	case 0x0D: /* Pentium M (Dothan) */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index 3c28ccd49742..a8363e5be4ef 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -168,10 +168,12 @@ static int check_powernow(void)
 	return 1;
 }
 
+#ifdef CONFIG_X86_POWERNOW_K7_ACPI
 static void invalidate_entry(unsigned int entry)
 {
 	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
+#endif
 
 static int get_ranges(unsigned char *pst)
 {
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 4709ead2db52..f6b32d112357 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -649,6 +649,20 @@ static void print_basics(struct powernow_k8_data *data)
 			data->batps);
 }
 
+static u32 freq_from_fid_did(u32 fid, u32 did)
+{
+	u32 mhz = 0;
+
+	if (boot_cpu_data.x86 == 0x10)
+		mhz = (100 * (fid + 0x10)) >> did;
+	else if (boot_cpu_data.x86 == 0x11)
+		mhz = (100 * (fid + 8)) >> did;
+	else
+		BUG();
+
+	return mhz * 1000;
+}
+
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
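Note: freq_from_fid_did() above encodes the AMD family 0x10/0x11 rule
CoreCOF = 100 MHz * (CpuFid + offset) / 2^CpuDid (offset 0x10 for family
0x10, 8 for family 0x11), returning kHz. A worked example with illustrative
fid/did values (not taken from the patch):

        #include <stdio.h>

        static unsigned int freq_khz_fam10h(unsigned int fid, unsigned int did)
        {
                return ((100 * (fid + 0x10)) >> did) * 1000;
        }

        int main(void)
        {
                printf("%u kHz\n", freq_khz_fam10h(0x0, 0)); /* 1600000 = 1.6 GHz */
                printf("%u kHz\n", freq_khz_fam10h(0x6, 1)); /* 1100000 = 1.1 GHz */
                return 0;
        }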
@@ -923,8 +937,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 
 		powernow_table[i].index = index;
 
-		powernow_table[i].frequency =
-			data->acpi_data.states[i].core_frequency * 1000;
+		/* Frequency may be rounded for these */
+		if (boot_cpu_data.x86 == 0x10 || boot_cpu_data.x86 == 0x11) {
+			powernow_table[i].frequency =
+				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
+		} else
+			powernow_table[i].frequency =
+				data->acpi_data.states[i].core_frequency * 1000;
 	}
 	return 0;
 }
@@ -1215,13 +1234,16 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
+static const char ACPI_PSS_BIOS_BUG_MSG[] =
+	KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask;
 	int rc;
-	static int print_once;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1244,19 +1266,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * an UP version, and is deprecated by AMD.
 	 */
 	if (num_online_cpus() != 1) {
-		/*
-		 * Replace this one with print_once as soon as such a
-		 * thing gets introduced
-		 */
-		if (!print_once) {
-			WARN_ONCE(1, KERN_ERR FW_BUG PFX "Your BIOS "
-				"does not provide ACPI _PSS objects "
-				"in a way that Linux understands. "
-				"Please report this to the Linux ACPI"
-				" maintainers and complain to your "
-				"BIOS vendor.\n");
-			print_once++;
-		}
+		printk_once(ACPI_PSS_BIOS_BUG_MSG);
 		goto err_out;
 	}
 	if (pol->cpu != 0) {
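Note: ACPI_PSS_BIOS_BUG_MSG added above relies on C string-literal
concatenation: each line of the message carries its own KERN_ERR level
prefix, so a single printk_once() emits two properly tagged log lines. A
user-space sketch of the same trick (prefix values as used by kernels of
this era):

        #include <stdio.h>

        #define KERN_ERR        "<3>"                   /* printk level prefix */
        #define FW_BUG          "[Firmware Bug]: "
        #define PFX             "powernow-k8: "

        static const char msg[] =
                KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
                KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";

        int main(void)
        {
                fputs(msg, stdout);     /* two lines, each fully prefixed */
                return 0;
        }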
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8f307d914c2e..f46c340727b8 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
 	unsigned long sbase = saddr & PUD_MASK;
 	unsigned long s_end = sbase + PUD_SIZE;
 
+	/* Allow segments to share if only one is marked locked */
+	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
+	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
+
 	/*
 	 * match the virtual addresses, permission and the alignment of the
 	 * page table page.
 	 */
 	if (pmd_index(addr) != pmd_index(saddr) ||
-	    vma->vm_flags != svma->vm_flags ||
+	    vm_flags != svm_flags ||
 	    sbase < svma->vm_start || svma->vm_end < s_end)
 		return 0;
 
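Note: the hugetlb change above compares vm_flags with VM_LOCKED masked out on
both sides, so an mlock()ed mapping and an unlocked one can still share a
page table page. An illustrative reduction (flag values as in <linux/mm.h>
of this era):

        #define VM_READ         0x00000001UL
        #define VM_WRITE        0x00000002UL
        #define VM_LOCKED       0x00002000UL

        static int flags_shareable(unsigned long a, unsigned long b)
        {
                return (a & ~VM_LOCKED) == (b & ~VM_LOCKED);
        }
        /* flags_shareable(VM_READ|VM_WRITE, VM_READ|VM_WRITE|VM_LOCKED) == 1 */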
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 0f9052bcec4b..e17efed088c5 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -204,30 +204,19 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
 	}
 }
 
-static void wbinvd_local(void *unused)
-{
-	wbinvd();
-}
-
 static void cpa_flush_array(unsigned long *start, int numpages, int cache,
 			    int in_flags, struct page **pages)
 {
 	unsigned int i, level;
+	unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_range, NULL, 1);
+	on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
-	if (!cache)
+	if (!cache || do_wbinvd)
 		return;
 
-	/* 4M threshold */
-	if (numpages >= 1024) {
-		if (boot_cpu_data.x86 >= 4)
-			on_each_cpu(wbinvd_local, NULL, 1);
-
-		return;
-	}
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that