path: root/arch/mips/mm/highmem.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:15:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 20:15:20 -0400
commit	31453a9764f7e2a72a6e2c502ace586e2663a68c (patch)
tree	5d4db63de5b4b85d1ffdab4e95a75175a784a10a /arch/mips/mm/highmem.c
parent	f9ba5375a8aae4aeea6be15df77e24707a429812 (diff)
parent	93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5 (diff)
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
  scripts/checkpatch.pl: add check for declaration of pci_device_id
  scripts/checkpatch.pl: add warnings for static char that could be static const char
  checkpatch: version 0.31
  checkpatch: statement/block context analyser should look at sanitised lines
  checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
  checkpatch: clean up structure definition macro handline
  checkpatch: update copyright dates
  checkpatch: Add additional attribute #defines
  checkpatch: check for incorrect permissions
  checkpatch: ensure kconfig help checks only apply when we are adding help
  checkpatch: simplify and consolidate "missing space after" checks
  checkpatch: add check for space after struct, union, and enum
  checkpatch: returning errno typically should be negative
  checkpatch: handle casts better fixing false categorisation of : as binary
  checkpatch: ensure we do not collapse bracketed sections into constants
  checkpatch: suggest cleanpatch and cleanfile when appropriate
  checkpatch: types may sit on a line on their own
  checkpatch: fix regressions in "fix handling of leading spaces"
  div64_u64(): improve precision on 32bit platforms
  lib/parser: cleanup match_number()
  ...
Diffstat (limited to 'arch/mips/mm/highmem.c')
-rw-r--r--	arch/mips/mm/highmem.c	50
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 6a2b1bf9ef11..1e69b1fb4b85 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,7 +9,7 @@ static pte_t *kmap_pte;
 
 unsigned long highstart_pfn, highend_pfn;
 
-void *__kmap(struct page *page)
+void *kmap(struct page *page)
 {
 	void *addr;
 
@@ -21,16 +21,16 @@ void *__kmap(struct page *page)
 
 	return addr;
 }
-EXPORT_SYMBOL(__kmap);
+EXPORT_SYMBOL(kmap);
 
-void __kunmap(struct page *page)
+void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
 	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
-EXPORT_SYMBOL(__kunmap);
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -41,17 +41,17 @@ EXPORT_SYMBOL(__kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page, enum km_type type)
+void *__kmap_atomic(struct page *page)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -64,43 +64,47 @@ void *__kmap_atomic(struct page *page, enum km_type type)
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
-void __kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+	int type;
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
 		return;
 	}
 
-	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	type = kmap_atomic_idx_pop();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
 
-	/*
-	 * force other mappings to Oops if they'll try to access
-	 * this pte without first remap it
-	 */
-	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	local_flush_tlb_one(vaddr);
-#endif
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
 	pagefault_enable();
 }
-EXPORT_SYMBOL(__kunmap_atomic_notypecheck);
+EXPORT_SYMBOL(__kunmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
@@ -109,7 +113,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 	return (void*) vaddr;
 }
 
-struct page *__kmap_atomic_to_page(void *ptr)
+struct page *kmap_atomic_to_page(void *ptr)
 {
 	unsigned long idx, vaddr = (unsigned long)ptr;
 	pte_t *pte;
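
For orientation: the change being merged here switches the MIPS atomic-kmap code from the per-slot enum km_type interface to the stack-based interface, where the per-CPU slot is obtained with kmap_atomic_idx_push() and released with kmap_atomic_idx_pop(). A minimal caller sketch of the old versus new usage follows; copy_two_highpages() is a hypothetical example function, not code from this commit, and it assumes the generic kmap_atomic()/kunmap_atomic() wrappers in linux/highmem.h introduced by the same series.

#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/string.h>

/*
 * Hypothetical caller, for illustration only: with the stack-based
 * API the caller no longer passes a KM_USER0/KM_USER1 slot, and the
 * two atomic mappings must be released in reverse (LIFO) order.
 */
static void copy_two_highpages(struct page *dst, struct page *src)
{
	void *vfrom, *vto;

	vfrom = kmap_atomic(src);	/* previously: kmap_atomic(src, KM_USER0) */
	vto   = kmap_atomic(dst);	/* previously: kmap_atomic(dst, KM_USER1) */

	memcpy(vto, vfrom, PAGE_SIZE);

	kunmap_atomic(vto);		/* previously: kunmap_atomic(vto, KM_USER1) */
	kunmap_atomic(vfrom);
}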