author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/mm/highmem.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/i386/mm/highmem.c')
-rw-r--r--   arch/i386/mm/highmem.c   89
1 file changed, 89 insertions, 0 deletions
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
new file mode 100644
index 000000000000..fc4c4cad4e98
--- /dev/null
+++ b/arch/i386/mm/highmem.c
@@ -0,0 +1,89 @@
#include <linux/highmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	__flush_tlb_one(vaddr);

	return (void*) vaddr;
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	__flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
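
For context, here is a minimal usage sketch of the two interfaces this file provides: kmap() for sleepable process context, and kmap_atomic()/kunmap_atomic() for short paths that must not sleep. This is not part of the commit; the helper names copy_to_highpage() and zero_highpage_atomic() are invented for illustration, and the 2.6-era km_type slot KM_USER0 is assumed.

/* Illustrative sketch against the 2.6-era highmem API above; not part of the patch. */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Process context: kmap() may sleep waiting for a free kmap pool entry,
 * so this must not be called from interrupt or atomic context. */
static void copy_to_highpage(struct page *page, const void *src, size_t len)
{
	void *dst = kmap(page);	/* lowmem pages short-circuit to page_address() */

	memcpy(dst, src, len);
	kunmap(page);
}

/* Atomic context: kmap_atomic() never sleeps; preemption stays disabled
 * (inc_preempt_count() above) until the matching kunmap_atomic(). */
static void zero_highpage_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}

Because the atomic mapping slot is indexed by km_type and smp_processor_id(), reusing the same slot on the same CPU without an intervening kunmap_atomic() would trip the CONFIG_DEBUG_HIGHMEM BUG() check in kmap_atomic().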