Diffstat (limited to 'arch/x86/mm/iomap_32.c')
-rw-r--r--	arch/x86/mm/iomap_32.c	59
1 file changed, 59 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
new file mode 100644
index 000000000000..d0151d8ce452
--- /dev/null
+++ b/arch/x86/mm/iomap_32.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/iomap.h>
+#include <linux/module.h>
+
+/* Map 'pfn' using fixed map 'type' and protections 'prot'
+ */
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	/*
+	 * Force other mappings to Oops if they try to access this pte
+	 * without first remapping it.  Keeping stale mappings around is
+	 * also a bad idea, in case the page changes cacheability attributes
+	 * or becomes a protected page in a hypervisor.
+	 */
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+
+	arch_flush_lazy_mmu_mode();
+	pagefault_enable();
+}
+EXPORT_SYMBOL_GPL(iounmap_atomic);
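
For context, a minimal usage sketch of the two exported helpers follows. It is not part of the patch: the function copy_to_device_page and the names dev_pfn, src and len are invented for illustration, and it assumes the caller already knows the page frame number of a device page; KM_USER0, PAGE_KERNEL_NOCACHE, min_t() and the headers used come from the kernel tree of that era.

/*
 * Hypothetical example (not part of the patch): atomically map one
 * device page uncached, copy a buffer into it, and drop the mapping.
 */
#include <asm/iomap.h>
#include <linux/highmem.h>	/* enum km_type, KM_USER0 */
#include <linux/kernel.h>	/* min_t() */
#include <linux/string.h>	/* memcpy() */

static void copy_to_device_page(unsigned long dev_pfn,
				const void *src, size_t len)
{
	void *vaddr;

	/* Disables pagefaults; the code until the unmap must not sleep. */
	vaddr = iomap_atomic_prot_pfn(dev_pfn, KM_USER0, PAGE_KERNEL_NOCACHE);

	/* Only one page is mapped, so clamp the copy to PAGE_SIZE. */
	memcpy(vaddr, src, min_t(size_t, len, PAGE_SIZE));

	/* Must pass the same km_type slot that was used for the mapping. */
	iounmap_atomic(vaddr, KM_USER0);
}

Because pagefault_disable() is called on entry and pagefault_enable() only runs in iounmap_atomic(), every mapping has to be paired with an unmap on the same CPU, with no sleeping in between.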