author		Linus Torvalds <torvalds@linux-foundation.org>	2008-04-25 15:32:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-25 15:32:10 -0400
commit		4b7227ca321ccf447cdc04538687c895db8b77f5 (patch)
tree		72712127fc56aa2579e8a1508998bcabf6bd6c60 /include/asm-x86
parent		5dae61b80564a5583ff4b56e357bdbc733fddb76 (diff)
parent		1775826ceec51187aa868406585799b7e76ffa7d (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-xen-next
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-xen-next: (52 commits)
  xen: add balloon driver
  xen: allow compilation with non-flat memory
  xen: fold xen_sysexit into xen_iret
  xen: allow set_pte_at on init_mm to be lockless
  xen: disable preemption during tlb flush
  xen pvfb: Para-virtual framebuffer, keyboard and pointer driver
  xen: Add compatibility aliases for frontend drivers
  xen: Module autoprobing support for frontend drivers
  xen blkfront: Delay wait for block devices until after the disk is added
  xen/blkfront: use bdget_disk
  xen: Make xen-blkfront write its protocol ABI to xenstore
  xen: import arch generic part of xencomm
  xen: make grant table arch portable
  xen: replace callers of alloc_vm_area()/free_vm_area() with xen_ prefixed one
  xen: make include/xen/page.h portable moving those definitions under asm dir
  xen: add resend_irq_on_evtchn() definition into events.c
  Xen: make events.c portable for ia64/xen support
  xen: move events.c to drivers/xen for IA64/Xen support
  xen: move features.c from arch/x86/xen/features.c to drivers/xen
  xen: add missing definitions in include/xen/interface/vcpu.h which ia64/xen needs
  ...
Diffstat (limited to 'include/asm-x86')
-rw-r--r--	include/asm-x86/paravirt.h		 43
-rw-r--r--	include/asm-x86/pgalloc.h		111
-rw-r--r--	include/asm-x86/pgalloc_32.h		 95
-rw-r--r--	include/asm-x86/pgalloc_64.h		133
-rw-r--r--	include/asm-x86/pgtable.h		 54
-rw-r--r--	include/asm-x86/pgtable_32.h		 18
-rw-r--r--	include/asm-x86/pgtable_64.h		  2
-rw-r--r--	include/asm-x86/xen/events.h		 22
-rw-r--r--	include/asm-x86/xen/grant_table.h	  7
-rw-r--r--	include/asm-x86/xen/hypercall.h		  6
-rw-r--r--	include/asm-x86/xen/interface.h		 28
-rw-r--r--	include/asm-x86/xen/page.h		168
12 files changed, 393 insertions, 294 deletions
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 3d419398499b..0f13b945e240 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -220,11 +220,13 @@ struct pv_mmu_ops {
 			 unsigned long va);
 
 	/* Hooks for allocating/releasing pagetable pages */
-	void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd)(struct mm_struct *mm, u32 pfn);
-	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
-	void (*release_pt)(u32 pfn);
-	void (*release_pd)(u32 pfn);
+	void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
+	void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+	void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
+	void (*release_pte)(u32 pfn);
+	void (*release_pmd)(u32 pfn);
+	void (*release_pud)(u32 pfn);
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -910,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
 }
 
-static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
 }
-static inline void paravirt_release_pt(unsigned pfn)
+static inline void paravirt_release_pte(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
 }
 
-static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn);
+	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
 }
 
-static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
-					   unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
+					    unsigned start, unsigned count)
 {
-	PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
+	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
 }
-static inline void paravirt_release_pd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
+	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+}
+
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+{
+	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+}
+static inline void paravirt_release_pud(unsigned pfn)
+{
+	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
 #ifdef CONFIG_HIGHPTE
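
The rename tracks the standard x86 pagetable terminology (pte/pmd/pud instead of the older pt/pd), and a pud level is added so 64-bit four-level pagetables get the same hooks. As a rough illustration of how a paravirt backend consumes these ops — the demo_* names below are invented for this sketch, not the actual Xen implementation:

    /* Sketch: hooking the renamed callbacks (demo_* names are hypothetical). */
    static void demo_alloc_pte(struct mm_struct *mm, u32 pfn)
    {
    	/* e.g. tell the hypervisor that pfn is about to hold a pte page */
    }

    static void demo_release_pte(u32 pfn)
    {
    	/* e.g. unpin the page so it can be reused as normal memory */
    }

    static void __init demo_install_hooks(void)
    {
    	pv_mmu_ops.alloc_pte   = demo_alloc_pte;
    	pv_mmu_ops.release_pte = demo_release_pte;
    	/* alloc_pmd/release_pmd and alloc_pud/release_pud follow the
    	 * same pattern one and two levels up the hierarchy. */
    }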
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 5886eed05886..91e4641f3f31 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,5 +1,110 @@
-#ifdef CONFIG_X86_32
-# include "pgalloc_32.h"
+#ifndef _ASM_X86_PGALLOC_H
+#define _ASM_X86_PGALLOC_H
+
+#include <linux/threads.h>
+#include <linux/mm.h>		/* for struct page */
+#include <linux/pagemap.h>
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
 #else
-# include "pgalloc_64.h"
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)	{}
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)	{}
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+					    unsigned long start, unsigned long count) {}
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)	{}
+static inline void paravirt_release_pte(unsigned long pfn) {}
+static inline void paravirt_release_pmd(unsigned long pfn) {}
+static inline void paravirt_release_pud(unsigned long pfn) {}
 #endif
+
+/*
+ * Allocate and free page tables.
+ */
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
+extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
+
+/* Should really implement gc for free page table pages. This could be
+   done with a reference count in struct page. */
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	__free_page(pte);
+}
+
+extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+				       pmd_t *pmd, pte_t *pte)
+{
+	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				struct page *pte)
+{
+	unsigned long pfn = page_to_pfn(pte);
+
+	paravirt_alloc_pte(mm, pfn);
+	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
+}
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#if PAGETABLE_LEVELS > 2
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
+	free_page((unsigned long)pmd);
+}
+
+extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+#ifdef CONFIG_X86_PAE
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+#else	/* !CONFIG_X86_PAE */
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
+#endif	/* CONFIG_X86_PAE */
+
+#if PAGETABLE_LEVELS > 3
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
+}
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+	free_page((unsigned long)pud);
+}
+
+extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+#endif	/* PAGETABLE_LEVELS > 3 */
+#endif	/* PAGETABLE_LEVELS > 2 */
+
+#endif	/* _ASM_X86_PGALLOC_H */
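
The unified header keeps the paravirt notifications on the populate paths. A simplified sketch of how the generic mm code would use these helpers when faulting in a new pte page (condensed from the usual __pte_alloc() pattern, not a verbatim copy of it):

    /* Sketch: allocate a pte page and hook it into a pmd entry. */
    static int demo_alloc_and_populate(struct mm_struct *mm, pmd_t *pmd,
    				   unsigned long addr)
    {
    	pgtable_t new = pte_alloc_one(mm, addr);

    	if (!new)
    		return -ENOMEM;
    	/* pmd_populate() first calls paravirt_alloc_pte() so a
    	 * hypervisor can see the page before set_pmd() wires it in. */
    	pmd_populate(mm, pmd, new);
    	return 0;
    }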
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
deleted file mode 100644
index 6bea6e5b5ee5..000000000000
--- a/include/asm-x86/pgalloc_32.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#ifndef _I386_PGALLOC_H
-#define _I386_PGALLOC_H
-
-#include <linux/threads.h>
-#include <linux/mm.h>		/* for struct page */
-#include <linux/pagemap.h>
-#include <asm/tlb.h>
-#include <asm-generic/tlb.h>
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_alloc_pt(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd(mm, pfn) do { } while (0)
-#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
-#define paravirt_release_pt(pfn) do { } while (0)
-#define paravirt_release_pd(pfn) do { } while (0)
-#endif
-
-static inline void pmd_populate_kernel(struct mm_struct *mm,
-				       pmd_t *pmd, pte_t *pte)
-{
-	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
-	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
-	unsigned long pfn = page_to_pfn(pte);
-
-	paravirt_alloc_pt(mm, pfn);
-	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
-}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-/*
- * Allocate and free page tables.
- */
-extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
-
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_page_dtor(pte);
-	__free_page(pte);
-}
-
-
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
-
-#ifdef CONFIG_X86_PAE
-/*
- * In the PAE case we free the pmds as part of the pgd.
- */
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
-}
-
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
-{
-	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
-
-	/* Note: almost everything apart from _PAGE_PRESENT is
-	   reserved at the pmd (PDPT) level. */
-	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
-
-	/*
-	 * According to Intel App note "TLBs, Paging-Structure Caches,
-	 * and Their Invalidation", April 2007, document 317080-001,
-	 * section 8.1: in PAE mode we explicitly have to flush the
-	 * TLB via cr3 if the top-level pgd is changed...
-	 */
-	if (mm == current->active_mm)
-		write_cr3(read_cr3());
-}
-#endif	/* CONFIG_X86_PAE */
-
-#endif	/* _I386_PGALLOC_H */
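
The PAE pud_populate() above, with its mandatory cr3 reload, does not disappear: the unified pgalloc.h declares it extern under CONFIG_X86_PAE. A sketch of what the out-of-line version would look like, assuming it keeps this file's logic with the renamed paravirt hook:

    /* Sketch, assuming the logic above moves out of line unchanged. */
    void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
    {
    	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

    	/* Only _PAGE_PRESENT is usable at the PDPT level. */
    	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

    	/* PAE requires a TLB flush via cr3 when the top level changes. */
    	if (mm == current->active_mm)
    		write_cr3(read_cr3());
    }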
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h
deleted file mode 100644
index 8d6722320dcc..000000000000
--- a/include/asm-x86/pgalloc_64.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#ifndef _X86_64_PGALLOC_H
-#define _X86_64_PGALLOC_H
-
-#include <asm/pda.h>
-#include <linux/threads.h>
-#include <linux/mm.h>
-
-#define pmd_populate_kernel(mm, pmd, pte) \
-		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
-#define pud_populate(mm, pud, pmd) \
-		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
-#define pgd_populate(mm, pgd, pud) \
-		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
-{
-	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-{
-	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
-}
-
-static inline void pgd_list_add(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
-
-	spin_lock_irqsave(&pgd_lock, flags);
-	list_add(&page->lru, &pgd_list);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline void pgd_list_del(pgd_t *pgd)
-{
-	struct page *page = virt_to_page(pgd);
-	unsigned long flags;
-
-	spin_lock_irqsave(&pgd_lock, flags);
-	list_del(&page->lru);
-	spin_unlock_irqrestore(&pgd_lock, flags);
-}
-
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	unsigned boundary;
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!pgd)
-		return NULL;
-	pgd_list_add(pgd);
-	/*
-	 * Copy kernel pointers in from init.
-	 * Could keep a freelist or slab cache of those because the kernel
-	 * part never changes.
-	 */
-	boundary = pgd_index(__PAGE_OFFSET);
-	memset(pgd, 0, boundary * sizeof(pgd_t));
-	memcpy(pgd + boundary,
-	       init_level4_pgt + boundary,
-	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
-	return pgd;
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
-	pgd_list_del(pgd);
-	free_page((unsigned long)pgd);
-}
-
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page;
-	void *p;
-
-	p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
-	if (!p)
-		return NULL;
-	page = virt_to_page(p);
-	pgtable_page_ctor(page);
-	return page;
-}
-
-/* Should really implement gc for free page table pages. This could be
-   done with a reference count in struct page. */
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_page_dtor(pte);
-	__free_page(pte);
-}
-
-#define __pte_free_tlb(tlb,pte)				\
-do {							\
-	pgtable_page_dtor((pte));			\
-	tlb_remove_page((tlb), (pte));			\
-} while (0)
-
-#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
-
-#endif /* _X86_64_PGALLOC_H */
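
The open-coded boundary = pgd_index(__PAGE_OFFSET) in pgd_alloc() above is exactly what the new KERNEL_PGD_BOUNDARY/KERNEL_PGD_PTRS macros in pgtable.h (below) capture. The same kernel-mapping copy expressed with them — a sketch, not the relocated implementation itself:

    /* Sketch: the pgd_alloc() copy rewritten with the new macros. */
    static void demo_copy_kernel_mappings(pgd_t *pgd)
    {
    	memset(pgd, 0, KERNEL_PGD_BOUNDARY * sizeof(pgd_t));
    	memcpy(pgd + KERNEL_PGD_BOUNDARY,
    	       init_level4_pgt + KERNEL_PGD_BOUNDARY,
    	       KERNEL_PGD_PTRS * sizeof(pgd_t));
    }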
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index f1d9f4a03f6f..b8a08bd7bd48 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
-#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
 #define FIRST_USER_ADDRESS	0
 
 #define _PAGE_BIT_PRESENT	0	/* is present */
@@ -330,6 +329,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 # include "pgtable_64.h"
 #endif
 
+#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
+#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
+
 #ifndef __ASSEMBLY__
 
 enum {
@@ -389,37 +391,17 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
  * bit at the same time.
  */
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)		\
-({									\
-	int __changed = !pte_same(*(ptep), entry);			\
-	if (__changed && dirty) {					\
-		*ptep = entry;						\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
-		flush_tlb_page(vma, address);				\
-	}								\
-	__changed;							\
-})
+extern int ptep_set_access_flags(struct vm_area_struct *vma,
+				 unsigned long address, pte_t *ptep,
+				 pte_t entry, int dirty);
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_young(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
-					   &(ptep)->pte);		\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
+				     unsigned long addr, pte_t *ptep);
 
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)			\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
-	if (__young)							\
-		flush_tlb_page(vma, address);				\
-	__young;							\
-})
+extern int ptep_clear_flush_young(struct vm_area_struct *vma,
+				  unsigned long address, pte_t *ptep);
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -456,6 +438,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
 	pte_update(mm, addr, ptep);
 }
 
+/*
+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
+ *
+ *  dst - pointer to pgd range anywhere on a pgd page
+ *  src - ""
+ *  count - the number of pgds to copy.
+ *
+ * dst and src can be on the same page, but the range must not overlap,
+ * and must not cross a page boundary.
+ */
+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
+{
+	memcpy(dst, src, count * sizeof(pgd_t));
+}
+
+
 #include <asm-generic/pgtable.h>
 #endif	/* __ASSEMBLY__ */
 
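
clone_pgd_range() moves here from pgtable_32.h so both 32- and 64-bit code can use it; combined with the new KERNEL_PGD_BOUNDARY it gives a one-liner for sharing the kernel half of a pgd. A usage sketch (swapper_pg_dir as the source is the typical 32-bit case; the demo_ function is invented):

    /* Sketch: copy the kernel entries into a freshly allocated pgd. */
    static void demo_share_kernel_pgds(pgd_t *dst)
    {
    	clone_pgd_range(dst + KERNEL_PGD_BOUNDARY,
    			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
    			KERNEL_PGD_PTRS);
    }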
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index c4a643674458..168b6447cf18 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -48,9 +48,6 @@ void paging_init(void);
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE - 1))
 
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts. That means that
@@ -109,21 +106,6 @@ extern int pmd_bad(pmd_t pmd);
 #endif
 
 /*
- * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
- *
- *  dst - pointer to pgd range anywhere on a pgd page
- *  src - ""
- *  count - the number of pgds to copy.
- *
- * dst and src can be on the same page, but the range must not overlap,
- * and must not cross a page boundary.
- */
-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
-{
-	memcpy(dst, src, count * sizeof(pgd_t));
-}
-
-/*
  * Macro to mark a page protection value as "uncacheable".
  * On processors which do not support it, this is a no-op.
  */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9fd87d0b6477..a3bbf8766c1d 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -24,7 +24,7 @@ extern void paging_init(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define SHARED_KERNEL_PMD	1
+#define SHARED_KERNEL_PMD	0
 
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
new file mode 100644
index 000000000000..596312a7bfc9
--- /dev/null
+++ b/include/asm-x86/xen/events.h
@@ -0,0 +1,22 @@
+#ifndef __XEN_EVENTS_H
+#define __XEN_EVENTS_H
+
+enum ipi_vector {
+	XEN_RESCHEDULE_VECTOR,
+	XEN_CALL_FUNCTION_VECTOR,
+
+	XEN_NR_IPIS,
+};
+
+static inline int xen_irqs_disabled(struct pt_regs *regs)
+{
+	return raw_irqs_disabled_flags(regs->flags);
+}
+
+static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
+{
+	regs->orig_ax = ~irq;
+	do_IRQ(regs);
+}
+
+#endif /* __XEN_EVENTS_H */
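
xen_do_IRQ() re-enters the regular interrupt path by writing the complemented irq number into orig_ax (negative values mark hardware interrupts rather than syscalls) and calling do_IRQ(). A heavily simplified, hypothetical sketch of the event-channel upcall that would drive it — evtchn_to_irq is assumed here to be the port-to-irq table maintained by drivers/xen/events.c:

    /* Hypothetical sketch: dispatch one pending event channel. */
    static void demo_dispatch_event(struct pt_regs *regs, unsigned port)
    {
    	int irq = evtchn_to_irq[port];	/* assumed lookup table */

    	if (irq != -1)
    		xen_do_IRQ(irq, regs);	/* hands off to do_IRQ() */
    }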
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
new file mode 100644
index 000000000000..2444d4593a3b
--- /dev/null
+++ b/include/asm-x86/xen/grant_table.h
@@ -0,0 +1,7 @@
+#ifndef __XEN_GRANT_TABLE_H
+#define __XEN_GRANT_TABLE_H
+
+#define xen_alloc_vm_area(size)	alloc_vm_area(size)
+#define xen_free_vm_area(area)	free_vm_area(area)
+
+#endif /* __XEN_GRANT_TABLE_H */
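
On x86 the xen_ wrappers are plain aliases; the indirection exists so ia64 can supply its own vm-area handling. A sketch of the call pattern they serve, assuming a caller that needs virtual space for nr_frames of grant-table mappings (the demo_ function is invented):

    /* Sketch: reserve kernel virtual space for grant-table frames. */
    static int demo_reserve_grant_space(unsigned int nr_frames)
    {
    	struct vm_struct *area = xen_alloc_vm_area(PAGE_SIZE * nr_frames);

    	if (!area)
    		return -ENOMEM;
    	/* ... ask the hypervisor to map grant frames at area->addr ... */
    	xen_free_vm_area(area);
    	return 0;
    }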
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index bc0ee7d961ca..c2ccd997ed35 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -164,6 +164,12 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
 }
 
 static inline int
+HYPERVISOR_callback_op(int cmd, void *arg)
+{
+	return _hypercall2(int, callback_op, cmd, arg);
+}
+
+static inline int
 HYPERVISOR_fpu_taskswitch(int set)
 {
 	return _hypercall1(int, fpu_taskswitch, set);
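
HYPERVISOR_callback_op() wraps the callback_op hypercall used to (un)register guest entry points. A sketch of registering an event upcall, assuming Xen's callback interface definitions (struct callback_register, CALLBACKTYPE_event and CALLBACKOP_register from the Xen interface headers) and the 64-bit-style address field; on 32-bit the address is the {cs, eip} pair added to interface.h below:

    /* Sketch, assuming the Xen callback interface definitions. */
    static int demo_register_event_upcall(unsigned long entry)
    {
    	struct callback_register event = {
    		.type    = CALLBACKTYPE_event,
    		.address = entry,	/* 64-bit form; 32-bit uses {cs, eip} */
    	};

    	return HYPERVISOR_callback_op(CALLBACKOP_register, &event);
    }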
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 165c3968e138..6227000a1e84 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -22,6 +22,30 @@
 #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
 #define GUEST_HANDLE(name) __guest_handle_ ## name
 
+#ifdef __XEN__
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val)			\
+	do {						\
+		if (sizeof(hnd) == 8)			\
+			*(uint64_t *)&(hnd) = 0;	\
+		(hnd).p = val;				\
+	} while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val)	do { (hnd).p = val; } while (0)
+#endif
+#else
+#if defined(__i386__)
+#define set_xen_guest_handle(hnd, val)			\
+	do {						\
+		if (sizeof(hnd) == 8)			\
+			*(uint64_t *)&(hnd) = 0;	\
+		(hnd) = val;				\
+	} while (0)
+#elif defined(__x86_64__)
+#define set_xen_guest_handle(hnd, val)	do { (hnd) = val; } while (0)
+#endif
+#endif
+
 #ifndef __ASSEMBLY__
 /* Guest handles for primitive C types. */
 __DEFINE_GUEST_HANDLE(uchar, unsigned char);
@@ -171,6 +195,10 @@ struct arch_vcpu_info {
 	unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
 };
 
+struct xen_callback {
+	unsigned long cs;
+	unsigned long eip;
+};
 #endif /* !__ASSEMBLY__ */
 
 /*
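
The i386 flavour of set_xen_guest_handle() zeroes the handle as a full 64-bit slot before storing the pointer, so a 64-bit hypervisor reading the shared structure never sees stale upper bits; the zeroing only compiles in for handle types that are actually 8 bytes wide. A usage sketch — demo_xen_arg is an invented argument struct, and GUEST_HANDLE(ulong) assumes the header's primitive-type handles include ulong, as in the full file:

    /* Sketch: demo_xen_arg is hypothetical; the handle macros are real. */
    struct demo_xen_arg {
    	GUEST_HANDLE(ulong) extent_start;
    	unsigned int nr_extents;
    };

    static void demo_fill_arg(struct demo_xen_arg *arg,
    			  unsigned long *frames, unsigned int n)
    {
    	set_xen_guest_handle(arg->extent_start, frames);
    	arg->nr_extents = n;
    }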
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
new file mode 100644
index 000000000000..01799305f02a
--- /dev/null
+++ b/include/asm-x86/xen/page.h
@@ -0,0 +1,168 @@
+#ifndef __XEN_PAGE_H
+#define __XEN_PAGE_H
+
+#include <linux/pfn.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+
+#include <xen/features.h>
+
+/* Xen machine address */
+typedef struct xmaddr {
+	phys_addr_t maddr;
+} xmaddr_t;
+
+/* Xen pseudo-physical address */
+typedef struct xpaddr {
+	phys_addr_t paddr;
+} xpaddr_t;
+
+#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
+#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
+
+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
+#define INVALID_P2M_ENTRY	(~0UL)
+#define FOREIGN_FRAME_BIT	(1UL<<31)
+#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
+
+extern unsigned long *phys_to_machine_mapping;
+
+static inline unsigned long pfn_to_mfn(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return pfn;
+
+	return phys_to_machine_mapping[(unsigned int)(pfn)] &
+		~FOREIGN_FRAME_BIT;
+}
+
+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 1;
+
+	return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
+}
+
+static inline unsigned long mfn_to_pfn(unsigned long mfn)
+{
+	unsigned long pfn;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return mfn;
+
+#if 0
+	if (unlikely((mfn >> machine_to_phys_order) != 0))
+		return max_mapnr;
+#endif
+
+	pfn = 0;
+	/*
+	 * The array access can fail (e.g., device space beyond end of RAM).
+	 * In such cases it doesn't matter what we return (we return garbage),
+	 * but we must handle the fault without crashing!
+	 */
+	__get_user(pfn, &machine_to_phys_mapping[mfn]);
+
+	return pfn;
+}
+
+static inline xmaddr_t phys_to_machine(xpaddr_t phys)
+{
+	unsigned offset = phys.paddr & ~PAGE_MASK;
+	return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
+}
+
+static inline xpaddr_t machine_to_phys(xmaddr_t machine)
+{
+	unsigned offset = machine.maddr & ~PAGE_MASK;
+	return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
+}
+
+/*
+ * We detect special mappings in one of two ways:
+ *  1. If the MFN is an I/O page then Xen will set the m2p entry
+ *     to be outside our maximum possible pseudophys range.
+ *  2. If the MFN belongs to a different domain then we will certainly
+ *     not have MFN in our p2m table. Conversely, if the page is ours,
+ *     then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ *      use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ *      require. In all the cases we care about, the FOREIGN_FRAME bit is
+ *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
+ */
+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+{
+	extern unsigned long max_mapnr;
+	unsigned long pfn = mfn_to_pfn(mfn);
+	if ((pfn < max_mapnr)
+	    && !xen_feature(XENFEAT_auto_translated_physmap)
+	    && (phys_to_machine_mapping[pfn] != mfn))
+		return max_mapnr; /* force !pfn_valid() */
+	return pfn;
+}
+
+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
+		return;
+	}
+	phys_to_machine_mapping[pfn] = mfn;
+}
+
+/* VIRT <-> MACHINE conversion */
+#define virt_to_machine(v)	(phys_to_machine(XPADDR(__pa(v))))
+#define virt_to_mfn(v)		(pfn_to_mfn(PFN_DOWN(__pa(v))))
+#define mfn_to_virt(m)		(__va(mfn_to_pfn(m) << PAGE_SHIFT))
+
+static inline unsigned long pte_mfn(pte_t pte)
+{
+	return (pte.pte & ~_PAGE_NX) >> PAGE_SHIFT;
+}
+
+static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
+{
+	pte_t pte;
+
+	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
+		(pgprot_val(pgprot) & __supported_pte_mask);
+
+	return pte;
+}
+
+static inline pteval_t pte_val_ma(pte_t pte)
+{
+	return pte.pte;
+}
+
+static inline pte_t __pte_ma(pteval_t x)
+{
+	return (pte_t) { .pte = x };
+}
+
+#ifdef CONFIG_X86_PAE
+#define pmd_val_ma(v)	((v).pmd)
+#define pud_val_ma(v)	((v).pgd.pgd)
+#define __pmd_ma(x)	((pmd_t) { (x) } )
+#else	/* !X86_PAE */
+#define pmd_val_ma(v)	((v).pud.pgd.pgd)
+#endif	/* CONFIG_X86_PAE */
+
+#define pgd_val_ma(x)	((x).pgd)
+
+
+xmaddr_t arbitrary_virt_to_machine(unsigned long address);
+void make_lowmem_page_readonly(void *vaddr);
+void make_lowmem_page_readwrite(void *vaddr);
+
+#endif /* __XEN_PAGE_H */
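
The conversion helpers preserve the in-page offset and remap only the frame number, through the p2m array (pfn_to_mfn) in one direction and the hypervisor's m2p table (mfn_to_pfn) in the other. A round-trip sketch using only the macros and helpers above (the demo_ function is invented):

    /* Sketch: pseudo-physical -> machine -> pseudo-physical round trip. */
    static void demo_round_trip(void *vaddr)
    {
    	xmaddr_t mach = virt_to_machine(vaddr);
    	xpaddr_t back = machine_to_phys(mach);

    	/* The page offset survives; only the frame number is remapped. */
    	BUG_ON((back.paddr & ~PAGE_MASK) != (__pa(vaddr) & ~PAGE_MASK));
    }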