Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/paravirt.h           |  43
-rw-r--r--  include/asm-x86/pgalloc.h            | 111
-rw-r--r--  include/asm-x86/pgalloc_32.h         |  95
-rw-r--r--  include/asm-x86/pgalloc_64.h         | 133
-rw-r--r--  include/asm-x86/pgtable.h            |  54
-rw-r--r--  include/asm-x86/pgtable_32.h         |  18
-rw-r--r--  include/asm-x86/pgtable_64.h         |   2
-rw-r--r--  include/asm-x86/xen/events.h         |  22
-rw-r--r--  include/asm-x86/xen/grant_table.h    |   7
-rw-r--r--  include/asm-x86/xen/hypercall.h      |   6
-rw-r--r--  include/asm-x86/xen/interface.h      |  28
-rw-r--r--  include/asm-x86/xen/page.h           | 168
-rw-r--r--  include/xen/balloon.h                |  61
-rw-r--r--  include/xen/events.h                 |   9
-rw-r--r--  include/xen/grant_table.h            |   7
-rw-r--r--  include/xen/interface/callback.h     | 102
-rw-r--r--  include/xen/interface/grant_table.h  |  11
-rw-r--r--  include/xen/interface/io/fbif.h      | 124
-rw-r--r--  include/xen/interface/io/kbdif.h     | 114
-rw-r--r--  include/xen/interface/io/protocols.h |  21
-rw-r--r--  include/xen/interface/memory.h       |  12
-rw-r--r--  include/xen/interface/vcpu.h         |   5
-rw-r--r--  include/xen/interface/xen.h          |  22
-rw-r--r--  include/xen/interface/xencomm.h      |  41
-rw-r--r--  include/xen/page.h                   | 181
-rw-r--r--  include/xen/xen-ops.h                |   8
-rw-r--r--  include/xen/xenbus.h                 |   1
-rw-r--r--  include/xen/xencomm.h                |  77
28 files changed, 990 insertions, 493 deletions
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 3d419398499b..0f13b945e240 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -220,11 +220,13 @@ struct pv_mmu_ops {
220 unsigned long va); 220 unsigned long va);
221 221
222 /* Hooks for allocating/releasing pagetable pages */ 222 /* Hooks for allocating/releasing pagetable pages */
223 void (*alloc_pt)(struct mm_struct *mm, u32 pfn); 223 void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
224 void (*alloc_pd)(struct mm_struct *mm, u32 pfn); 224 void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
225 void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count); 225 void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
226 void (*release_pt)(u32 pfn); 226 void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
227 void (*release_pd)(u32 pfn); 227 void (*release_pte)(u32 pfn);
228 void (*release_pmd)(u32 pfn);
229 void (*release_pud)(u32 pfn);
228 230
229 /* Pagetable manipulation functions */ 231 /* Pagetable manipulation functions */
230 void (*set_pte)(pte_t *ptep, pte_t pteval); 232 void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -910,28 +912,37 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
910 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va); 912 PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
911} 913}
912 914
913static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn) 915static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
914{ 916{
915 PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn); 917 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
916} 918}
917static inline void paravirt_release_pt(unsigned pfn) 919static inline void paravirt_release_pte(unsigned pfn)
918{ 920{
919 PVOP_VCALL1(pv_mmu_ops.release_pt, pfn); 921 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
920} 922}
921 923
922static inline void paravirt_alloc_pd(struct mm_struct *mm, unsigned pfn) 924static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
923{ 925{
924 PVOP_VCALL2(pv_mmu_ops.alloc_pd, mm, pfn); 926 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
925} 927}
926 928
927static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn, 929static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
928 unsigned start, unsigned count) 930 unsigned start, unsigned count)
929{ 931{
930 PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count); 932 PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
931} 933}
932static inline void paravirt_release_pd(unsigned pfn) 934static inline void paravirt_release_pmd(unsigned pfn)
933{ 935{
934 PVOP_VCALL1(pv_mmu_ops.release_pd, pfn); 936 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
937}
938
939static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
940{
941 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
942}
943static inline void paravirt_release_pud(unsigned pfn)
944{
945 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
935} 946}
936 947
937#ifdef CONFIG_HIGHPTE 948#ifdef CONFIG_HIGHPTE
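
Aside: the renamed hooks map one-to-one onto the page-table levels (pte/pmd/pud). Below is a minimal sketch of how a hypothetical paravirt backend might wire them up; only the pv_mmu_ops field names come from the hunk above, the example_* functions are made up.

/* Sketch only -- not part of this commit. */
static void example_alloc_pte(struct mm_struct *mm, u32 pfn)
{
	/* Called just before the page at 'pfn' is installed as a PTE page;
	 * a hypervisor backend could pin it or remap it read-only here. */
}

static void example_release_pte(u32 pfn)
{
	/* Undo whatever example_alloc_pte() did once the page is freed. */
}

static void example_init_mmu_ops(void)
{
	pv_mmu_ops.alloc_pte   = example_alloc_pte;
	pv_mmu_ops.release_pte = example_release_pte;
	/* alloc_pmd/release_pmd and alloc_pud/release_pud follow the same
	 * pattern for the higher page-table levels. */
}
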
diff --git a/include/asm-x86/pgalloc.h b/include/asm-x86/pgalloc.h
index 5886eed05886..91e4641f3f31 100644
--- a/include/asm-x86/pgalloc.h
+++ b/include/asm-x86/pgalloc.h
@@ -1,5 +1,110 @@
1#ifdef CONFIG_X86_32 1#ifndef _ASM_X86_PGALLOC_H
2# include "pgalloc_32.h" 2#define _ASM_X86_PGALLOC_H
3
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
6#include <linux/pagemap.h>
7
8#ifdef CONFIG_PARAVIRT
9#include <asm/paravirt.h>
3#else 10#else
4# include "pgalloc_64.h" 11static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
12static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
13static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
14 unsigned long start, unsigned long count) {}
15static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
16static inline void paravirt_release_pte(unsigned long pfn) {}
17static inline void paravirt_release_pmd(unsigned long pfn) {}
18static inline void paravirt_release_pud(unsigned long pfn) {}
5#endif 19#endif
20
21/*
22 * Allocate and free page tables.
23 */
24extern pgd_t *pgd_alloc(struct mm_struct *);
25extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
26
27extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
28extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
29
30/* Should really implement gc for free page table pages. This could be
31 done with a reference count in struct page. */
32
33static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
34{
35 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
36 free_page((unsigned long)pte);
37}
38
39static inline void pte_free(struct mm_struct *mm, struct page *pte)
40{
41 __free_page(pte);
42}
43
44extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
45
46static inline void pmd_populate_kernel(struct mm_struct *mm,
47 pmd_t *pmd, pte_t *pte)
48{
49 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
50 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
51}
52
53static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
54 struct page *pte)
55{
56 unsigned long pfn = page_to_pfn(pte);
57
58 paravirt_alloc_pte(mm, pfn);
59 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
60}
61
62#define pmd_pgtable(pmd) pmd_page(pmd)
63
64#if PAGETABLE_LEVELS > 2
65static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
66{
67 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
68}
69
70static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
71{
72 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
73 free_page((unsigned long)pmd);
74}
75
76extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
77
78#ifdef CONFIG_X86_PAE
79extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
80#else /* !CONFIG_X86_PAE */
81static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
82{
83 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
84 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
85}
86#endif /* CONFIG_X86_PAE */
87
88#if PAGETABLE_LEVELS > 3
89static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
90{
91 paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
92 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
93}
94
95static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
96{
97 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
98}
99
100static inline void pud_free(struct mm_struct *mm, pud_t *pud)
101{
102 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
103 free_page((unsigned long)pud);
104}
105
106extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
107#endif /* PAGETABLE_LEVELS > 3 */
108#endif /* PAGETABLE_LEVELS > 2 */
109
110#endif /* _ASM_X86_PGALLOC_H */
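
Aside: a rough sketch of how the allocation and populate helpers above are typically paired by a caller (approximately what mm/memory.c does when it needs a new kernel PTE page). The function name is hypothetical and error handling is trimmed.

/* Sketch only -- not part of this commit. */
static int example_alloc_kernel_pte(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);

	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd))		/* raced with another allocator */
		pte_free_kernel(&init_mm, new);
	else
		pmd_populate_kernel(&init_mm, pmd, new); /* fires paravirt_alloc_pte() */
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}
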
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
deleted file mode 100644
index 6bea6e5b5ee5..000000000000
--- a/include/asm-x86/pgalloc_32.h
+++ /dev/null
@@ -1,95 +0,0 @@
1#ifndef _I386_PGALLOC_H
2#define _I386_PGALLOC_H
3
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
6#include <linux/pagemap.h>
7#include <asm/tlb.h>
8#include <asm-generic/tlb.h>
9
10#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
13#define paravirt_alloc_pt(mm, pfn) do { } while (0)
14#define paravirt_alloc_pd(mm, pfn) do { } while (0)
15#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
16#define paravirt_release_pt(pfn) do { } while (0)
17#define paravirt_release_pd(pfn) do { } while (0)
18#endif
19
20static inline void pmd_populate_kernel(struct mm_struct *mm,
21 pmd_t *pmd, pte_t *pte)
22{
23 paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
24 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
25}
26
27static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
28{
29 unsigned long pfn = page_to_pfn(pte);
30
31 paravirt_alloc_pt(mm, pfn);
32 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
33}
34#define pmd_pgtable(pmd) pmd_page(pmd)
35
36/*
37 * Allocate and free page tables.
38 */
39extern pgd_t *pgd_alloc(struct mm_struct *);
40extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
41
42extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
43extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
44
45static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
46{
47 free_page((unsigned long)pte);
48}
49
50static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
51{
52 pgtable_page_dtor(pte);
53 __free_page(pte);
54}
55
56
57extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
58
59#ifdef CONFIG_X86_PAE
60/*
61 * In the PAE case we free the pmds as part of the pgd.
62 */
63static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
64{
65 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
66}
67
68static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
69{
70 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
71 free_page((unsigned long)pmd);
72}
73
74extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
75
76static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
77{
78 paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
79
80 /* Note: almost everything apart from _PAGE_PRESENT is
81 reserved at the pmd (PDPT) level. */
82 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
83
84 /*
85 * According to Intel App note "TLBs, Paging-Structure Caches,
86 * and Their Invalidation", April 2007, document 317080-001,
87 * section 8.1: in PAE mode we explicitly have to flush the
88 * TLB via cr3 if the top-level pgd is changed...
89 */
90 if (mm == current->active_mm)
91 write_cr3(read_cr3());
92}
93#endif /* CONFIG_X86_PAE */
94
95#endif /* _I386_PGALLOC_H */
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h
deleted file mode 100644
index 8d6722320dcc..000000000000
--- a/include/asm-x86/pgalloc_64.h
+++ /dev/null
@@ -1,133 +0,0 @@
1#ifndef _X86_64_PGALLOC_H
2#define _X86_64_PGALLOC_H
3
4#include <asm/pda.h>
5#include <linux/threads.h>
6#include <linux/mm.h>
7
8#define pmd_populate_kernel(mm, pmd, pte) \
9 set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
10#define pud_populate(mm, pud, pmd) \
11 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
12#define pgd_populate(mm, pgd, pud) \
13 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))
14
15#define pmd_pgtable(pmd) pmd_page(pmd)
16
17static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
18{
19 set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
20}
21
22static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
23{
24 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
25 free_page((unsigned long)pmd);
26}
27
28static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
29{
30 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
31}
32
33static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
34{
35 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
36}
37
38static inline void pud_free(struct mm_struct *mm, pud_t *pud)
39{
40 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
41 free_page((unsigned long)pud);
42}
43
44static inline void pgd_list_add(pgd_t *pgd)
45{
46 struct page *page = virt_to_page(pgd);
47 unsigned long flags;
48
49 spin_lock_irqsave(&pgd_lock, flags);
50 list_add(&page->lru, &pgd_list);
51 spin_unlock_irqrestore(&pgd_lock, flags);
52}
53
54static inline void pgd_list_del(pgd_t *pgd)
55{
56 struct page *page = virt_to_page(pgd);
57 unsigned long flags;
58
59 spin_lock_irqsave(&pgd_lock, flags);
60 list_del(&page->lru);
61 spin_unlock_irqrestore(&pgd_lock, flags);
62}
63
64static inline pgd_t *pgd_alloc(struct mm_struct *mm)
65{
66 unsigned boundary;
67 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
68 if (!pgd)
69 return NULL;
70 pgd_list_add(pgd);
71 /*
72 * Copy kernel pointers in from init.
73 * Could keep a freelist or slab cache of those because the kernel
74 * part never changes.
75 */
76 boundary = pgd_index(__PAGE_OFFSET);
77 memset(pgd, 0, boundary * sizeof(pgd_t));
78 memcpy(pgd + boundary,
79 init_level4_pgt + boundary,
80 (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
81 return pgd;
82}
83
84static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
85{
86 BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
87 pgd_list_del(pgd);
88 free_page((unsigned long)pgd);
89}
90
91static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
92{
93 return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
94}
95
96static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
97{
98 struct page *page;
99 void *p;
100
101 p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
102 if (!p)
103 return NULL;
104 page = virt_to_page(p);
105 pgtable_page_ctor(page);
106 return page;
107}
108
109/* Should really implement gc for free page table pages. This could be
110 done with a reference count in struct page. */
111
112static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
113{
114 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
115 free_page((unsigned long)pte);
116}
117
118static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
119{
120 pgtable_page_dtor(pte);
121 __free_page(pte);
122}
123
124#define __pte_free_tlb(tlb,pte) \
125do { \
126 pgtable_page_dtor((pte)); \
127 tlb_remove_page((tlb), (pte)); \
128} while (0)
129
130#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
131#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
132
133#endif /* _X86_64_PGALLOC_H */
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index f1d9f4a03f6f..b8a08bd7bd48 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_X86_PGTABLE_H 1#ifndef _ASM_X86_PGTABLE_H
2#define _ASM_X86_PGTABLE_H 2#define _ASM_X86_PGTABLE_H
3 3
4#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
5#define FIRST_USER_ADDRESS 0 4#define FIRST_USER_ADDRESS 0
6 5
7#define _PAGE_BIT_PRESENT 0 /* is present */ 6#define _PAGE_BIT_PRESENT 0 /* is present */
@@ -330,6 +329,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
330# include "pgtable_64.h" 329# include "pgtable_64.h"
331#endif 330#endif
332 331
332#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
333#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
334
333#ifndef __ASSEMBLY__ 335#ifndef __ASSEMBLY__
334 336
335enum { 337enum {
@@ -389,37 +391,17 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
389 * bit at the same time. 391 * bit at the same time.
390 */ 392 */
391#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 393#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
392#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \ 394extern int ptep_set_access_flags(struct vm_area_struct *vma,
393({ \ 395 unsigned long address, pte_t *ptep,
394 int __changed = !pte_same(*(ptep), entry); \ 396 pte_t entry, int dirty);
395 if (__changed && dirty) { \
396 *ptep = entry; \
397 pte_update_defer((vma)->vm_mm, (address), (ptep)); \
398 flush_tlb_page(vma, address); \
399 } \
400 __changed; \
401})
402 397
403#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 398#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
404#define ptep_test_and_clear_young(vma, addr, ptep) ({ \ 399extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
405 int __ret = 0; \ 400 unsigned long addr, pte_t *ptep);
406 if (pte_young(*(ptep))) \
407 __ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, \
408 &(ptep)->pte); \
409 if (__ret) \
410 pte_update((vma)->vm_mm, addr, ptep); \
411 __ret; \
412})
413 401
414#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 402#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
415#define ptep_clear_flush_young(vma, address, ptep) \ 403extern int ptep_clear_flush_young(struct vm_area_struct *vma,
416({ \ 404 unsigned long address, pte_t *ptep);
417 int __young; \
418 __young = ptep_test_and_clear_young((vma), (address), (ptep)); \
419 if (__young) \
420 flush_tlb_page(vma, address); \
421 __young; \
422})
423 405
424#define __HAVE_ARCH_PTEP_GET_AND_CLEAR 406#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
425static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 407static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
@@ -456,6 +438,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
456 pte_update(mm, addr, ptep); 438 pte_update(mm, addr, ptep);
457} 439}
458 440
441/*
442 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
443 *
444 * dst - pointer to pgd range anwhere on a pgd page
445 * src - ""
446 * count - the number of pgds to copy.
447 *
448 * dst and src can be on the same page, but the range must not overlap,
449 * and must not cross a page boundary.
450 */
451static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
452{
453 memcpy(dst, src, count * sizeof(pgd_t));
454}
455
456
459#include <asm-generic/pgtable.h> 457#include <asm-generic/pgtable.h>
460#endif /* __ASSEMBLY__ */ 458#endif /* __ASSEMBLY__ */
461 459
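
Aside: KERNEL_PGD_BOUNDARY and KERNEL_PGD_PTRS are meant to be used together with clone_pgd_range() when a freshly allocated pgd needs the kernel half of an existing one. A minimal sketch follows; using swapper_pg_dir as the source is an assumption here, not taken from this hunk.

/* Sketch only -- not part of this commit. */
static void example_copy_kernel_mappings(pgd_t *new_pgd)
{
	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
}
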
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index c4a643674458..168b6447cf18 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -48,9 +48,6 @@ void paging_init(void);
48#define PGDIR_SIZE (1UL << PGDIR_SHIFT) 48#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
49#define PGDIR_MASK (~(PGDIR_SIZE - 1)) 49#define PGDIR_MASK (~(PGDIR_SIZE - 1))
50 50
51#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
52#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
53
54/* Just any arbitrary offset to the start of the vmalloc VM area: the 51/* Just any arbitrary offset to the start of the vmalloc VM area: the
55 * current 8MB value just means that there will be a 8MB "hole" after the 52 * current 8MB value just means that there will be a 8MB "hole" after the
56 * physical memory until the kernel virtual memory starts. That means that 53 * physical memory until the kernel virtual memory starts. That means that
@@ -109,21 +106,6 @@ extern int pmd_bad(pmd_t pmd);
109#endif 106#endif
110 107
111/* 108/*
112 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
113 *
114 * dst - pointer to pgd range anwhere on a pgd page
115 * src - ""
116 * count - the number of pgds to copy.
117 *
118 * dst and src can be on the same page, but the range must not overlap,
119 * and must not cross a page boundary.
120 */
121static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
122{
123 memcpy(dst, src, count * sizeof(pgd_t));
124}
125
126/*
127 * Macro to mark a page protection value as "uncacheable". 109 * Macro to mark a page protection value as "uncacheable".
128 * On processors which do not support it, this is a no-op. 110 * On processors which do not support it, this is a no-op.
129 */ 111 */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9fd87d0b6477..a3bbf8766c1d 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -24,7 +24,7 @@ extern void paging_init(void);
24 24
25#endif /* !__ASSEMBLY__ */ 25#endif /* !__ASSEMBLY__ */
26 26
27#define SHARED_KERNEL_PMD 1 27#define SHARED_KERNEL_PMD 0
28 28
29/* 29/*
30 * PGDIR_SHIFT determines what a top-level page table entry can map 30 * PGDIR_SHIFT determines what a top-level page table entry can map
diff --git a/include/asm-x86/xen/events.h b/include/asm-x86/xen/events.h
new file mode 100644
index 000000000000..596312a7bfc9
--- /dev/null
+++ b/include/asm-x86/xen/events.h
@@ -0,0 +1,22 @@
1#ifndef __XEN_EVENTS_H
2#define __XEN_EVENTS_H
3
4enum ipi_vector {
5 XEN_RESCHEDULE_VECTOR,
6 XEN_CALL_FUNCTION_VECTOR,
7
8 XEN_NR_IPIS,
9};
10
11static inline int xen_irqs_disabled(struct pt_regs *regs)
12{
13 return raw_irqs_disabled_flags(regs->flags);
14}
15
16static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
17{
18 regs->orig_ax = ~irq;
19 do_IRQ(regs);
20}
21
22#endif /* __XEN_EVENTS_H */
diff --git a/include/asm-x86/xen/grant_table.h b/include/asm-x86/xen/grant_table.h
new file mode 100644
index 000000000000..2444d4593a3b
--- /dev/null
+++ b/include/asm-x86/xen/grant_table.h
@@ -0,0 +1,7 @@
1#ifndef __XEN_GRANT_TABLE_H
2#define __XEN_GRANT_TABLE_H
3
4#define xen_alloc_vm_area(size) alloc_vm_area(size)
5#define xen_free_vm_area(area) free_vm_area(area)
6
7#endif /* __XEN_GRANT_TABLE_H */
diff --git a/include/asm-x86/xen/hypercall.h b/include/asm-x86/xen/hypercall.h
index bc0ee7d961ca..c2ccd997ed35 100644
--- a/include/asm-x86/xen/hypercall.h
+++ b/include/asm-x86/xen/hypercall.h
@@ -164,6 +164,12 @@ HYPERVISOR_set_callbacks(unsigned long event_selector,
164} 164}
165 165
166static inline int 166static inline int
167HYPERVISOR_callback_op(int cmd, void *arg)
168{
169 return _hypercall2(int, callback_op, cmd, arg);
170}
171
172static inline int
167HYPERVISOR_fpu_taskswitch(int set) 173HYPERVISOR_fpu_taskswitch(int set)
168{ 174{
169 return _hypercall1(int, fpu_taskswitch, set); 175 return _hypercall1(int, fpu_taskswitch, set);
diff --git a/include/asm-x86/xen/interface.h b/include/asm-x86/xen/interface.h
index 165c3968e138..6227000a1e84 100644
--- a/include/asm-x86/xen/interface.h
+++ b/include/asm-x86/xen/interface.h
@@ -22,6 +22,30 @@
22#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 22#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
23#define GUEST_HANDLE(name) __guest_handle_ ## name 23#define GUEST_HANDLE(name) __guest_handle_ ## name
24 24
25#ifdef __XEN__
26#if defined(__i386__)
27#define set_xen_guest_handle(hnd, val) \
28 do { \
29 if (sizeof(hnd) == 8) \
30 *(uint64_t *)&(hnd) = 0; \
31 (hnd).p = val; \
32 } while (0)
33#elif defined(__x86_64__)
34#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
35#endif
36#else
37#if defined(__i386__)
38#define set_xen_guest_handle(hnd, val) \
39 do { \
40 if (sizeof(hnd) == 8) \
41 *(uint64_t *)&(hnd) = 0; \
42 (hnd) = val; \
43 } while (0)
44#elif defined(__x86_64__)
45#define set_xen_guest_handle(hnd, val) do { (hnd) = val; } while (0)
46#endif
47#endif
48
25#ifndef __ASSEMBLY__ 49#ifndef __ASSEMBLY__
26/* Guest handles for primitive C types. */ 50/* Guest handles for primitive C types. */
27__DEFINE_GUEST_HANDLE(uchar, unsigned char); 51__DEFINE_GUEST_HANDLE(uchar, unsigned char);
@@ -171,6 +195,10 @@ struct arch_vcpu_info {
171 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */ 195 unsigned long pad[5]; /* sizeof(struct vcpu_info) == 64 */
172}; 196};
173 197
198struct xen_callback {
199 unsigned long cs;
200 unsigned long eip;
201};
174#endif /* !__ASSEMBLY__ */ 202#endif /* !__ASSEMBLY__ */
175 203
176/* 204/*
diff --git a/include/asm-x86/xen/page.h b/include/asm-x86/xen/page.h
new file mode 100644
index 000000000000..01799305f02a
--- /dev/null
+++ b/include/asm-x86/xen/page.h
@@ -0,0 +1,168 @@
1#ifndef __XEN_PAGE_H
2#define __XEN_PAGE_H
3
4#include <linux/pfn.h>
5
6#include <asm/uaccess.h>
7#include <asm/pgtable.h>
8
9#include <xen/features.h>
10
11/* Xen machine address */
12typedef struct xmaddr {
13 phys_addr_t maddr;
14} xmaddr_t;
15
16/* Xen pseudo-physical address */
17typedef struct xpaddr {
18 phys_addr_t paddr;
19} xpaddr_t;
20
21#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
22#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
23
24/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
25#define INVALID_P2M_ENTRY (~0UL)
26#define FOREIGN_FRAME_BIT (1UL<<31)
27#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
28
29extern unsigned long *phys_to_machine_mapping;
30
31static inline unsigned long pfn_to_mfn(unsigned long pfn)
32{
33 if (xen_feature(XENFEAT_auto_translated_physmap))
34 return pfn;
35
36 return phys_to_machine_mapping[(unsigned int)(pfn)] &
37 ~FOREIGN_FRAME_BIT;
38}
39
40static inline int phys_to_machine_mapping_valid(unsigned long pfn)
41{
42 if (xen_feature(XENFEAT_auto_translated_physmap))
43 return 1;
44
45 return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
46}
47
48static inline unsigned long mfn_to_pfn(unsigned long mfn)
49{
50 unsigned long pfn;
51
52 if (xen_feature(XENFEAT_auto_translated_physmap))
53 return mfn;
54
55#if 0
56 if (unlikely((mfn >> machine_to_phys_order) != 0))
57 return max_mapnr;
58#endif
59
60 pfn = 0;
61 /*
62 * The array access can fail (e.g., device space beyond end of RAM).
63 * In such cases it doesn't matter what we return (we return garbage),
64 * but we must handle the fault without crashing!
65 */
66 __get_user(pfn, &machine_to_phys_mapping[mfn]);
67
68 return pfn;
69}
70
71static inline xmaddr_t phys_to_machine(xpaddr_t phys)
72{
73 unsigned offset = phys.paddr & ~PAGE_MASK;
74 return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
75}
76
77static inline xpaddr_t machine_to_phys(xmaddr_t machine)
78{
79 unsigned offset = machine.maddr & ~PAGE_MASK;
80 return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
81}
82
83/*
84 * We detect special mappings in one of two ways:
85 * 1. If the MFN is an I/O page then Xen will set the m2p entry
86 * to be outside our maximum possible pseudophys range.
87 * 2. If the MFN belongs to a different domain then we will certainly
88 * not have MFN in our p2m table. Conversely, if the page is ours,
89 * then we'll have p2m(m2p(MFN))==MFN.
90 * If we detect a special mapping then it doesn't have a 'struct page'.
91 * We force !pfn_valid() by returning an out-of-range pointer.
92 *
93 * NB. These checks require that, for any MFN that is not in our reservation,
94 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
95 * we are foreign-mapping the MFN, and the other domain as m2p(MFN) == PFN.
96 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
97 *
98 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
99 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
100 * require. In all the cases we care about, the FOREIGN_FRAME bit is
101 * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
102 */
103static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
104{
105 extern unsigned long max_mapnr;
106 unsigned long pfn = mfn_to_pfn(mfn);
107 if ((pfn < max_mapnr)
108 && !xen_feature(XENFEAT_auto_translated_physmap)
109 && (phys_to_machine_mapping[pfn] != mfn))
110 return max_mapnr; /* force !pfn_valid() */
111 return pfn;
112}
113
114static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
115{
116 if (xen_feature(XENFEAT_auto_translated_physmap)) {
117 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
118 return;
119 }
120 phys_to_machine_mapping[pfn] = mfn;
121}
122
123/* VIRT <-> MACHINE conversion */
124#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
125#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
126#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
127
128static inline unsigned long pte_mfn(pte_t pte)
129{
130 return (pte.pte & ~_PAGE_NX) >> PAGE_SHIFT;
131}
132
133static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
134{
135 pte_t pte;
136
137 pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
138 (pgprot_val(pgprot) & __supported_pte_mask);
139
140 return pte;
141}
142
143static inline pteval_t pte_val_ma(pte_t pte)
144{
145 return pte.pte;
146}
147
148static inline pte_t __pte_ma(pteval_t x)
149{
150 return (pte_t) { .pte = x };
151}
152
153#ifdef CONFIG_X86_PAE
154#define pmd_val_ma(v) ((v).pmd)
155#define pud_val_ma(v) ((v).pgd.pgd)
156#define __pmd_ma(x) ((pmd_t) { (x) } )
157#else /* !X86_PAE */
158#define pmd_val_ma(v) ((v).pud.pgd.pgd)
159#endif /* CONFIG_X86_PAE */
160
161#define pgd_val_ma(x) ((x).pgd)
162
163
164xmaddr_t arbitrary_virt_to_machine(unsigned long address);
165void make_lowmem_page_readonly(void *vaddr);
166void make_lowmem_page_readwrite(void *vaddr);
167
168#endif /* __XEN_PAGE_H */
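
Aside: a small sketch of how the conversion helpers above fit together when building a machine-address PTE for a kernel page; the function name and arguments are hypothetical.

/* Sketch only -- not part of this commit. */
static pte_t example_machine_pte(void *vaddr, pgprot_t prot)
{
	unsigned long pfn = PFN_DOWN(__pa(vaddr));

	/* pfn_to_mfn() is an identity mapping under
	 * XENFEAT_auto_translated_physmap, so this works in both modes. */
	return mfn_pte(pfn_to_mfn(pfn), prot);
}
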
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
new file mode 100644
index 000000000000..fe43b0f3c86a
--- /dev/null
+++ b/include/xen/balloon.h
@@ -0,0 +1,61 @@
1/******************************************************************************
2 * balloon.h
3 *
4 * Xen balloon driver - enables returning/claiming memory to/from Xen.
5 *
6 * Copyright (c) 2003, B Dragovic
7 * Copyright (c) 2003-2004, M Williamson, K Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#ifndef __XEN_BALLOON_H__
35#define __XEN_BALLOON_H__
36
37#include <linux/spinlock.h>
38
39#if 0
40/*
41 * Inform the balloon driver that it should allow some slop for device-driver
42 * memory activities.
43 */
44void balloon_update_driver_allowance(long delta);
45
46/* Allocate/free a set of empty pages in low memory (i.e., no RAM mapped). */
47struct page **alloc_empty_pages_and_pagevec(int nr_pages);
48void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
49
50void balloon_release_driver_page(struct page *page);
51
52/*
53 * Prevent the balloon driver from changing the memory reservation during
54 * a driver critical region.
55 */
56extern spinlock_t balloon_lock;
57#define balloon_lock(__flags) spin_lock_irqsave(&balloon_lock, __flags)
58#define balloon_unlock(__flags) spin_unlock_irqrestore(&balloon_lock, __flags)
59#endif
60
61#endif /* __XEN_BALLOON_H__ */
diff --git a/include/xen/events.h b/include/xen/events.h
index 2bde54d29be5..acd8e062c85f 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -5,13 +5,7 @@
5 5
6#include <xen/interface/event_channel.h> 6#include <xen/interface/event_channel.h>
7#include <asm/xen/hypercall.h> 7#include <asm/xen/hypercall.h>
8 8#include <asm/xen/events.h>
9enum ipi_vector {
10 XEN_RESCHEDULE_VECTOR,
11 XEN_CALL_FUNCTION_VECTOR,
12
13 XEN_NR_IPIS,
14};
15 9
16int bind_evtchn_to_irq(unsigned int evtchn); 10int bind_evtchn_to_irq(unsigned int evtchn);
17int bind_evtchn_to_irqhandler(unsigned int evtchn, 11int bind_evtchn_to_irqhandler(unsigned int evtchn,
@@ -37,6 +31,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
37void unbind_from_irqhandler(unsigned int irq, void *dev_id); 31void unbind_from_irqhandler(unsigned int irq, void *dev_id);
38 32
39void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector); 33void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
34int resend_irq_on_evtchn(unsigned int irq);
40 35
41static inline void notify_remote_via_evtchn(int port) 36static inline void notify_remote_via_evtchn(int port)
42{ 37{
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 761c83498e03..466204846121 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -39,6 +39,7 @@
39 39
40#include <asm/xen/hypervisor.h> 40#include <asm/xen/hypervisor.h>
41#include <xen/interface/grant_table.h> 41#include <xen/interface/grant_table.h>
42#include <asm/xen/grant_table.h>
42 43
43/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ 44/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
44#define NR_GRANT_FRAMES 4 45#define NR_GRANT_FRAMES 4
@@ -102,6 +103,12 @@ void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
102void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid, 103void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
103 unsigned long pfn); 104 unsigned long pfn);
104 105
106int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
107 unsigned long max_nr_gframes,
108 struct grant_entry **__shared);
109void arch_gnttab_unmap_shared(struct grant_entry *shared,
110 unsigned long nr_gframes);
111
105#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) 112#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
106 113
107#endif /* __ASM_GNTTAB_H__ */ 114#endif /* __ASM_GNTTAB_H__ */
diff --git a/include/xen/interface/callback.h b/include/xen/interface/callback.h
new file mode 100644
index 000000000000..4aadcba31af9
--- /dev/null
+++ b/include/xen/interface/callback.h
@@ -0,0 +1,102 @@
1/******************************************************************************
2 * callback.h
3 *
4 * Register guest OS callbacks with Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2006, Ian Campbell
25 */
26
27#ifndef __XEN_PUBLIC_CALLBACK_H__
28#define __XEN_PUBLIC_CALLBACK_H__
29
30#include "xen.h"
31
32/*
33 * Prototype for this hypercall is:
34 * long callback_op(int cmd, void *extra_args)
35 * @cmd == CALLBACKOP_??? (callback operation).
36 * @extra_args == Operation-specific extra arguments (NULL if none).
37 */
38
39/* ia64, x86: Callback for event delivery. */
40#define CALLBACKTYPE_event 0
41
42/* x86: Failsafe callback when guest state cannot be restored by Xen. */
43#define CALLBACKTYPE_failsafe 1
44
45/* x86/64 hypervisor: Syscall by 64-bit guest app ('64-on-64-on-64'). */
46#define CALLBACKTYPE_syscall 2
47
48/*
49 * x86/32 hypervisor: Only available on x86/32 when supervisor_mode_kernel
50 * feature is enabled. Do not use this callback type in new code.
51 */
52#define CALLBACKTYPE_sysenter_deprecated 3
53
54/* x86: Callback for NMI delivery. */
55#define CALLBACKTYPE_nmi 4
56
57/*
58 * x86: sysenter is only available as follows:
59 * - 32-bit hypervisor: with the supervisor_mode_kernel feature enabled
60 * - 64-bit hypervisor: 32-bit guest applications on Intel CPUs
61 * ('32-on-32-on-64', '32-on-64-on-64')
62 * [nb. also 64-bit guest applications on Intel CPUs
63 * ('64-on-64-on-64'), but syscall is preferred]
64 */
65#define CALLBACKTYPE_sysenter 5
66
67/*
68 * x86/64 hypervisor: Syscall by 32-bit guest app on AMD CPUs
69 * ('32-on-32-on-64', '32-on-64-on-64')
70 */
71#define CALLBACKTYPE_syscall32 7
72
73/*
74 * Disable event deliver during callback? This flag is ignored for event and
75 * NMI callbacks: event delivery is unconditionally disabled.
76 */
77#define _CALLBACKF_mask_events 0
78#define CALLBACKF_mask_events (1U << _CALLBACKF_mask_events)
79
80/*
81 * Register a callback.
82 */
83#define CALLBACKOP_register 0
84struct callback_register {
85 uint16_t type;
86 uint16_t flags;
87 struct xen_callback address;
88};
89
90/*
91 * Unregister a callback.
92 *
93 * Not all callbacks can be unregistered. -EINVAL will be returned if
94 * you attempt to unregister such a callback.
95 */
96#define CALLBACKOP_unregister 1
97struct callback_unregister {
98 uint16_t type;
99 uint16_t _unused;
100};
101
102#endif /* __XEN_PUBLIC_CALLBACK_H__ */
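
Aside: a hedged sketch of how a guest might register its event callback through the new HYPERVISOR_callback_op() hypercall added in the hypercall.h hunk above. The entry-point symbol is a placeholder; only the structure layout and constants come from this header.

/* Sketch only -- not part of this commit. */
static int example_register_event_callback(void)
{
	extern void example_event_entry(void);	/* hypothetical asm entry point */
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = {
			.cs  = __KERNEL_CS,
			.eip = (unsigned long)example_event_entry,
		},
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &event);
}
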
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index 219049802cf2..39da93c21de0 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -185,6 +185,7 @@ struct gnttab_map_grant_ref {
185 grant_handle_t handle; 185 grant_handle_t handle;
186 uint64_t dev_bus_addr; 186 uint64_t dev_bus_addr;
187}; 187};
188DEFINE_GUEST_HANDLE_STRUCT(gnttab_map_grant_ref);
188 189
189/* 190/*
190 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings 191 * GNTTABOP_unmap_grant_ref: Destroy one or more grant-reference mappings
@@ -206,6 +207,7 @@ struct gnttab_unmap_grant_ref {
206 /* OUT parameters. */ 207 /* OUT parameters. */
207 int16_t status; /* GNTST_* */ 208 int16_t status; /* GNTST_* */
208}; 209};
210DEFINE_GUEST_HANDLE_STRUCT(gnttab_unmap_grant_ref);
209 211
210/* 212/*
211 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least 213 * GNTTABOP_setup_table: Set up a grant table for <dom> comprising at least
@@ -223,8 +225,9 @@ struct gnttab_setup_table {
223 uint32_t nr_frames; 225 uint32_t nr_frames;
224 /* OUT parameters. */ 226 /* OUT parameters. */
225 int16_t status; /* GNTST_* */ 227 int16_t status; /* GNTST_* */
226 ulong *frame_list; 228 GUEST_HANDLE(ulong) frame_list;
227}; 229};
230DEFINE_GUEST_HANDLE_STRUCT(gnttab_setup_table);
228 231
229/* 232/*
230 * GNTTABOP_dump_table: Dump the contents of the grant table to the 233 * GNTTABOP_dump_table: Dump the contents of the grant table to the
@@ -237,6 +240,7 @@ struct gnttab_dump_table {
237 /* OUT parameters. */ 240 /* OUT parameters. */
238 int16_t status; /* GNTST_* */ 241 int16_t status; /* GNTST_* */
239}; 242};
243DEFINE_GUEST_HANDLE_STRUCT(gnttab_dump_table);
240 244
241/* 245/*
242 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The 246 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
@@ -255,7 +259,7 @@ struct gnttab_transfer {
255 /* OUT parameters. */ 259 /* OUT parameters. */
256 int16_t status; 260 int16_t status;
257}; 261};
258 262DEFINE_GUEST_HANDLE_STRUCT(gnttab_transfer);
259 263
260/* 264/*
261 * GNTTABOP_copy: Hypervisor based copy 265 * GNTTABOP_copy: Hypervisor based copy
@@ -296,6 +300,7 @@ struct gnttab_copy {
296 /* OUT parameters. */ 300 /* OUT parameters. */
297 int16_t status; 301 int16_t status;
298}; 302};
303DEFINE_GUEST_HANDLE_STRUCT(gnttab_copy);
299 304
300/* 305/*
301 * GNTTABOP_query_size: Query the current and maximum sizes of the shared 306 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
@@ -313,7 +318,7 @@ struct gnttab_query_size {
313 uint32_t max_nr_frames; 318 uint32_t max_nr_frames;
314 int16_t status; /* GNTST_* */ 319 int16_t status; /* GNTST_* */
315}; 320};
316 321DEFINE_GUEST_HANDLE_STRUCT(gnttab_query_size);
317 322
318/* 323/*
319 * Bitfield values for update_pin_status.flags. 324 * Bitfield values for update_pin_status.flags.
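
Aside: with frame_list now a GUEST_HANDLE(ulong), callers fill it through set_xen_guest_handle() (defined in the asm-x86/xen/interface.h hunk earlier) instead of assigning a raw pointer. A sketch of a GNTTABOP_setup_table call, roughly as the grant-table driver issues it:

/* Sketch only -- not part of this commit. */
unsigned long frames[NR_GRANT_FRAMES];
struct gnttab_setup_table setup;
int rc;

setup.dom = DOMID_SELF;
setup.nr_frames = NR_GRANT_FRAMES;
set_xen_guest_handle(setup.frame_list, frames);

rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
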
diff --git a/include/xen/interface/io/fbif.h b/include/xen/interface/io/fbif.h
new file mode 100644
index 000000000000..5a934dd7796d
--- /dev/null
+++ b/include/xen/interface/io/fbif.h
@@ -0,0 +1,124 @@
1/*
2 * fbif.h -- Xen virtual frame buffer device
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 *
22 * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
23 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
24 */
25
26#ifndef __XEN_PUBLIC_IO_FBIF_H__
27#define __XEN_PUBLIC_IO_FBIF_H__
28
29/* Out events (frontend -> backend) */
30
31/*
32 * Out events may be sent only when requested by backend, and receipt
33 * of an unknown out event is an error.
34 */
35
36/* Event type 1 currently not used */
37/*
38 * Framebuffer update notification event
39 * Capable frontend sets feature-update in xenstore.
40 * Backend requests it by setting request-update in xenstore.
41 */
42#define XENFB_TYPE_UPDATE 2
43
44struct xenfb_update {
45 uint8_t type; /* XENFB_TYPE_UPDATE */
46 int32_t x; /* source x */
47 int32_t y; /* source y */
48 int32_t width; /* rect width */
49 int32_t height; /* rect height */
50};
51
52#define XENFB_OUT_EVENT_SIZE 40
53
54union xenfb_out_event {
55 uint8_t type;
56 struct xenfb_update update;
57 char pad[XENFB_OUT_EVENT_SIZE];
58};
59
60/* In events (backend -> frontend) */
61
62/*
63 * Frontends should ignore unknown in events.
64 * No in events currently defined.
65 */
66
67#define XENFB_IN_EVENT_SIZE 40
68
69union xenfb_in_event {
70 uint8_t type;
71 char pad[XENFB_IN_EVENT_SIZE];
72};
73
74/* shared page */
75
76#define XENFB_IN_RING_SIZE 1024
77#define XENFB_IN_RING_LEN (XENFB_IN_RING_SIZE / XENFB_IN_EVENT_SIZE)
78#define XENFB_IN_RING_OFFS 1024
79#define XENFB_IN_RING(page) \
80 ((union xenfb_in_event *)((char *)(page) + XENFB_IN_RING_OFFS))
81#define XENFB_IN_RING_REF(page, idx) \
82 (XENFB_IN_RING((page))[(idx) % XENFB_IN_RING_LEN])
83
84#define XENFB_OUT_RING_SIZE 2048
85#define XENFB_OUT_RING_LEN (XENFB_OUT_RING_SIZE / XENFB_OUT_EVENT_SIZE)
86#define XENFB_OUT_RING_OFFS (XENFB_IN_RING_OFFS + XENFB_IN_RING_SIZE)
87#define XENFB_OUT_RING(page) \
88 ((union xenfb_out_event *)((char *)(page) + XENFB_OUT_RING_OFFS))
89#define XENFB_OUT_RING_REF(page, idx) \
90 (XENFB_OUT_RING((page))[(idx) % XENFB_OUT_RING_LEN])
91
92struct xenfb_page {
93 uint32_t in_cons, in_prod;
94 uint32_t out_cons, out_prod;
95
96 int32_t width; /* width of the framebuffer (in pixels) */
97 int32_t height; /* height of the framebuffer (in pixels) */
98 uint32_t line_length; /* length of a row of pixels (in bytes) */
99 uint32_t mem_length; /* length of the framebuffer (in bytes) */
100 uint8_t depth; /* depth of a pixel (in bits) */
101
102 /*
103 * Framebuffer page directory
104 *
105 * Each directory page holds PAGE_SIZE / sizeof(*pd)
106 * framebuffer pages, and can thus map up to PAGE_SIZE *
107 * PAGE_SIZE / sizeof(*pd) bytes. With PAGE_SIZE == 4096 and
108 * sizeof(unsigned long) == 4, that's 4 Megs. Two directory
109 * pages should be enough for a while.
110 */
111 unsigned long pd[2];
112};
113
114/*
115 * Wart: xenkbd needs to know resolution. Put it here until a better
116 * solution is found, but don't leak it to the backend.
117 */
118#ifdef __KERNEL__
119#define XENFB_WIDTH 800
120#define XENFB_HEIGHT 600
121#define XENFB_DEPTH 32
122#endif
123
124#endif
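
Aside: a sketch of how a frontend would queue an XENFB_TYPE_UPDATE event with the ring macros above. Memory barriers, overflow checks and the event-channel notification are omitted, and the function name is hypothetical.

/* Sketch only -- not part of this commit. */
static void example_send_update(struct xenfb_page *page,
				int x, int y, int width, int height)
{
	uint32_t prod = page->out_prod;
	union xenfb_out_event *event = &XENFB_OUT_RING_REF(page, prod);

	event->update.type = XENFB_TYPE_UPDATE;
	event->update.x = x;
	event->update.y = y;
	event->update.width = width;
	event->update.height = height;

	page->out_prod = prod + 1;	/* real code wmb()s and notifies the backend */
}
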
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
new file mode 100644
index 000000000000..fb97f4284ffd
--- /dev/null
+++ b/include/xen/interface/io/kbdif.h
@@ -0,0 +1,114 @@
1/*
2 * kbdif.h -- Xen virtual keyboard/mouse
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 *
22 * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
23 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
24 */
25
26#ifndef __XEN_PUBLIC_IO_KBDIF_H__
27#define __XEN_PUBLIC_IO_KBDIF_H__
28
29/* In events (backend -> frontend) */
30
31/*
32 * Frontends should ignore unknown in events.
33 */
34
35/* Pointer movement event */
36#define XENKBD_TYPE_MOTION 1
37/* Event type 2 currently not used */
38/* Key event (includes pointer buttons) */
39#define XENKBD_TYPE_KEY 3
40/*
41 * Pointer position event
42 * Capable backend sets feature-abs-pointer in xenstore.
43 * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting
44 * request-abs-update in xenstore.
45 */
46#define XENKBD_TYPE_POS 4
47
48struct xenkbd_motion {
49 uint8_t type; /* XENKBD_TYPE_MOTION */
50 int32_t rel_x; /* relative X motion */
51 int32_t rel_y; /* relative Y motion */
52};
53
54struct xenkbd_key {
55 uint8_t type; /* XENKBD_TYPE_KEY */
56 uint8_t pressed; /* 1 if pressed; 0 otherwise */
57 uint32_t keycode; /* KEY_* from linux/input.h */
58};
59
60struct xenkbd_position {
61 uint8_t type; /* XENKBD_TYPE_POS */
62 int32_t abs_x; /* absolute X position (in FB pixels) */
63 int32_t abs_y; /* absolute Y position (in FB pixels) */
64};
65
66#define XENKBD_IN_EVENT_SIZE 40
67
68union xenkbd_in_event {
69 uint8_t type;
70 struct xenkbd_motion motion;
71 struct xenkbd_key key;
72 struct xenkbd_position pos;
73 char pad[XENKBD_IN_EVENT_SIZE];
74};
75
76/* Out events (frontend -> backend) */
77
78/*
79 * Out events may be sent only when requested by backend, and receipt
80 * of an unknown out event is an error.
81 * No out events currently defined.
82 */
83
84#define XENKBD_OUT_EVENT_SIZE 40
85
86union xenkbd_out_event {
87 uint8_t type;
88 char pad[XENKBD_OUT_EVENT_SIZE];
89};
90
91/* shared page */
92
93#define XENKBD_IN_RING_SIZE 2048
94#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
95#define XENKBD_IN_RING_OFFS 1024
96#define XENKBD_IN_RING(page) \
97 ((union xenkbd_in_event *)((char *)(page) + XENKBD_IN_RING_OFFS))
98#define XENKBD_IN_RING_REF(page, idx) \
99 (XENKBD_IN_RING((page))[(idx) % XENKBD_IN_RING_LEN])
100
101#define XENKBD_OUT_RING_SIZE 1024
102#define XENKBD_OUT_RING_LEN (XENKBD_OUT_RING_SIZE / XENKBD_OUT_EVENT_SIZE)
103#define XENKBD_OUT_RING_OFFS (XENKBD_IN_RING_OFFS + XENKBD_IN_RING_SIZE)
104#define XENKBD_OUT_RING(page) \
105 ((union xenkbd_out_event *)((char *)(page) + XENKBD_OUT_RING_OFFS))
106#define XENKBD_OUT_RING_REF(page, idx) \
107 (XENKBD_OUT_RING((page))[(idx) % XENKBD_OUT_RING_LEN])
108
109struct xenkbd_page {
110 uint32_t in_cons, in_prod;
111 uint32_t out_cons, out_prod;
112};
113
114#endif
diff --git a/include/xen/interface/io/protocols.h b/include/xen/interface/io/protocols.h
new file mode 100644
index 000000000000..01fc8ae5f0b0
--- /dev/null
+++ b/include/xen/interface/io/protocols.h
@@ -0,0 +1,21 @@
1#ifndef __XEN_PROTOCOLS_H__
2#define __XEN_PROTOCOLS_H__
3
4#define XEN_IO_PROTO_ABI_X86_32 "x86_32-abi"
5#define XEN_IO_PROTO_ABI_X86_64 "x86_64-abi"
6#define XEN_IO_PROTO_ABI_IA64 "ia64-abi"
7#define XEN_IO_PROTO_ABI_POWERPC64 "powerpc64-abi"
8
9#if defined(__i386__)
10# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_32
11#elif defined(__x86_64__)
12# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_X86_64
13#elif defined(__ia64__)
14# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_IA64
15#elif defined(__powerpc64__)
16# define XEN_IO_PROTO_ABI_NATIVE XEN_IO_PROTO_ABI_POWERPC64
17#else
18# error arch fixup needed here
19#endif
20
21#endif
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index af36ead16817..da768469aa92 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -29,7 +29,7 @@ struct xen_memory_reservation {
29 * OUT: GMFN bases of extents that were allocated 29 * OUT: GMFN bases of extents that were allocated
30 * (NB. This command also updates the mach_to_phys translation table) 30 * (NB. This command also updates the mach_to_phys translation table)
31 */ 31 */
32 GUEST_HANDLE(ulong) extent_start; 32 ulong extent_start;
33 33
34 /* Number of extents, and size/alignment of each (2^extent_order pages). */ 34 /* Number of extents, and size/alignment of each (2^extent_order pages). */
35 unsigned long nr_extents; 35 unsigned long nr_extents;
@@ -50,7 +50,6 @@ struct xen_memory_reservation {
50 domid_t domid; 50 domid_t domid;
51 51
52}; 52};
53DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
54 53
55/* 54/*
56 * Returns the maximum machine frame number of mapped RAM in this system. 55 * Returns the maximum machine frame number of mapped RAM in this system.
@@ -86,7 +85,7 @@ struct xen_machphys_mfn_list {
86 * any large discontiguities in the machine address space, 2MB gaps in 85 * any large discontiguities in the machine address space, 2MB gaps in
87 * the machphys table will be represented by an MFN base of zero. 86 * the machphys table will be represented by an MFN base of zero.
88 */ 87 */
89 GUEST_HANDLE(ulong) extent_start; 88 ulong extent_start;
90 89
91 /* 90 /*
92 * Number of extents written to the above array. This will be smaller 91 * Number of extents written to the above array. This will be smaller
@@ -94,7 +93,6 @@ struct xen_machphys_mfn_list {
94 */ 93 */
95 unsigned int nr_extents; 94 unsigned int nr_extents;
96}; 95};
97DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
98 96
99/* 97/*
100 * Sets the GPFN at which a particular page appears in the specified guest's 98 * Sets the GPFN at which a particular page appears in the specified guest's
@@ -117,7 +115,6 @@ struct xen_add_to_physmap {
117 /* GPFN where the source mapping page should appear. */ 115 /* GPFN where the source mapping page should appear. */
118 unsigned long gpfn; 116 unsigned long gpfn;
119}; 117};
120DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
121 118
122/* 119/*
123 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error 120 * Translates a list of domain-specific GPFNs into MFNs. Returns a -ve error
@@ -132,14 +129,13 @@ struct xen_translate_gpfn_list {
132 unsigned long nr_gpfns; 129 unsigned long nr_gpfns;
133 130
134 /* List of GPFNs to translate. */ 131 /* List of GPFNs to translate. */
135 GUEST_HANDLE(ulong) gpfn_list; 132 ulong gpfn_list;
136 133
137 /* 134 /*
138 * Output list to contain MFN translations. May be the same as the input 135 * Output list to contain MFN translations. May be the same as the input
139 * list (in which case each input GPFN is overwritten with the output MFN). 136 * list (in which case each input GPFN is overwritten with the output MFN).
140 */ 137 */
141 GUEST_HANDLE(ulong) mfn_list; 138 ulong mfn_list;
142}; 139};
143DEFINE_GUEST_HANDLE_STRUCT(xen_translate_gpfn_list);
144 140
145#endif /* __XEN_PUBLIC_MEMORY_H__ */ 141#endif /* __XEN_PUBLIC_MEMORY_H__ */
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index b05d8a6d9143..87e6f8a48661 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -85,6 +85,7 @@ struct vcpu_runstate_info {
85 */ 85 */
86 uint64_t time[4]; 86 uint64_t time[4];
87}; 87};
88DEFINE_GUEST_HANDLE_STRUCT(vcpu_runstate_info);
88 89
89/* VCPU is currently running on a physical CPU. */ 90/* VCPU is currently running on a physical CPU. */
90#define RUNSTATE_running 0 91#define RUNSTATE_running 0
@@ -119,6 +120,7 @@ struct vcpu_runstate_info {
119#define VCPUOP_register_runstate_memory_area 5 120#define VCPUOP_register_runstate_memory_area 5
120struct vcpu_register_runstate_memory_area { 121struct vcpu_register_runstate_memory_area {
121 union { 122 union {
123 GUEST_HANDLE(vcpu_runstate_info) h;
122 struct vcpu_runstate_info *v; 124 struct vcpu_runstate_info *v;
123 uint64_t p; 125 uint64_t p;
124 } addr; 126 } addr;
@@ -134,6 +136,7 @@ struct vcpu_register_runstate_memory_area {
134struct vcpu_set_periodic_timer { 136struct vcpu_set_periodic_timer {
135 uint64_t period_ns; 137 uint64_t period_ns;
136}; 138};
139DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_periodic_timer);
137 140
138/* 141/*
139 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot 142 * Set or stop a VCPU's single-shot timer. Every VCPU has one single-shot
@@ -145,6 +148,7 @@ struct vcpu_set_singleshot_timer {
145 uint64_t timeout_abs_ns; 148 uint64_t timeout_abs_ns;
146 uint32_t flags; /* VCPU_SSHOTTMR_??? */ 149 uint32_t flags; /* VCPU_SSHOTTMR_??? */
147}; 150};
151DEFINE_GUEST_HANDLE_STRUCT(vcpu_set_singleshot_timer);
148 152
149/* Flags to VCPUOP_set_singleshot_timer. */ 153/* Flags to VCPUOP_set_singleshot_timer. */
150 /* Require the timeout to be in the future (return -ETIME if it's passed). */ 154 /* Require the timeout to be in the future (return -ETIME if it's passed). */
@@ -164,5 +168,6 @@ struct vcpu_register_vcpu_info {
164 uint32_t offset; /* offset within page */ 168 uint32_t offset; /* offset within page */
165 uint32_t rsvd; /* unused */ 169 uint32_t rsvd; /* unused */
166}; 170};
171DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
167 172
168#endif /* __XEN_PUBLIC_VCPU_H__ */ 173#endif /* __XEN_PUBLIC_VCPU_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 518a5bf79ed3..9b018da48cf3 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -58,6 +58,16 @@
58#define __HYPERVISOR_physdev_op 33 58#define __HYPERVISOR_physdev_op 33
59#define __HYPERVISOR_hvm_op 34 59#define __HYPERVISOR_hvm_op 34
60 60
61/* Architecture-specific hypercall definitions. */
62#define __HYPERVISOR_arch_0 48
63#define __HYPERVISOR_arch_1 49
64#define __HYPERVISOR_arch_2 50
65#define __HYPERVISOR_arch_3 51
66#define __HYPERVISOR_arch_4 52
67#define __HYPERVISOR_arch_5 53
68#define __HYPERVISOR_arch_6 54
69#define __HYPERVISOR_arch_7 55
70
61/* 71/*
62 * VIRTUAL INTERRUPTS 72 * VIRTUAL INTERRUPTS
63 * 73 *
@@ -68,8 +78,18 @@
68#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */ 78#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
69#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */ 79#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
70#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */ 80#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
71#define NR_VIRQS 8
72 81
82/* Architecture-specific VIRQ definitions. */
83#define VIRQ_ARCH_0 16
84#define VIRQ_ARCH_1 17
85#define VIRQ_ARCH_2 18
86#define VIRQ_ARCH_3 19
87#define VIRQ_ARCH_4 20
88#define VIRQ_ARCH_5 21
89#define VIRQ_ARCH_6 22
90#define VIRQ_ARCH_7 23
91
92#define NR_VIRQS 24
73/* 93/*
74 * MMU-UPDATE REQUESTS 94 * MMU-UPDATE REQUESTS
75 * 95 *
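With VIRQ_ARCH_0..7 reserved and NR_VIRQS raised to 24, architecture code can deliver its own virtual interrupts through the same event-channel plumbing as the generic VIRQs. As a hedged illustration (the handler and init function are hypothetical; bind_virq_to_irqhandler() is the helper declared in include/xen/events.h, also touched by this series), binding one of the generic VIRQs looks roughly like this:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <xen/events.h>

/* Sketch only: bind VIRQ_DEBUGGER on CPU 0.  "debug_interrupt" is a
 * made-up handler, not part of this patch. */
static irqreturn_t debug_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __init bind_debug_virq(void)
{
	int irq = bind_virq_to_irqhandler(VIRQ_DEBUGGER, 0, debug_interrupt,
					  0, "debug", NULL);

	return irq < 0 ? irq : 0;
}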
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
new file mode 100644
index 000000000000..ac45e0712afa
--- /dev/null
+++ b/include/xen/interface/xencomm.h
@@ -0,0 +1,41 @@
1/*
2 * Permission is hereby granted, free of charge, to any person obtaining a copy
3 * of this software and associated documentation files (the "Software"), to
4 * deal in the Software without restriction, including without limitation the
5 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
6 * sell copies of the Software, and to permit persons to whom the Software is
7 * furnished to do so, subject to the following conditions:
8 *
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
16 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
17 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
18 * DEALINGS IN THE SOFTWARE.
19 *
20 * Copyright (C) IBM Corp. 2006
21 */
22
23#ifndef _XEN_XENCOMM_H_
24#define _XEN_XENCOMM_H_
25
26/* A xencomm descriptor is a scatter/gather list containing physical
27 * addresses corresponding to a virtually contiguous memory area. The
28 * hypervisor translates these physical addresses to machine addresses to copy
29 * to and from the virtually contiguous area.
30 */
31
32#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
33#define XENCOMM_INVALID (~0UL)
34
35struct xencomm_desc {
36 uint32_t magic;
37 uint32_t nr_addrs; /* the number of entries in address[] */
38 uint64_t address[0];
39};
40
41#endif /* _XEN_XENCOMM_H_ */
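The comment above describes the descriptor layout; in practice a caller walks the buffer a page at a time and records each physical address. A rough sketch under the assumption of a page-aligned, directly mapped buffer; the in-tree xencomm code also copes with sub-page offsets and vmalloc areas, which this ignores:

#include <linux/mm.h>
#include <asm/io.h>
#include <xen/interface/xencomm.h>

/* Sketch only: describe a page-aligned, directly mapped buffer as a
 * xencomm scatter/gather list. */
static void xencomm_fill(struct xencomm_desc *desc, void *buf,
			 unsigned long bytes)
{
	unsigned long i, pages = (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;

	desc->magic = XENCOMM_MAGIC;
	desc->nr_addrs = pages;
	for (i = 0; i < pages; i++)
		desc->address[i] = virt_to_phys((char *)buf + i * PAGE_SIZE);
}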
diff --git a/include/xen/page.h b/include/xen/page.h
index 031ef22a971e..eaf85fab1263 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -1,180 +1 @@
1#ifndef __XEN_PAGE_H 1#include <asm/xen/page.h>
2#define __XEN_PAGE_H
3
4#include <linux/pfn.h>
5
6#include <asm/uaccess.h>
7#include <asm/pgtable.h>
8
9#include <xen/features.h>
10
11#ifdef CONFIG_X86_PAE
12/* Xen machine address */
13typedef struct xmaddr {
14 unsigned long long maddr;
15} xmaddr_t;
16
17/* Xen pseudo-physical address */
18typedef struct xpaddr {
19 unsigned long long paddr;
20} xpaddr_t;
21#else
22/* Xen machine address */
23typedef struct xmaddr {
24 unsigned long maddr;
25} xmaddr_t;
26
27/* Xen pseudo-physical address */
28typedef struct xpaddr {
29 unsigned long paddr;
30} xpaddr_t;
31#endif
32
33#define XMADDR(x) ((xmaddr_t) { .maddr = (x) })
34#define XPADDR(x) ((xpaddr_t) { .paddr = (x) })
35
36/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
37#define INVALID_P2M_ENTRY (~0UL)
38#define FOREIGN_FRAME_BIT (1UL<<31)
39#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
40
41extern unsigned long *phys_to_machine_mapping;
42
43static inline unsigned long pfn_to_mfn(unsigned long pfn)
44{
45 if (xen_feature(XENFEAT_auto_translated_physmap))
46 return pfn;
47
48 return phys_to_machine_mapping[(unsigned int)(pfn)] &
49 ~FOREIGN_FRAME_BIT;
50}
51
52static inline int phys_to_machine_mapping_valid(unsigned long pfn)
53{
54 if (xen_feature(XENFEAT_auto_translated_physmap))
55 return 1;
56
57 return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
58}
59
60static inline unsigned long mfn_to_pfn(unsigned long mfn)
61{
62 unsigned long pfn;
63
64 if (xen_feature(XENFEAT_auto_translated_physmap))
65 return mfn;
66
67#if 0
68 if (unlikely((mfn >> machine_to_phys_order) != 0))
69 return max_mapnr;
70#endif
71
72 pfn = 0;
73 /*
74 * The array access can fail (e.g., device space beyond end of RAM).
75 * In such cases it doesn't matter what we return (we return garbage),
76 * but we must handle the fault without crashing!
77 */
78 __get_user(pfn, &machine_to_phys_mapping[mfn]);
79
80 return pfn;
81}
82
83static inline xmaddr_t phys_to_machine(xpaddr_t phys)
84{
85 unsigned offset = phys.paddr & ~PAGE_MASK;
86 return XMADDR(PFN_PHYS((u64)pfn_to_mfn(PFN_DOWN(phys.paddr))) | offset);
87}
88
89static inline xpaddr_t machine_to_phys(xmaddr_t machine)
90{
91 unsigned offset = machine.maddr & ~PAGE_MASK;
92 return XPADDR(PFN_PHYS((u64)mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
93}
94
95/*
96 * We detect special mappings in one of two ways:
97 * 1. If the MFN is an I/O page then Xen will set the m2p entry
98 * to be outside our maximum possible pseudophys range.
99 * 2. If the MFN belongs to a different domain then we will certainly
100 * not have MFN in our p2m table. Conversely, if the page is ours,
101 * then we'll have p2m(m2p(MFN))==MFN.
102 * If we detect a special mapping then it doesn't have a 'struct page'.
103 * We force !pfn_valid() by returning an out-of-range pointer.
104 *
105 * NB. These checks require that, for any MFN that is not in our reservation,
106 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
107 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
108 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
109 *
110 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
111 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
112 * require. In all the cases we care about, the FOREIGN_FRAME bit is
113 * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
114 */
115static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
116{
117 extern unsigned long max_mapnr;
118 unsigned long pfn = mfn_to_pfn(mfn);
119 if ((pfn < max_mapnr)
120 && !xen_feature(XENFEAT_auto_translated_physmap)
121 && (phys_to_machine_mapping[pfn] != mfn))
122 return max_mapnr; /* force !pfn_valid() */
123 return pfn;
124}
125
126static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
127{
128 if (xen_feature(XENFEAT_auto_translated_physmap)) {
129 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
130 return;
131 }
132 phys_to_machine_mapping[pfn] = mfn;
133}
134
135/* VIRT <-> MACHINE conversion */
136#define virt_to_machine(v) (phys_to_machine(XPADDR(__pa(v))))
137#define virt_to_mfn(v) (pfn_to_mfn(PFN_DOWN(__pa(v))))
138#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
139
140#ifdef CONFIG_X86_PAE
141#define pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
142 (((_pte).pte_high & 0xfff) << (32-PAGE_SHIFT)))
143
144static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
145{
146 pte_t pte;
147
148 pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) |
149 (pgprot_val(pgprot) >> 32);
150 pte.pte_high &= (__supported_pte_mask >> 32);
151 pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
152 pte.pte_low &= __supported_pte_mask;
153
154 return pte;
155}
156
157static inline unsigned long long pte_val_ma(pte_t x)
158{
159 return x.pte;
160}
161#define pmd_val_ma(v) ((v).pmd)
162#define pud_val_ma(v) ((v).pgd.pgd)
163#define __pte_ma(x) ((pte_t) { .pte = (x) })
164#define __pmd_ma(x) ((pmd_t) { (x) } )
165#else /* !X86_PAE */
166#define pte_mfn(_pte) ((_pte).pte_low >> PAGE_SHIFT)
167#define mfn_pte(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
168#define pte_val_ma(x) ((x).pte)
169#define pmd_val_ma(v) ((v).pud.pgd.pgd)
170#define __pte_ma(x) ((pte_t) { (x) } )
171#endif /* CONFIG_X86_PAE */
172
173#define pgd_val_ma(x) ((x).pgd)
174
175
176xmaddr_t arbitrary_virt_to_machine(unsigned long address);
177void make_lowmem_page_readonly(void *vaddr);
178void make_lowmem_page_readwrite(void *vaddr);
179
180#endif /* __XEN_PAGE_H */
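Everything above now lives in asm/xen/page.h; the one-line replacement keeps existing #include <xen/page.h> users building, and the conversion helpers behave the same in their new home. A small sketch of their use (the function itself is illustrative; virt_to_mfn(), virt_to_machine(), mfn_to_pfn() and pfn_to_mfn() are the helpers shown in the removed code):

#include <linux/kernel.h>
#include <asm/xen/page.h>

/* Sketch only: convert a directly mapped kernel address to the machine
 * frame / machine address the hypervisor expects, and check that the
 * p2m/m2p mapping round-trips for a page in our own reservation. */
static void example_conversion(void *vaddr)
{
	unsigned long mfn = virt_to_mfn(vaddr);	/* machine frame number */
	xmaddr_t maddr = virt_to_machine(vaddr);	/* full machine address */

	printk(KERN_DEBUG "va %p -> mfn %lx maddr %llx\n",
	       vaddr, mfn, (unsigned long long)maddr.maddr);

	BUG_ON(pfn_to_mfn(mfn_to_pfn(mfn)) != mfn);
}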
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
new file mode 100644
index 000000000000..10ddfe0142d0
--- /dev/null
+++ b/include/xen/xen-ops.h
@@ -0,0 +1,8 @@
1#ifndef INCLUDE_XEN_OPS_H
2#define INCLUDE_XEN_OPS_H
3
4#include <linux/percpu.h>
5
6DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);
7
8#endif /* INCLUDE_XEN_OPS_H */
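xen_vcpu is the per-CPU shortcut to the current CPU's struct vcpu_info in the shared info page. A hedged sketch of a reader (the function is hypothetical, and the caller is assumed to run with preemption disabled):

#include <linux/percpu.h>
#include <xen/interface/xen.h>
#include <xen/xen-ops.h>

/* Sketch only: report whether event delivery is masked on this CPU,
 * the paravirtual analogue of inspecting EFLAGS.IF. */
static int xen_events_masked(void)
{
	struct vcpu_info *v = __get_cpu_var(xen_vcpu);

	return v->evtchn_upcall_mask;
}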
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 6f7c290651ae..6369d89c25d5 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -97,6 +97,7 @@ struct xenbus_driver {
97 int (*uevent)(struct xenbus_device *, char **, int, char *, int); 97 int (*uevent)(struct xenbus_device *, char **, int, char *, int);
98 struct device_driver driver; 98 struct device_driver driver;
99 int (*read_otherend_details)(struct xenbus_device *dev); 99 int (*read_otherend_details)(struct xenbus_device *dev);
100 int (*is_ready)(struct xenbus_device *dev);
100}; 101};
101 102
102static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv) 103static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
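The new is_ready() hook lets a xenbus driver report whether its device has finished connecting. A minimal sketch of a frontend supplying it; the driver name and the connected flag are made up for illustration, and a real driver would track state per device rather than globally:

#include <xen/xenbus.h>

/* Sketch only: a hypothetical frontend advertising readiness through
 * the new is_ready() callback. */
static int examplefront_connected;

static int examplefront_is_ready(struct xenbus_device *dev)
{
	return examplefront_connected;
}

static struct xenbus_driver examplefront_driver = {
	.name = "examplefront",
	.is_ready = examplefront_is_ready,
	/* .ids, .probe, .otherend_changed, .remove, ... as usual */
};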
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
new file mode 100644
index 000000000000..e43b039be112
--- /dev/null
+++ b/include/xen/xencomm.h
@@ -0,0 +1,77 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 *
16 * Copyright (C) IBM Corp. 2006
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Jerone Young <jyoung5@us.ibm.com>
20 */
21
22#ifndef _LINUX_XENCOMM_H_
23#define _LINUX_XENCOMM_H_
24
25#include <xen/interface/xencomm.h>
26
27#define XENCOMM_MINI_ADDRS 3
28struct xencomm_mini {
29 struct xencomm_desc _desc;
30 uint64_t address[XENCOMM_MINI_ADDRS];
31};
32
33/* To avoid an additional virt to phys conversion, an opaque structure is
34 presented. */
35struct xencomm_handle;
36
37extern void xencomm_free(struct xencomm_handle *desc);
38extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
39extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
40 unsigned long bytes, struct xencomm_mini *xc_area);
41
42#if 0
43#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
44 struct xencomm_mini xc_desc ## _base[(n)] \
45 __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
46 struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
47#else
48/*
49 * gcc bug workaround:
50 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
51 * gcc doesn't properly handle stack variables with
52 * __attribute__((__aligned__(sizeof(struct xencomm_mini))))
53 */
54#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
55 unsigned char xc_desc ## _base[((n) + 1 ) * \
56 sizeof(struct xencomm_mini)]; \
57 struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
58 ((unsigned long)xc_desc ## _base + \
59 (sizeof(struct xencomm_mini) - \
60 ((unsigned long)xc_desc ## _base) % \
61 sizeof(struct xencomm_mini)));
62#endif
63#define xencomm_map_no_alloc(ptr, bytes) \
64 ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
65 __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
66
67/* provided by architecture code: */
68extern unsigned long xencomm_vtop(unsigned long vaddr);
69
70static inline void *xencomm_pa(void *ptr)
71{
72 return (void *)xencomm_vtop((unsigned long)ptr);
73}
74
75#define xen_guest_handle(hnd) ((hnd).p)
76
77#endif /* _LINUX_XENCOMM_H_ */
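Taken together, xencomm_map_no_alloc() builds an inline xencomm_mini descriptor on the stack and returns the handle a hypercall stub expects, while xencomm_pa() does the plain virt-to-phys step for pointers that are already physically contiguous. A hedged sketch of the calling pattern; arch_hypercall_vcpu_op() is a stand-in for the architecture's real hypercall stub and is declared here only so the example is self-contained:

#include <linux/errno.h>
#include <linux/types.h>
#include <xen/xencomm.h>
#include <xen/interface/vcpu.h>

/* Hypothetical architecture hypercall stub taking a xencomm handle;
 * not defined by this header. */
extern int arch_hypercall_vcpu_op(int cmd, int cpu, struct xencomm_handle *arg);

/* Sketch only: pass a small stack-resident argument to a hypercall
 * without allocating a descriptor. */
static int example_set_periodic_timer(int cpu, uint64_t period_ns)
{
	struct vcpu_set_periodic_timer arg = { .period_ns = period_ns };
	struct xencomm_handle *desc;

	/* Builds the inline descriptor on the stack via XENCOMM_MINI_ALIGNED. */
	desc = xencomm_map_no_alloc(&arg, sizeof(arg));
	if (!desc)
		return -EINVAL;

	return arch_hypercall_vcpu_op(VCPUOP_set_periodic_timer, cpu, desc);
}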