-rw-r--r--	arch/arm/include/asm/xen/page.h	15
-rw-r--r--	arch/arm/xen/p2m.c	32
-rw-r--r--	arch/x86/include/asm/xen/page.h	11
-rw-r--r--	arch/x86/xen/p2m.c	121
-rw-r--r--	drivers/xen/grant-table.c	73
5 files changed, 156 insertions, 96 deletions
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index e0965abacb7d..cf4f3e867395 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -97,16 +97,13 @@ static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
 	return NULL;
 }
 
-static inline int m2p_add_override(unsigned long mfn, struct page *page,
-		struct gnttab_map_grant_ref *kmap_op)
-{
-	return 0;
-}
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
 
-static inline int m2p_remove_override(struct page *page, bool clear_pte)
-{
-	return 0;
-}
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_map_grant_ref *kmap_ops,
+				     struct page **pages, unsigned int count);
 
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index b31ee1b275b0..97baf4427817 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -146,6 +146,38 @@ unsigned long __mfn_to_pfn(unsigned long mfn)
 }
 EXPORT_SYMBOL_GPL(__mfn_to_pfn);
 
+int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+			    struct gnttab_map_grant_ref *kmap_ops,
+			    struct page **pages, unsigned int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		if (map_ops[i].status)
+			continue;
+		set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
+				    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
+
+int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
+				    INVALID_P2M_ENTRY);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+
 bool __set_phys_to_machine_multi(unsigned long pfn,
 			unsigned long mfn, unsigned long nr_pages)
 {
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 3e276eb23d1b..c949923a5668 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -49,10 +49,17 @@ extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e);
 
+extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+				   struct gnttab_map_grant_ref *kmap_ops,
+				   struct page **pages, unsigned int count);
 extern int m2p_add_override(unsigned long mfn, struct page *page,
 			    struct gnttab_map_grant_ref *kmap_op);
+extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+				     struct gnttab_map_grant_ref *kmap_ops,
+				     struct page **pages, unsigned int count);
 extern int m2p_remove_override(struct page *page,
-			       struct gnttab_map_grant_ref *kmap_op);
+			       struct gnttab_map_grant_ref *kmap_op,
+			       unsigned long mfn);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
@@ -121,7 +128,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 		pfn = m2p_find_override_pfn(mfn, ~0);
 	}
 
-	/* 
+	/*
 	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
 	 * entry doesn't map back to the mfn and m2p_override doesn't have a
 	 * valid entry for it.
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 696c694986d0..85e5d78c9874 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -881,6 +881,65 @@ static unsigned long mfn_hash(unsigned long mfn)
 	return hash_long(mfn, M2P_OVERRIDE_HASH_SHIFT);
 }
 
+int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
+			    struct gnttab_map_grant_ref *kmap_ops,
+			    struct page **pages, unsigned int count)
+{
+	int i, ret = 0;
+	bool lazy = false;
+	pte_t *pte;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	if (kmap_ops &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+		arch_enter_lazy_mmu_mode();
+		lazy = true;
+	}
+
+	for (i = 0; i < count; i++) {
+		unsigned long mfn, pfn;
+
+		/* Do not add to override if the map failed. */
+		if (map_ops[i].status)
+			continue;
+
+		if (map_ops[i].flags & GNTMAP_contains_pte) {
+			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
+				(map_ops[i].host_addr & ~PAGE_MASK));
+			mfn = pte_mfn(*pte);
+		} else {
+			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
+		}
+		pfn = page_to_pfn(pages[i]);
+
+		WARN_ON(PagePrivate(pages[i]));
+		SetPagePrivate(pages[i]);
+		set_page_private(pages[i], mfn);
+		pages[i]->index = pfn_to_mfn(pfn);
+
+		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (kmap_ops) {
+			ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	if (lazy)
+		arch_leave_lazy_mmu_mode();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
+
 /* Add an MFN override for a particular page */
 int m2p_add_override(unsigned long mfn, struct page *page,
 			struct gnttab_map_grant_ref *kmap_op)
@@ -899,13 +958,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
899 "m2p_add_override: pfn %lx not mapped", pfn)) 958 "m2p_add_override: pfn %lx not mapped", pfn))
900 return -EINVAL; 959 return -EINVAL;
901 } 960 }
902 WARN_ON(PagePrivate(page));
903 SetPagePrivate(page);
904 set_page_private(page, mfn);
905 page->index = pfn_to_mfn(pfn);
906
907 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
908 return -ENOMEM;
909 961
910 if (kmap_op != NULL) { 962 if (kmap_op != NULL) {
911 if (!PageHighMem(page)) { 963 if (!PageHighMem(page)) {
@@ -943,20 +995,62 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
+
+int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count)
+{
+	int i, ret = 0;
+	bool lazy = false;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	if (kmap_ops &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+		arch_enter_lazy_mmu_mode();
+		lazy = true;
+	}
+
+	for (i = 0; i < count; i++) {
+		unsigned long mfn = get_phys_to_machine(page_to_pfn(pages[i]));
+		unsigned long pfn = page_to_pfn(pages[i]);
+
+		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		set_page_private(pages[i], INVALID_P2M_ENTRY);
+		WARN_ON(!PagePrivate(pages[i]));
+		ClearPagePrivate(pages[i]);
+		set_phys_to_machine(pfn, pages[i]->index);
+
+		if (kmap_ops)
+			ret = m2p_remove_override(pages[i], &kmap_ops[i], mfn);
+		if (ret)
+			goto out;
+	}
+
+out:
+	if (lazy)
+		arch_leave_lazy_mmu_mode();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
+
 int m2p_remove_override(struct page *page,
-			struct gnttab_map_grant_ref *kmap_op)
+			struct gnttab_map_grant_ref *kmap_op,
+			unsigned long mfn)
 {
 	unsigned long flags;
-	unsigned long mfn;
 	unsigned long pfn;
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
 
 	pfn = page_to_pfn(page);
-	mfn = get_phys_to_machine(pfn);
-	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
-		return -EINVAL;
 
 	if (!PageHighMem(page)) {
 		address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -970,10 +1064,7 @@ int m2p_remove_override(struct page *page,
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_del(&page->lru);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
-	WARN_ON(!PagePrivate(page));
-	ClearPagePrivate(page);
 
-	set_phys_to_machine(pfn, page->index);
 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
 			struct multicall_space mcs;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b84e3ab839aa..6d325bda76da 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -933,9 +933,6 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		    struct page **pages, unsigned int count)
 {
 	int i, ret;
-	bool lazy = false;
-	pte_t *pte;
-	unsigned long mfn;
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 	if (ret)
@@ -947,45 +944,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
 						&map_ops[i].status, __func__);
 
-	/* this is basically a nop on x86 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		for (i = 0; i < count; i++) {
-			if (map_ops[i].status)
-				continue;
-			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
-					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
-		}
-		return ret;
-	}
-
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-		arch_enter_lazy_mmu_mode();
-		lazy = true;
-	}
-
-	for (i = 0; i < count; i++) {
-		/* Do not add to override if the map failed. */
-		if (map_ops[i].status)
-			continue;
-
-		if (map_ops[i].flags & GNTMAP_contains_pte) {
-			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
-				(map_ops[i].host_addr & ~PAGE_MASK));
-			mfn = pte_mfn(*pte);
-		} else {
-			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
-		}
-		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
-				       &kmap_ops[i] : NULL);
-		if (ret)
-			goto out;
-	}
-
- out:
-	if (lazy)
-		arch_leave_lazy_mmu_mode();
-
-	return ret;
+	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
 }
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
@@ -993,39 +952,13 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		      struct gnttab_map_grant_ref *kmap_ops,
 		      struct page **pages, unsigned int count)
 {
-	int i, ret;
-	bool lazy = false;
+	int ret;
 
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
 		return ret;
 
-	/* this is basically a nop on x86 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		for (i = 0; i < count; i++) {
-			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
-					    INVALID_P2M_ENTRY);
-		}
-		return ret;
-	}
-
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
-		arch_enter_lazy_mmu_mode();
-		lazy = true;
-	}
-
-	for (i = 0; i < count; i++) {
-		ret = m2p_remove_override(pages[i], kmap_ops ?
-					  &kmap_ops[i] : NULL);
-		if (ret)
-			goto out;
-	}
-
- out:
-	if (lazy)
-		arch_leave_lazy_mmu_mode();
-
-	return ret;
+	return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
 }
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
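For reference, a minimal caller-side sketch (not part of the patch above; the function demo_map_single_grant and its arguments are hypothetical, and the page is assumed to be a previously ballooned page supplied by the caller) showing how a grant-mapping user exercises gnttab_map_refs()/gnttab_unmap_refs(), which after this change delegate the p2m bookkeeping to set_foreign_p2m_mapping()/clear_foreign_p2m_mapping():

#include <linux/mm.h>
#include <xen/grant_table.h>

/* Hypothetical example: map one foreign grant ref into 'page', then unmap it. */
static int demo_map_single_grant(struct page *page, grant_ref_t gref,
				 domid_t otherend)
{
	struct gnttab_map_grant_ref map_op;
	struct gnttab_unmap_grant_ref unmap_op;
	unsigned long addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
	int ret;

	/* gnttab_map_refs() now finishes with set_foreign_p2m_mapping(). */
	gnttab_set_map_op(&map_op, addr, GNTMAP_host_map, gref, otherend);
	ret = gnttab_map_refs(&map_op, NULL, &page, 1);
	if (ret)
		return ret;
	if (map_op.status != GNTST_okay)
		return -EINVAL;

	/* ... access the mapped foreign page here ... */

	/* gnttab_unmap_refs() now finishes with clear_foreign_p2m_mapping(). */
	gnttab_set_unmap_op(&unmap_op, addr, GNTMAP_host_map, map_op.handle);
	return gnttab_unmap_refs(&unmap_op, NULL, &page, 1);
}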