 arch/x86/include/asm/xen/page.h      |  5
 arch/x86/xen/p2m.c                   | 17
 drivers/block/xen-blkback/blkback.c  | 15
 drivers/xen/gntdev.c                 | 13
 drivers/xen/grant-table.c            | 89
 include/xen/grant_table.h            |  8
 6 files changed, 101 insertions(+), 46 deletions(-)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 3e276eb23d1b..787e1bb5aafc 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,7 +52,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page,
-			       struct gnttab_map_grant_ref *kmap_op);
+			       struct gnttab_map_grant_ref *kmap_op,
+			       unsigned long mfn);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
@@ -121,7 +122,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 		pfn = m2p_find_override_pfn(mfn, ~0);
 	}
 
-	/* 
+	/*
 	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
 	 * entry doesn't map back to the mfn and m2p_override doesn't have a
 	 * valid entry for it.
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 696c694986d0..8009acbe41e4 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -899,13 +899,6 @@ int m2p_add_override(unsigned long mfn, struct page *page,
899 "m2p_add_override: pfn %lx not mapped", pfn)) 899 "m2p_add_override: pfn %lx not mapped", pfn))
900 return -EINVAL; 900 return -EINVAL;
901 } 901 }
902 WARN_ON(PagePrivate(page));
903 SetPagePrivate(page);
904 set_page_private(page, mfn);
905 page->index = pfn_to_mfn(pfn);
906
907 if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
908 return -ENOMEM;
909 902
910 if (kmap_op != NULL) { 903 if (kmap_op != NULL) {
911 if (!PageHighMem(page)) { 904 if (!PageHighMem(page)) {
@@ -944,19 +937,16 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);
 int m2p_remove_override(struct page *page,
-			struct gnttab_map_grant_ref *kmap_op)
+			struct gnttab_map_grant_ref *kmap_op,
+			unsigned long mfn)
 {
 	unsigned long flags;
-	unsigned long mfn;
 	unsigned long pfn;
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;
 
 	pfn = page_to_pfn(page);
-	mfn = get_phys_to_machine(pfn);
-	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
-		return -EINVAL;
 
 	if (!PageHighMem(page)) {
 		address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -970,10 +960,7 @@ int m2p_remove_override(struct page *page,
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_del(&page->lru);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
-	WARN_ON(!PagePrivate(page));
-	ClearPagePrivate(page);
 
-	set_phys_to_machine(pfn, page->index);
 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
 			struct multicall_space mcs;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..875025f299b6 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,8 +285,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 			!rb_next(&persistent_gnt->node)) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -321,8 +320,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -330,7 +328,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
+		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 		BUG_ON(ret);
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
@@ -670,15 +668,14 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 			    GNTMAP_host_map, pages[i]->handle);
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
-				invcount);
+			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 			BUG_ON(ret);
 			put_free_pages(blkif, unmap_pages, invcount);
 			invcount = 0;
 		}
 	}
 	if (invcount) {
-		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 		BUG_ON(ret);
 		put_free_pages(blkif, unmap_pages, invcount);
 	}
@@ -740,7 +737,7 @@ again:
 	}
 
 	if (segs_to_map) {
-		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
 		BUG_ON(ret);
 	}
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 073b4a19a8b0..34a2704fbc88 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,8 +284,10 @@ static int map_grant_pages(struct grant_map *map)
 	}
 
 	pr_debug("map %d+%d\n", map->index, map->count);
-	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
-			map->pages, map->count);
+	err = gnttab_map_refs_userspace(map->map_ops,
+					use_ptemod ? map->kmap_ops : NULL,
+					map->pages,
+					map->count);
 	if (err)
 		return err;
 
@@ -315,9 +317,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 		}
 	}
 
-	err = gnttab_unmap_refs(map->unmap_ops + offset,
-			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
-			pages);
+	err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
+					  use_ptemod ? map->kmap_ops + offset : NULL,
+					  map->pages + offset,
+					  pages);
 	if (err)
 		return err;
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index b84e3ab839aa..8ee13e2e45e2 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -928,15 +928,17 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 }
 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
 
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-		    struct gnttab_map_grant_ref *kmap_ops,
-		    struct page **pages, unsigned int count)
+int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		      struct gnttab_map_grant_ref *kmap_ops,
+		      struct page **pages, unsigned int count,
+		      bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
 	pte_t *pte;
-	unsigned long mfn;
+	unsigned long mfn, pfn;
 
+	BUG_ON(kmap_ops && !m2p_override);
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 	if (ret)
 		return ret;
@@ -955,10 +957,12 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
 					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
@@ -975,8 +979,20 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		} else {
 			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
 		}
-		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
-				       &kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+
+		WARN_ON(PagePrivate(pages[i]));
+		SetPagePrivate(pages[i]);
+		set_page_private(pages[i], mfn);
+
+		pages[i]->index = pfn_to_mfn(pfn);
+		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (m2p_override)
+			ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+					       &kmap_ops[i] : NULL);
 		if (ret)
 			goto out;
 	}
@@ -987,15 +1003,32 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 
 	return ret;
 }
+
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
+
+int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 			struct gnttab_map_grant_ref *kmap_ops,
-			struct page **pages, unsigned int count)
+			struct page **pages, unsigned int count,
+			bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
+	unsigned long pfn, mfn;
 
+	BUG_ON(kmap_ops && !m2p_override);
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
 		return ret;
@@ -1006,17 +1039,33 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
 					    INVALID_P2M_ENTRY);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
 
 	for (i = 0; i < count; i++) {
-		ret = m2p_remove_override(pages[i], kmap_ops ?
-					  &kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+		mfn = get_phys_to_machine(pfn);
+		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		set_page_private(pages[i], INVALID_P2M_ENTRY);
+		WARN_ON(!PagePrivate(pages[i]));
+		ClearPagePrivate(pages[i]);
+		set_phys_to_machine(pfn, pages[i]->index);
+		if (m2p_override)
+			ret = m2p_remove_override(pages[i],
+						  kmap_ops ?
+						  &kmap_ops[i] : NULL,
+						  mfn);
 		if (ret)
 			goto out;
 	}
@@ -1027,8 +1076,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return ret;
 }
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
+		      struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
+				struct gnttab_map_grant_ref *kmap_ops,
+				struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
+
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
 	BUG_ON(grefs_per_grant_frame == 0);
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index a5af2a26d94f..7ad033dbc845 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,11 +191,15 @@ void gnttab_free_auto_xlat_frames(void);
 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
 
 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-		    struct gnttab_map_grant_ref *kmap_ops,
 		    struct page **pages, unsigned int count);
+int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count);
 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
-		      struct gnttab_map_grant_ref *kunmap_ops,
 		      struct page **pages, unsigned int count);
+int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
+				struct gnttab_map_grant_ref *kunmap_ops,
+				struct page **pages, unsigned int count);
 
 /* Perform a batch of grant map/copy operations. Retry every batch slot
  * for which the hypervisor returns GNTST_eagain. This is typically due
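
For context, a rough caller-side sketch of the resulting API split (not part of the patch): kernel-only backends such as xen-blkback now map foreign grants through the shortened gnttab_map_refs()/gnttab_unmap_refs() pair and get no m2p_override entries, while gntdev keeps supplying kmap_ops through the new *_userspace variants. The helpers below, their names (demo_map_grants, demo_unmap_grants) and their parameters are hypothetical and only illustrate the kernel-only path; error unwinding of partially successful batches is omitted.

#include <linux/mm.h>
#include <xen/grant_table.h>
#include <xen/page.h>

/*
 * Hypothetical helper (illustration only): map 'count' grant references
 * from 'domid' into kernel address space with the new three-argument
 * gnttab_map_refs().  No kmap_ops are passed, so after this patch no
 * m2p_override entries are installed for these pages.
 */
static int demo_map_grants(domid_t domid, grant_ref_t *refs,
			   struct gnttab_map_grant_ref *map,
			   struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	for (i = 0; i < count; i++) {
		unsigned long vaddr =
			(unsigned long)pfn_to_kaddr(page_to_pfn(pages[i]));

		/* Fill in the hypercall arguments for one grant. */
		gnttab_set_map_op(&map[i], vaddr, GNTMAP_host_map,
				  refs[i], domid);
	}

	ret = gnttab_map_refs(map, pages, count);	/* new 3-argument form */
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		if (map[i].status != GNTST_okay)
			return -EINVAL;	/* real code would unmap the rest */

	return 0;
}

/* Matching teardown, again with the shortened gnttab_unmap_refs(). */
static int demo_unmap_grants(struct gnttab_map_grant_ref *map,
			     struct gnttab_unmap_grant_ref *unmap,
			     struct page **pages, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		gnttab_set_unmap_op(&unmap[i], map[i].host_addr,
				    GNTMAP_host_map, map[i].handle);

	return gnttab_unmap_refs(unmap, pages, count);
}

A gntdev-style caller that still needs the kernel-side PTE bookkeeping would instead call gnttab_map_refs_userspace(map_ops, use_ptemod ? kmap_ops : NULL, pages, count) and the matching gnttab_unmap_refs_userspace(), exactly as the drivers/xen/gntdev.c hunks above do.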