Diffstat (limited to 'drivers/lguest/page_tables.c')
-rw-r--r--   drivers/lguest/page_tables.c   78
1 file changed, 53 insertions(+), 25 deletions(-)
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d1a5de45be02..19611b0551cd 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -7,7 +7,7 @@
  * converted Guest pages when running the Guest.
 :*/
 
-/* Copyright (C) Rusty Russell IBM Corporation 2006.
+/* Copyright (C) Rusty Russell IBM Corporation 2013.
  * GPL v2 and any later version */
 #include <linux/mm.h>
 #include <linux/gfp.h>
@@ -731,6 +731,9 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
         /* Release all the non-kernel mappings. */
         flush_user_mappings(cpu->lg, next);
 
+        /* This hasn't run on any CPU at all. */
+        cpu->lg->pgdirs[next].last_host_cpu = -1;
+
         return next;
 }
 
@@ -790,6 +793,7 @@ static void release_all_pagetables(struct lguest *lg)
                 for (j = 0; j < PTRS_PER_PGD; j++)
                         release_pgd(lg->pgdirs[i].pgdir + j);
                 lg->pgdirs[i].switcher_mapped = false;
+                lg->pgdirs[i].last_host_cpu = -1;
         }
 }
 
@@ -1086,37 +1090,62 @@ void free_guest_pagetable(struct lguest *lg)
                 free_page((long)lg->pgdirs[i].pgdir);
 }
 
+/*H:481
+ * This clears the Switcher mappings for cpu #i.
+ */
+static void remove_switcher_percpu_map(struct lg_cpu *cpu, unsigned int i)
+{
+        unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2;
+        pte_t *pte;
+
+        /* Clear the mappings for both pages. */
+        pte = find_spte(cpu, base, false, 0, 0);
+        release_pte(*pte);
+        set_pte(pte, __pte(0));
+
+        pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
+        release_pte(*pte);
+        set_pte(pte, __pte(0));
+}
+
 /*H:480
  * (vi) Mapping the Switcher when the Guest is about to run.
  *
- * The Switcher and the two pages for this CPU need to be visible in the
- * Guest (and not the pages for other CPUs).
+ * The Switcher and the two pages for this CPU need to be visible in the Guest
+ * (and not the pages for other CPUs).
  *
- * The pages have all been allocated
+ * The pages for the pagetables have all been allocated before: we just need
+ * to make sure the actual PTEs are up-to-date for the CPU we're about to run
+ * on.
  */
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
-        unsigned long base, i;
+        unsigned long base;
         struct page *percpu_switcher_page, *regs_page;
         pte_t *pte;
+        struct pgdir *pgdir = &cpu->lg->pgdirs[cpu->cpu_pgd];
 
-        /* Switcher page should always be mapped! */
-        BUG_ON(!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped);
-
-        /* Clear all the Switcher mappings for any other CPUs. */
-        /* FIXME: This is dumb: update only when Host CPU changes. */
-        for_each_possible_cpu(i) {
-                /* Get location of lguest_pages (indexed by Host CPU) */
-                base = switcher_addr + PAGE_SIZE
-                        + i * sizeof(struct lguest_pages);
+        /* Switcher page should always be mapped by now! */
+        BUG_ON(!pgdir->switcher_mapped);
 
-                /* Get shadow PTE for first page (where we put guest regs). */
-                pte = find_spte(cpu, base, false, 0, 0);
-                set_pte(pte, __pte(0));
+        /*
+         * Remember that we have two pages for each Host CPU, so we can run a
+         * Guest on each CPU without them interfering. We need to make sure
+         * those pages are mapped correctly in the Guest, but since we usually
+         * run on the same CPU, we cache that, and only update the mappings
+         * when we move.
+         */
+        if (pgdir->last_host_cpu == raw_smp_processor_id())
+                return;
 
-                /* This is where we put R/O state. */
-                pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
-                set_pte(pte, __pte(0));
+        /* -1 means unknown so we remove everything. */
+        if (pgdir->last_host_cpu == -1) {
+                unsigned int i;
+                for_each_possible_cpu(i)
+                        remove_switcher_percpu_map(cpu, i);
+        } else {
+                /* We know exactly what CPU mapping to remove. */
+                remove_switcher_percpu_map(cpu, pgdir->last_host_cpu);
         }
 
         /*
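
The new last_host_cpu field (initialised to -1 in new_pgdir() and reset in release_all_pagetables() earlier in this patch) lets each shadow page directory remember which Host CPU it last ran on, so the per-CPU Switcher PTEs above are only torn down and rebuilt when the Guest actually migrates. The following is a minimal, standalone C sketch of that caching pattern, not lguest code: the names pgdir_cache, map_percpu_pages(), unmap_percpu_pages() and run_on_cpu() are invented purely for illustration.

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the per-pgdir state; -1 means "never run anywhere yet". */
struct pgdir_cache {
        int last_host_cpu;
        int mapped[NR_CPUS];            /* which per-CPU pages are mapped */
};

static void unmap_percpu_pages(struct pgdir_cache *pg, int cpu)
{
        pg->mapped[cpu] = 0;
        printf("  unmap per-CPU pages for cpu %d\n", cpu);
}

static void map_percpu_pages(struct pgdir_cache *pg, int cpu)
{
        pg->mapped[cpu] = 1;
        printf("  map per-CPU pages for cpu %d\n", cpu);
}

/* Mirrors the shape of the patched map_switcher_in_guest(). */
static void run_on_cpu(struct pgdir_cache *pg, int cpu)
{
        printf("running on cpu %d\n", cpu);

        /* Fast path: same Host CPU as last time, mappings still valid. */
        if (pg->last_host_cpu == cpu)
                return;

        if (pg->last_host_cpu == -1) {
                /* Unknown history: clear every possible CPU's pages. */
                for (int i = 0; i < NR_CPUS; i++)
                        unmap_percpu_pages(pg, i);
        } else {
                /* We know exactly which stale mapping to remove. */
                unmap_percpu_pages(pg, pg->last_host_cpu);
        }

        map_percpu_pages(pg, cpu);
        pg->last_host_cpu = cpu;
}

int main(void)
{
        struct pgdir_cache pg = { .last_host_cpu = -1 };

        run_on_cpu(&pg, 0);     /* first run: clears all, then maps cpu 0 */
        run_on_cpu(&pg, 0);     /* cached: returns immediately */
        run_on_cpu(&pg, 2);     /* migrated: unmaps cpu 0 only, maps cpu 2 */
        return 0;
}
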
@@ -1140,18 +1169,17 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
          * the Guest: the IDT, GDT and other things it's not supposed to
          * change.
          */
-        base += PAGE_SIZE;
-        pte = find_spte(cpu, base, false, 0, 0);
-
+        pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
         percpu_switcher_page
                 = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
         get_page(percpu_switcher_page);
         set_pte(pte, mk_pte(percpu_switcher_page,
                     __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
+
+        pgdir->last_host_cpu = raw_smp_processor_id();
 }
-/*:*/
 
-/*
+/*H:490
  * We've made it through the page table code. Perhaps our tired brains are
  * still processing the details, or perhaps we're simply glad it's over.
  *
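
The address arithmetic in remove_switcher_percpu_map() above (switcher_addr + PAGE_SIZE + i * PAGE_SIZE*2) and the page mapped at base + PAGE_SIZE in this final hunk assume a layout of one shared Switcher text page followed by two pages per Host CPU: the Guest-register page first, then the read-only state page. Here is a tiny standalone C sketch of that layout calculation; the switcher_addr value is invented purely for illustration.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_CPUS   4

int main(void)
{
        /* Illustrative base address only; the real one is chosen by lguest. */
        unsigned long switcher_addr = 0xffc00000UL;

        for (unsigned int i = 0; i < NR_CPUS; i++) {
                /* Skip the shared text page, then two pages per Host CPU. */
                unsigned long base = switcher_addr + PAGE_SIZE + i * PAGE_SIZE * 2;

                printf("cpu %u: regs page @ %#lx, R/O page @ %#lx\n",
                       i, base, base + PAGE_SIZE);
        }
        return 0;
}

In the hunk above it is that second, read-only page (percpu_switcher_page) that gets remapped for the current Host CPU, and last_host_cpu is recorded only once the new mapping is in place, which is what makes the early return in the earlier hunk safe.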