Diffstat (limited to 'arch/x86/xen/mmu.c')
 -rw-r--r--  arch/x86/xen/mmu.c | 501
 1 file changed, 425 insertions(+), 76 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f72d18c69221..9631c90907eb 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -57,6 +57,7 @@
 #include <asm/linkage.h>
 #include <asm/page.h>
 #include <asm/init.h>
+#include <asm/pat.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -140,7 +141,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
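For context: RESERVE_BRK_ARRAY comes from arch/x86/include/asm/setup.h and turns what used to be a static array into a pointer plus a reservation in the kernel's brk region; the pointer is only filled in later by extend_brk(), as later hunks in this patch do. A simplified sketch of the idea (not the verbatim kernel macro, which routes the reservation through RESERVE_BRK's inline assembly):

	/* Simplified sketch: declare a pointer and reserve room for
	 * 'entries' elements in the brk area; early boot code later
	 * claims the space with extend_brk() and assigns it to the
	 * pointer. */
	#define RESERVE_BRK_ARRAY(type, name, entries)		\
		type *name;					\
		RESERVE_BRK(name, sizeof(type) * (entries))

This is why every RESERVE_BRK_ARRAY variable introduced below gains a matching extend_brk() call in an early-boot path.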
@@ -171,49 +173,182 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
  */
 #define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+/*
+ * Xen leaves the responsibility for maintaining p2m mappings to the
+ * guests themselves, but it must also access and update the p2m array
+ * during suspend/resume when all the pages are reallocated.
+ *
+ * The p2m table is logically a flat array, but we implement it as a
+ * three-level tree to allow the address space to be sparse.
+ *
+ *                      Xen
+ *                       |
+ *      p2m_top                    p2m_top_mfn
+ *        /  \                       /     \
+ * p2m_mid  p2m_mid        p2m_mid_mfn  p2m_mid_mfn
+ *    / \      / \              /           /
+ *  p2m p2m  p2m p2m  p2m p2m  p2m ...
+ *
+ * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
+ *
+ * The p2m_top and p2m_top_mfn levels are limited to 1 page, so the
+ * maximum representable pseudo-physical address space is:
+ *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
+ *
+ * P2M_PER_PAGE depends on the architecture, as a mfn is always
+ * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
+ * 512 and 1024 entries respectively.
+ */
+
+unsigned long xen_max_p2m_pfn __read_mostly;
 
-#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
 
-/* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
 
-/* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+/* Placeholders for holes in the address space */
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
 
-/* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-	__page_aligned_bss;
+RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
+RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
-	return pfn / P2M_ENTRIES_PER_PAGE;
+	BUG_ON(pfn >= MAX_P2M_PFN);
+	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
+}
+
+static inline unsigned p2m_mid_index(unsigned long pfn)
+{
+	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
 static inline unsigned p2m_index(unsigned long pfn)
 {
-	return pfn % P2M_ENTRIES_PER_PAGE;
+	return pfn % P2M_PER_PAGE;
+}
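To make the three-level split concrete: on 64-bit, P2M_PER_PAGE, P2M_MID_PER_PAGE and P2M_TOP_PER_PAGE are all 4096/8 = 512, so a pfn decomposes as in this worked sketch (illustration, not patch code):

	/* 64-bit example: 512 entries per level, so
	 * pfn = topidx * (512 * 512) + mididx * 512 + idx,
	 * and MAX_P2M_PFN = 512^3 = 2^27 pages, i.e. 512GB of
	 * pseudo-physical address space with 4KB pages. */
	unsigned long pfn = 0x12345;		/* 74565 */
	unsigned topidx = pfn / (512 * 512);	/* = 0   */
	unsigned mididx = (pfn / 512) % 512;	/* = 145 */
	unsigned idx    = pfn % 512;		/* = 325 */
	/* the lookup is then p2m_top[topidx][mididx][idx] */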
+
+static void p2m_top_init(unsigned long ***top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing;
+}
+
+static void p2m_top_mfn_init(unsigned long *top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
+}
+
+static void p2m_top_mfn_p_init(unsigned long **top)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
+		top[i] = p2m_mid_missing_mfn;
+}
+
+static void p2m_mid_init(unsigned long **mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = p2m_missing;
+}
+
+static void p2m_mid_mfn_init(unsigned long *mid)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		mid[i] = virt_to_mfn(p2m_missing);
 }
 
-/* Build the parallel p2m_top_mfn structures */
+static void p2m_init(unsigned long *p2m)
+{
+	unsigned i;
+
+	for (i = 0; i < P2M_MID_PER_PAGE; i++)
+		p2m[i] = INVALID_P2M_ENTRY;
+}
+
+/*
+ * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
+ *
+ * This is called both at boot time, and after resuming from suspend:
+ * - At boot time we're called very early, and must use extend_brk()
+ *   to allocate memory.
+ *
+ * - After resume we're called from within stop_machine, but the mfn
+ *   tree should already be completely allocated.
+ */
 void xen_build_mfn_list_list(void)
 {
-	unsigned pfn, idx;
+	unsigned long pfn;
 
-	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
+	/* Pre-initialize p2m_top_mfn to be completely missing */
+	if (p2m_top_mfn == NULL) {
+		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
+
+		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_p_init(p2m_top_mfn_p);
 
-		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
+		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_top_mfn_init(p2m_top_mfn);
+	} else {
+		/* Reinitialise, mfns all change after migration */
+		p2m_mid_mfn_init(p2m_mid_missing_mfn);
 	}
 
-	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
-		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
-		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
+	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
+		unsigned long **mid;
+		unsigned long *mid_mfn_p;
+
+		mid = p2m_top[topidx];
+		mid_mfn_p = p2m_top_mfn_p[topidx];
+
+		/* Don't bother allocating any mfn mid levels if
+		 * they're just missing, just update the stored mfn,
+		 * since all could have changed over a migrate.
+		 */
+		if (mid == p2m_mid_missing) {
+			BUG_ON(mididx);
+			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
+			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
+			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
+			continue;
+		}
+
+		if (mid_mfn_p == p2m_mid_missing_mfn) {
+			/*
+			 * XXX boot-time only!  We should never find
+			 * missing parts of the mfn tree after
+			 * runtime.  extend_brk() will BUG if we call
+			 * it too late.
+			 */
+			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_mfn_init(mid_mfn_p);
+
+			p2m_top_mfn_p[topidx] = mid_mfn_p;
+		}
+
+		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
 	}
 }
 
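The mfn-valued mirror (p2m_top_mfn and friends) exists because Xen and the save/restore toolstack cannot follow the guest's virtual-address pointers in p2m_top. Roughly, a consumer outside the guest would walk the mirror as sketched here; map_mfn() is a hypothetical "map this machine frame" helper, not a real API:

	/* Illustrative only: resolve one pfn via the mfn-based mirror
	 * tree rooted at arch.pfn_to_mfn_frame_list_list. */
	static unsigned long mirror_lookup(unsigned long top_mfn, unsigned long pfn)
	{
		unsigned long *top  = map_mfn(top_mfn);                 /* p2m_top_mfn    */
		unsigned long *mid  = map_mfn(top[p2m_top_index(pfn)]); /* a mid mfn page */
		unsigned long *leaf = map_mfn(mid[p2m_mid_index(pfn)]); /* a p2m leaf     */
		return leaf[p2m_index(pfn)];                            /* mfn for pfn    */
	}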
@@ -222,8 +357,8 @@ void xen_setup_mfn_list_list(void)
 	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
 
 	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
-		virt_to_mfn(p2m_top_mfn_list);
-	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+		virt_to_mfn(p2m_top_mfn);
+	HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -231,98 +366,176 @@ void __init xen_build_dynamic_phys_to_machine(void)
 {
 	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
 	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
-	unsigned pfn;
+	unsigned long pfn;
+
+	xen_max_p2m_pfn = max_pfn;
 
-	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
-		unsigned topidx = p2m_top_index(pfn);
+	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_init(p2m_missing);
+
+	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_mid_init(p2m_mid_missing);
+
+	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
+	p2m_top_init(p2m_top);
 
-		p2m_top[topidx] = &mfn_list[pfn];
-	}
+	/*
+	 * The domain builder gives us a pre-constructed p2m array in
+	 * mfn_list for all the pages initially given to us, so we just
+	 * need to graft that into our tree structure.
+	 */
+	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
+		unsigned topidx = p2m_top_index(pfn);
+		unsigned mididx = p2m_mid_index(pfn);
 
-	xen_build_mfn_list_list();
+		if (p2m_top[topidx] == p2m_mid_missing) {
+			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+			p2m_mid_init(mid);
+
+			p2m_top[topidx] = mid;
+		}
+
+		p2m_top[topidx][mididx] = &mfn_list[pfn];
+	}
 }
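A scale check on the graft loop above (a worked example, not patch code):

	/* 64-bit domain with 4GB of RAM: max_pfn = 0x100000 pages.
	 * Leaf pages grafted:  0x100000 / P2M_PER_PAGE    = 2048
	 * Mid pages allocated: 2048 / P2M_MID_PER_PAGE    = 4
	 * Top entries used:    4 (one per mid page)
	 * So the graft costs only four brk pages of mid tables; the
	 * leaves reuse the domain builder's mfn_list in place. */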
 
 unsigned long get_phys_to_machine(unsigned long pfn)
 {
-	unsigned topidx, idx;
+	unsigned topidx, mididx, idx;
 
-	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+	if (unlikely(pfn >= MAX_P2M_PFN))
 		return INVALID_P2M_ENTRY;
 
 	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);
-	return p2m_top[topidx][idx];
+
+	return p2m_top[topidx][mididx][idx];
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
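Note that callers never see the tree levels: holes simply read back as INVALID_P2M_ENTRY (~0UL), the value the shared p2m_missing placeholder page is filled with. A hedged sketch of a helper a caller might write on top of this:

	/* Sketch: does a machine frame currently back this guest pfn?
	 * (Ballooned-out pages and pfns past the initial allocation
	 * hit a shared "missing" leaf and read back as
	 * INVALID_P2M_ENTRY.) */
	static bool pfn_is_populated(unsigned long pfn)
	{
		return get_phys_to_machine(pfn) != INVALID_P2M_ENTRY;
	}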
 
-/* install a new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
-{
-	unsigned topidx = p2m_top_index(pfn);
-	unsigned long **pfnp, *mfnp;
-	unsigned i;
-
-	pfnp = &p2m_top[topidx];
-	mfnp = &p2m_top_mfn[topidx];
-
-	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
-		p[i] = INVALID_P2M_ENTRY;
-
-	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
-		*mfnp = virt_to_mfn(p);
-		return true;
-	}
-
-	return false;
-}
-
-static void alloc_p2m(unsigned long pfn)
-{
-	unsigned long *p;
-
-	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
-	BUG_ON(p == NULL);
-
-	if (!install_p2mtop_page(pfn, p))
-		free_page((unsigned long)p);
+static void *alloc_p2m_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static void free_p2m_page(void *p)
+{
+	free_page((unsigned long)p);
+}
+
+/*
+ * Fully allocate the p2m structure for a given pfn.  We need to check
+ * that both the top and mid levels are allocated, and make sure the
+ * parallel mfn tree is kept in sync.  We may race with other cpus, so
+ * the new pages are installed with cmpxchg; if we lose the race then
+ * simply free the page we allocated and use the one that's there.
+ */
+static bool alloc_p2m(unsigned long pfn)
+{
+	unsigned topidx, mididx;
+	unsigned long ***top_p, **mid;
+	unsigned long *top_mfn_p, *mid_mfn;
+
+	topidx = p2m_top_index(pfn);
+	mididx = p2m_mid_index(pfn);
+
+	top_p = &p2m_top[topidx];
+	mid = *top_p;
+
+	if (mid == p2m_mid_missing) {
+		/* Mid level is missing, allocate a new one */
+		mid = alloc_p2m_page();
+		if (!mid)
+			return false;
+
+		p2m_mid_init(mid);
+
+		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
+			free_p2m_page(mid);
+	}
+
+	top_mfn_p = &p2m_top_mfn[topidx];
+	mid_mfn = p2m_top_mfn_p[topidx];
+
+	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
+
+	if (mid_mfn == p2m_mid_missing_mfn) {
+		/* Separately check the mid mfn level */
+		unsigned long missing_mfn;
+		unsigned long mid_mfn_mfn;
+
+		mid_mfn = alloc_p2m_page();
+		if (!mid_mfn)
+			return false;
+
+		p2m_mid_mfn_init(mid_mfn);
+
+		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
+		mid_mfn_mfn = virt_to_mfn(mid_mfn);
+		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
+			free_p2m_page(mid_mfn);
+		else
+			p2m_top_mfn_p[topidx] = mid_mfn;
+	}
+
+	if (p2m_top[topidx][mididx] == p2m_missing) {
+		/* p2m leaf page is missing */
+		unsigned long *p2m;
+
+		p2m = alloc_p2m_page();
+		if (!p2m)
+			return false;
+
+		p2m_init(p2m);
+
+		if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing)
+			free_p2m_page(p2m);
+		else
+			mid_mfn[mididx] = virt_to_mfn(p2m);
+	}
+
+	return true;
 }
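The allocate/cmpxchg/free-on-loss sequence used at each level is the standard lock-free way to install a singleton node: every racing CPU allocates, exactly one cmpxchg succeeds, and the losers free their copy and reuse the winner's. Stripped of the p2m specifics, the pattern looks like this (a generic sketch under the same assumptions, not patch code):

	/* Generic install-once pattern, as used per tree level above:
	 * 'slot' initially points at a shared "missing" placeholder. */
	static bool install_node(void **slot, void *placeholder)
	{
		void *node;

		if (*slot != placeholder)
			return true;		/* someone else already won */

		node = alloc_p2m_page();
		if (!node)
			return false;

		if (cmpxchg(slot, placeholder, node) != placeholder)
			free_p2m_page(node);	/* lost the race; keep theirs */

		return true;
	}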
 
 /* Try to install p2m mapping; fail if intermediate bits missing */
 bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
-	unsigned topidx, idx;
+	unsigned topidx, mididx, idx;
 
-	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+	if (unlikely(pfn >= MAX_P2M_PFN)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
 	}
 
 	topidx = p2m_top_index(pfn);
-	if (p2m_top[topidx] == p2m_missing) {
-		if (mfn == INVALID_P2M_ENTRY)
-			return true;
-		return false;
-	}
-
+	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);
-	p2m_top[topidx][idx] = mfn;
+
+	if (p2m_top[topidx][mididx] == p2m_missing)
+		return mfn == INVALID_P2M_ENTRY;
+
+	p2m_top[topidx][mididx][idx] = mfn;
 
 	return true;
 }
 
-void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
 		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
-		return;
+		return true;
 	}
 
 	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
-		alloc_p2m(pfn);
+		if (!alloc_p2m(pfn))
+			return false;
 
 		if (!__set_phys_to_machine(pfn, mfn))
-			BUG();
+			return false;
 	}
+
+	return true;
 }
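With the new bool return, callers can propagate an allocation failure instead of BUGging the machine. A hedged sketch of the expected calling pattern (the helper name and error path are assumptions, not part of this patch):

	/* Sketch: how a caller such as the balloon driver might
	 * consume the new return value when populating a page. */
	static int populate_one_page(unsigned long pfn, unsigned long mfn)
	{
		if (!set_phys_to_machine(pfn, mfn))
			return -ENOMEM;	/* p2m tree allocation failed */
		return 0;
	}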
 
 unsigned long arbitrary_virt_to_mfn(void *vaddr)
@@ -399,7 +612,7 @@ static bool xen_iomap_pte(pte_t pte)
 	return pte_flags(pte) & _PAGE_IOMAP;
 }
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
 	struct multicall_space mcs;
 	struct mmu_update *u;
@@ -411,10 +624,16 @@ static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
 	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
 	u->val = pte_val_ma(pteval);
 
-	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
+EXPORT_SYMBOL_GPL(xen_set_domain_pte);
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+	xen_set_domain_pte(ptep, pteval, DOMID_IO);
+}
 
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
@@ -561,7 +780,20 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
 	if (val & _PAGE_PRESENT) {
 		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 		pteval_t flags = val & PTE_FLAGS_MASK;
-		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
+		unsigned long mfn = pfn_to_mfn(pfn);
+
+		/*
+		 * If there's no mfn for the pfn, then just create an
+		 * empty non-present pte.  Unfortunately this loses
+		 * information about the original pfn, so
+		 * pte_mfn_to_pfn is asymmetric.
+		 */
+		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
+			mfn = 0;
+			flags = 0;
+		}
+
+		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
 	}
 
 	return val;
@@ -583,10 +815,18 @@ static pteval_t iomap_pte(pteval_t val)
 
 pteval_t xen_pte_val(pte_t pte)
 {
-	if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
-		return pte.pte;
+	pteval_t pteval = pte.pte;
+
+	/* If this is a WC pte, convert back from Xen WC to Linux WC */
+	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
+		WARN_ON(!pat_enabled);
+		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
+	}
 
-	return pte_mfn_to_pfn(pte.pte);
+	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
+		return pteval;
+
+	return pte_mfn_to_pfn(pteval);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
@@ -596,10 +836,48 @@ pgdval_t xen_pgd_val(pgd_t pgd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
+/*
+ * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
+ * are reserved for now, to correspond to the Intel-reserved PAT
+ * types.
+ *
+ * We expect Linux's PAT set as follows:
+ *
+ * Idx  PTE flags        Linux    Xen    Default
+ * 0                     WB       WB     WB
+ * 1            PWT      WC       WT     WT
+ * 2        PCD          UC-      UC-    UC-
+ * 3        PCD PWT      UC       UC     UC
+ * 4    PAT              WB       WC     WB
+ * 5    PAT      PWT     WC       WP     WT
+ * 6    PAT PCD          UC-      UC     UC-
+ * 7    PAT PCD PWT      UC       UC     UC
+ */
+
+void xen_set_pat(u64 pat)
+{
+	/* We expect Linux to use a PAT setting of
+	 * UC UC- WC WB (ignoring the PAT flag) */
+	WARN_ON(pat != 0x0007010600070106ull);
+}
+
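The magic constant checked above is an IA32_PAT MSR image: one byte per PAT entry, low byte first, with the type codes 0=UC, 1=WC, 4=WT, 5=WP, 6=WB, 7=UC-. 0x0007010600070106 thus decodes to WB, WC, UC-, UC for entries 0-3 (repeated for 4-7), which matches the "UC UC- WC WB" comment. A decode helper sketch (illustrative, not in the patch):

	/* Illustrative: pull the memory type of PAT entry i out of an
	 * IA32_PAT MSR value.  For 0x0007010600070106, i = 0..3 give
	 * 6 (WB), 1 (WC), 7 (UC-), 0 (UC). */
	static inline unsigned pat_entry(u64 pat, unsigned i)
	{
		return (pat >> (i * 8)) & 0xff;
	}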
 pte_t xen_make_pte(pteval_t pte)
 {
 	phys_addr_t addr = (pte & PTE_PFN_MASK);
 
+	/* If Linux is trying to set a WC pte, then map to the Xen WC.
+	 * If _PAGE_PAT is set, then it probably means it is really
+	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
+	 * things work out OK...
+	 *
+	 * (We should never see kernel mappings with _PAGE_PSE set,
+	 * but we could see hugetlbfs mappings, I think.)
+	 */
+	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
+		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
+			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
+	}
+
 	/*
 	 * Unprivileged domains are allowed to do IOMAPpings for
 	 * PCI passthrough, but not map ISA space.  The ISA
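Taken together, xen_make_pte() above and xen_pte_val() earlier swap Linux's WC encoding (PAT index 1, flag PWT) with Xen's (index 4, flag PAT) in both directions, so a WC mapping round-trips cleanly. Condensed to the flag manipulation alone (same masks as the patch uses):

	/* to Xen:  PWT alone -> PAT alone  (WC index 1 -> index 4) */
	if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
		pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;

	/* back:    PAT alone -> PWT alone  (Xen WC -> Linux WC) */
	if ((pte & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT)
		pte = (pte & ~_PAGE_PAT) | _PAGE_PWT;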
@@ -1712,6 +1990,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	unsigned ident_pte;
 	unsigned long pfn;
 
+	level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+				      PAGE_SIZE);
+
 	ident_pte = 0;
 	pfn = 0;
 	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1722,7 +2003,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 			pte_page = m2v(pmd[pmdidx].pmd);
 		else {
 			/* Check for free pte pages */
-			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+			if (ident_pte == LEVEL1_IDENT_ENTRIES)
 				break;
 
 			pte_page = &level1_ident_pgt[ident_pte];
@@ -1837,13 +2118,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	return pgd;
 }
 #else	/* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 					 unsigned long max_pfn)
 {
 	pmd_t *kernel_pmd;
 
+	level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
 				  512*1024);
@@ -2269,6 +2552,72 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+	unsigned long mfn;
+	pgprot_t prot;
+	struct mmu_update *mmu_update;
+};
+
+static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+				 unsigned long addr, void *data)
+{
+	struct remap_data *rmd = data;
+	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
+
+	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+	rmd->mmu_update->val = pte_val_ma(pte);
+	rmd->mmu_update++;
+
+	return 0;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       unsigned long mfn, int nr,
+			       pgprot_t prot, unsigned domid)
+{
+	struct remap_data rmd;
+	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+	int batch;
+	unsigned long range;
+	int err = 0;
+
+	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
+
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+
+	rmd.mfn = mfn;
+	rmd.prot = prot;
+
+	while (nr) {
+		batch = min(REMAP_BATCH_SIZE, nr);
+		range = (unsigned long)batch << PAGE_SHIFT;
+
+		rmd.mmu_update = mmu_update;
+		err = apply_to_page_range(vma->vm_mm, addr, range,
+					  remap_area_mfn_pte_fn, &rmd);
+		if (err)
+			goto out;
+
+		err = -EFAULT;
+		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+			goto out;
+
+		nr -= batch;
+		addr += range;
+	}
+
+	err = 0;
+out:
+
+	flush_tlb_all();
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
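This export lets a privileged guest map another domain's (or machine) frames into its own address space; a privcmd-style mmap handler is the natural consumer. A hypothetical caller sketch (the function name and surrounding plumbing are assumptions, not part of this patch):

	/* Hypothetical caller: map 'nr' machine frames starting at
	 * 'mfn' into a userspace vma, e.g. from an mmap ioctl for
	 * domain 'domid'. */
	static int map_foreign_frames(struct vm_area_struct *vma,
				      unsigned long mfn, int nr, unsigned domid)
	{
		return xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
						  vma->vm_page_prot, domid);
	}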
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;