aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/xen/page.h8
-rw-r--r--arch/x86/xen/p2m.c236
2 files changed, 240 insertions, 4 deletions
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8ea977277c55..65fa4f26aa34 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -29,8 +29,10 @@ typedef struct xpaddr {
29 29
30/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ 30/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
31#define INVALID_P2M_ENTRY (~0UL) 31#define INVALID_P2M_ENTRY (~0UL)
32#define FOREIGN_FRAME_BIT (1UL<<31) 32#define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1))
33#define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2))
33#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) 34#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
35#define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT)
34 36
35/* Maximum amount of memory we can handle in a domain in pages */ 37/* Maximum amount of memory we can handle in a domain in pages */
36#define MAX_DOMAIN_PAGES \ 38#define MAX_DOMAIN_PAGES \
@@ -42,6 +44,8 @@ extern unsigned int machine_to_phys_order;
42extern unsigned long get_phys_to_machine(unsigned long pfn); 44extern unsigned long get_phys_to_machine(unsigned long pfn);
43extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 45extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
44extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 46extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
47extern unsigned long set_phys_range_identity(unsigned long pfn_s,
48 unsigned long pfn_e);
45 49
46extern int m2p_add_override(unsigned long mfn, struct page *page); 50extern int m2p_add_override(unsigned long mfn, struct page *page);
47extern int m2p_remove_override(struct page *page); 51extern int m2p_remove_override(struct page *page);
@@ -58,7 +62,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
58 mfn = get_phys_to_machine(pfn); 62 mfn = get_phys_to_machine(pfn);
59 63
60 if (mfn != INVALID_P2M_ENTRY) 64 if (mfn != INVALID_P2M_ENTRY)
61 mfn &= ~FOREIGN_FRAME_BIT; 65 mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
62 66
63 return mfn; 67 return mfn;
64} 68}
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index df4e36775339..809fe3536301 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -23,6 +23,129 @@
23 * P2M_PER_PAGE depends on the architecture, as a mfn is always 23 * P2M_PER_PAGE depends on the architecture, as a mfn is always
24 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to 24 * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to
25 * 512 and 1024 entries respectively. 25 * 512 and 1024 entries respectively.
26 *
27 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
28 *
29 * However not all entries are filled with MFNs. Specifically for all other
30 * leaf entries, or for the top root, or middle one, for which there is a void
31 * entry, we assume it is "missing". So (for example)
32 * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
33 *
34 * We also have the possibility of setting 1-1 mappings on certain regions, so
35 * that:
36 * pfn_to_mfn(0xc0000)=0xc0000
37 *
38 * The benefit of this is, that we can assume for non-RAM regions (think
39 * PCI BARs, or ACPI spaces), we can create mappings easily b/c we
40 * get the PFN value to match the MFN.
41 *
42 * For this to work efficiently we have one new page p2m_identity and
 43 * allocate (via reserve_brk) any other pages we need to cover the sides
44 * (1GB or 4MB boundary violations). All entries in p2m_identity are set to
45 * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs,
46 * no other fancy value).
47 *
48 * On lookup we spot that the entry points to p2m_identity and return the
49 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
50 * If the entry points to an allocated page, we just proceed as before and
51 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
52 * appropriate functions (pfn_to_mfn).
53 *
54 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
55 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 56 * non-identity pfn. To protect ourselves against that, we elect to set (and get) the
57 * IDENTITY_FRAME_BIT on all identity mapped PFNs.
58 *
59 * This simplistic diagram is used to explain the more subtle piece of code.
 60 * There is also a diagram of the P2M at the end that can help.
61 * Imagine your E820 looking as so:
62 *
63 * 1GB 2GB
64 * /-------------------+---------\/----\ /----------\ /---+-----\
65 * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
66 * \-------------------+---------/\----/ \----------/ \---+-----/
67 * ^- 1029MB ^- 2001MB
68 *
69 * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100),
70 * 2048MB = 524288 (0x80000)]
71 *
72 * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB
73 * is actually not present (would have to kick the balloon driver to put it in).
74 *
75 * When we are told to set the PFNs for identity mapping (see patch: "xen/setup:
76 * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start
77 * of the PFN and the end PFN (263424 and 512256 respectively). The first step
78 * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
79 * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
80 * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn
81 * to end pfn. We reserve_brk top leaf pages if they are missing (means they
82 * point to p2m_mid_missing).
83 *
84 * With the E820 example above, 263424 is not 1GB aligned so we allocate a
85 * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
86 * Each entry in the allocate page is "missing" (points to p2m_missing).
87 *
88 * Next stage is to determine if we need to do a more granular boundary check
89 * on the 4MB (or 2MB depending on architecture) off the start and end pfn's.
90 * We check if the start pfn and end pfn violate that boundary check, and if
91 * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer
92 * granularity of setting which PFNs are missing and which ones are identity.
93 * In our example 263424 and 512256 both fail the check so we reserve_brk two
94 * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
95 * values) and assign them to p2m[1][2] and p2m[1][488] respectively.
96 *
97 * At this point we would at minimum reserve_brk one page, but could be up to
98 * three. Each call to set_phys_range_identity has at maximum a three page
99 * cost. If we were to query the P2M at this stage, all those entries from
100 * start PFN through end PFN (so 1029MB -> 2001MB) would return
101 * INVALID_P2M_ENTRY ("missing").
102 *
103 * The next step is to walk from the start pfn to the end pfn setting
104 * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
105 * If we find that the middle leaf is pointing to p2m_missing we can swap it
106 * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this
 107 * point we do not need to worry about boundary alignment (so no need to
108 * reserve_brk a middle page, figure out which PFNs are "missing" and which
109 * ones are identity), as that has been done earlier. If we find that the
110 * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
111 * that page (which covers 512 PFNs) and set the appropriate PFN with
112 * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we
113 * set from p2m[1][2][256->511] and p2m[1][488][0->256] with
114 * IDENTITY_FRAME_BIT set.
115 *
116 * All other regions that are void (or not filled) either point to p2m_missing
117 * (considered missing) or have the default value of INVALID_P2M_ENTRY (also
118 * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
119 * contain the INVALID_P2M_ENTRY value and are considered "missing."
120 *
121 * This is what the p2m ends up looking (for the E820 above) with this
122 * fabulous drawing:
123 *
124 * p2m /--------------\
125 * /-----\ | &mfn_list[0],| /-----------------\
126 * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. |
127 * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] |
128 * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] |
129 * |-----| \ | [p2m_identity]+\\ | .... |
130 * | 2 |--\ \-------------------->| ... | \\ \----------------/
131 * |-----| \ \---------------/ \\
132 * | 3 |\ \ \\ p2m_identity
133 * |-----| \ \-------------------->/---------------\ /-----------------\
134 * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... |
135 * \-----/ / | [p2m_identity]+-->| ..., ~0 |
136 * / /---------------\ | .... | \-----------------/
137 * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. |
138 * / | IDENTITY[@256]|<----/ \---------------/
139 * / | ~0, ~0, .... |
140 * | \---------------/
141 * |
142 * p2m_missing p2m_missing
143 * /------------------\ /------------\
144 * | [p2m_mid_missing]+---->| ~0, ~0, ~0 |
145 * | [p2m_mid_missing]+---->| ..., ~0 |
146 * \------------------/ \------------/
147 *
148 * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
26 */ 149 */
27 150
28#include <linux/init.h> 151#include <linux/init.h>
@@ -59,9 +182,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
59static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); 182static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
60static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); 183static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
61 184
185static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
186
62RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 187RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
63RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 188RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
64 189
190/* We might hit two boundary violations at the start and end, at max each
191 * boundary violation will require three middle nodes. */
192RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3);
193
65static inline unsigned p2m_top_index(unsigned long pfn) 194static inline unsigned p2m_top_index(unsigned long pfn)
66{ 195{
67 BUG_ON(pfn >= MAX_P2M_PFN); 196 BUG_ON(pfn >= MAX_P2M_PFN);
@@ -221,6 +350,9 @@ void __init xen_build_dynamic_phys_to_machine(void)
221 p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); 350 p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
222 p2m_top_init(p2m_top); 351 p2m_top_init(p2m_top);
223 352
353 p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
354 p2m_init(p2m_identity);
355
224 /* 356 /*
225 * The domain builder gives us a pre-constructed p2m array in 357 * The domain builder gives us a pre-constructed p2m array in
226 * mfn_list for all the pages initially given to us, so we just 358 * mfn_list for all the pages initially given to us, so we just
@@ -272,6 +404,14 @@ unsigned long get_phys_to_machine(unsigned long pfn)
272 mididx = p2m_mid_index(pfn); 404 mididx = p2m_mid_index(pfn);
273 idx = p2m_index(pfn); 405 idx = p2m_index(pfn);
274 406
407 /*
408 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
409 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
410 * would be wrong.
411 */
412 if (p2m_top[topidx][mididx] == p2m_identity)
413 return IDENTITY_FRAME(pfn);
414
275 return p2m_top[topidx][mididx][idx]; 415 return p2m_top[topidx][mididx][idx];
276} 416}
277EXPORT_SYMBOL_GPL(get_phys_to_machine); 417EXPORT_SYMBOL_GPL(get_phys_to_machine);
@@ -341,9 +481,11 @@ static bool alloc_p2m(unsigned long pfn)
341 p2m_top_mfn_p[topidx] = mid_mfn; 481 p2m_top_mfn_p[topidx] = mid_mfn;
342 } 482 }
343 483
344 if (p2m_top[topidx][mididx] == p2m_missing) { 484 if (p2m_top[topidx][mididx] == p2m_identity ||
485 p2m_top[topidx][mididx] == p2m_missing) {
345 /* p2m leaf page is missing */ 486 /* p2m leaf page is missing */
346 unsigned long *p2m; 487 unsigned long *p2m;
488 unsigned long *p2m_orig = p2m_top[topidx][mididx];
347 489
348 p2m = alloc_p2m_page(); 490 p2m = alloc_p2m_page();
349 if (!p2m) 491 if (!p2m)
@@ -351,7 +493,7 @@ static bool alloc_p2m(unsigned long pfn)
351 493
352 p2m_init(p2m); 494 p2m_init(p2m);
353 495
354 if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) 496 if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
355 free_p2m_page(p2m); 497 free_p2m_page(p2m);
356 else 498 else
357 mid_mfn[mididx] = virt_to_mfn(p2m); 499 mid_mfn[mididx] = virt_to_mfn(p2m);
@@ -360,6 +502,82 @@ static bool alloc_p2m(unsigned long pfn)
360 return true; 502 return true;
361} 503}
362 504
505bool __early_alloc_p2m(unsigned long pfn)
506{
507 unsigned topidx, mididx, idx;
508
509 topidx = p2m_top_index(pfn);
510 mididx = p2m_mid_index(pfn);
511 idx = p2m_index(pfn);
512
513 /* Pfff.. No boundary cross-over, lets get out. */
514 if (!idx)
515 return false;
516
517 WARN(p2m_top[topidx][mididx] == p2m_identity,
518 "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
519 topidx, mididx);
520
521 /*
522 * Could be done by xen_build_dynamic_phys_to_machine..
523 */
524 if (p2m_top[topidx][mididx] != p2m_missing)
525 return false;
526
527 /* Boundary cross-over for the edges: */
528 if (idx) {
529 unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
530
531 p2m_init(p2m);
532
533 p2m_top[topidx][mididx] = p2m;
534
535 }
536 return idx != 0;
537}
538unsigned long set_phys_range_identity(unsigned long pfn_s,
539 unsigned long pfn_e)
540{
541 unsigned long pfn;
542
543 if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
544 return 0;
545
546 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
547 return pfn_e - pfn_s;
548
549 if (pfn_s > pfn_e)
550 return 0;
551
552 for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
553 pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
554 pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
555 {
556 unsigned topidx = p2m_top_index(pfn);
557 if (p2m_top[topidx] == p2m_mid_missing) {
558 unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
559
560 p2m_mid_init(mid);
561
562 p2m_top[topidx] = mid;
563 }
564 }
565
566 __early_alloc_p2m(pfn_s);
567 __early_alloc_p2m(pfn_e);
568
569 for (pfn = pfn_s; pfn < pfn_e; pfn++)
570 if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
571 break;
572
573 if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
574 "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
575 (pfn_e - pfn_s) - (pfn - pfn_s)))
576 printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);
577
578 return pfn - pfn_s;
579}
580
363/* Try to install p2m mapping; fail if intermediate bits missing */ 581/* Try to install p2m mapping; fail if intermediate bits missing */
364bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) 582bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
365{ 583{
@@ -378,6 +596,20 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
378 mididx = p2m_mid_index(pfn); 596 mididx = p2m_mid_index(pfn);
379 idx = p2m_index(pfn); 597 idx = p2m_index(pfn);
380 598
599 /* For sparse holes were the p2m leaf has real PFN along with
600 * PCI holes, stick in the PFN as the MFN value.
601 */
602 if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
603 if (p2m_top[topidx][mididx] == p2m_identity)
604 return true;
605
606 /* Swap over from MISSING to IDENTITY if needed. */
607 if (p2m_top[topidx][mididx] == p2m_missing) {
608 p2m_top[topidx][mididx] = p2m_identity;
609 return true;
610 }
611 }
612
381 if (p2m_top[topidx][mididx] == p2m_missing) 613 if (p2m_top[topidx][mididx] == p2m_missing)
382 return mfn == INVALID_P2M_ENTRY; 614 return mfn == INVALID_P2M_ENTRY;
383 615