-rw-r--r--  arch/x86/include/asm/cacheflush.h |  54
-rw-r--r--  arch/x86/mm/pat.c                 |  91
2 files changed, 102 insertions(+), 43 deletions(-)
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e55dfc1ad453..b54f6afe7ec4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_non_WB			PG_arch_1
-PAGEFLAG(NonWB, non_WB)
+#define PG_WC				PG_arch_1
+PAGEFLAG(WC, WC)
+
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * the memory type of pages that have a backing page struct. X86 PAT
+ * supports 3 different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
+ * _PAGE_CACHE_UC_MINUS, and a fourth state where the page's memory type
+ * has not been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_UC here.
+ *
+ * Caller must hold memtype_lock for atomicity.
+ */
+static inline unsigned long get_page_memtype(struct page *pg)
+{
+	if (!PageUncached(pg) && !PageWC(pg))
+		return -1;
+	else if (!PageUncached(pg) && PageWC(pg))
+		return _PAGE_CACHE_WC;
+	else if (PageUncached(pg) && !PageWC(pg))
+		return _PAGE_CACHE_UC_MINUS;
+	else
+		return _PAGE_CACHE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+{
+	switch (memtype) {
+	case _PAGE_CACHE_WC:
+		ClearPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	case _PAGE_CACHE_UC_MINUS:
+		SetPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	case _PAGE_CACHE_WB:
+		SetPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	default:
+	case -1:
+		ClearPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	}
+}
+#else
+static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
+static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+#endif
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c90f2420f56c..1a9d0f07593f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -288,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two passes:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
  *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
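
[Editor's note] The conflict check and the update are now two full passes
over the range, so a conflict found mid-range fails the request with no side
effects, where the old code had to unwind the pages it had already flagged
(the goto-out loops removed above). A minimal user-space model of that
scan-then-commit pattern, with a plain array standing in for the per-page
flags:

#include <stdio.h>

#define NPAGES	8

/* stands in for per-page memtype state; -1 means "not reserved" */
static long table[NPAGES];

static int reserve_range(int start, int end, long req_type)
{
	int i;

	/* pass 1: look for any conflict; no side effects on failure */
	for (i = start; i < end; i++)
		if (table[i] != -1)
			return -1;	/* the kernel code returns -EBUSY */

	/* pass 2: range is known clean, commit the new type */
	for (i = start; i < end; i++)
		table[i] = req_type;

	return 0;
}

int main(void)
{
	int i;

	for (i = 0; i < NPAGES; i++)
		table[i] = -1;

	printf("reserve [2,5): %d\n", reserve_range(2, 5, 1));	/* 0 */
	printf("reserve [4,6): %d\n", reserve_range(4, 6, 2));	/* -1, overlap */
	return 0;
}

The two passes are only safe because nothing can change the range between
them, which is why the helpers require memtype_lock to be held by the
caller, as the hunks below arrange.
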
@@ -405,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -505,10 +508,16 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 
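
[Editor's note] Since the helpers now merely document "Caller must hold
memtype_lock", both reserve_memtype() and free_memtype() bracket the
RAM-page path with the lock themselves, as the two hunks above show. A
compressed user-space sketch of that caller-held-lock convention, with a
pthread mutex standing in for the kernel spinlock and a stub body for the
helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t memtype_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for reserve_ram_pages_type(); assumes memtype_lock is held */
static int reserve_ram_pages_type_stub(void)
{
	/* ... two-pass check-and-set over the page range ... */
	return 0;
}

/* stand-in for the RAM branch of reserve_memtype() */
static int reserve_memtype_ram(void)
{
	int err;

	/* take the lock around the whole check-then-update sequence */
	pthread_mutex_lock(&memtype_lock);
	err = reserve_ram_pages_type_stub();
	pthread_mutex_unlock(&memtype_lock);

	return err;
}

int main(void)
{
	printf("reserve: %d\n", reserve_memtype_ram());
	return 0;
}

Holding the lock in the caller rather than inside the helper keeps the
conflict scan and the flag update atomic with respect to a concurrent
free_ram_pages_type() on an overlapping range.
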