Diffstat (limited to 'arch/x86/mm/pat.c'):

 arch/x86/mm/pat.c | 91 +++++++++++++++++++++++++++++--------------------
 1 files changed, 50 insertions(+), 41 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c90f2420f56c..1a9d0f07593f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -288,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two pass:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
  *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
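The get_page_memtype()/set_page_memtype() helpers used above are introduced elsewhere in this series, not in this hunk. A minimal sketch of the idea, assuming the encoding folds one of three memtypes (or "none") into two existing page-flag bits so struct page does not grow; the bit assignments below are illustrative, not necessarily the kernel's actual encoding:

/*
 * Sketch only -- the real helper definitions live in the arch headers.
 * Two page-flag bits that are otherwise unused for these RAM pages
 * encode four states: WB, WC, UC_MINUS, and "no memtype tracked".
 */
#define _PGMT_DEFAULT	0			/* no memtype tracked */
#define _PGMT_WC	(1UL << PG_arch_1)
#define _PGMT_UC_MINUS	(1UL << PG_uncached)
#define _PGMT_WB	((1UL << PG_uncached) | (1UL << PG_arch_1))
#define _PGMT_MASK	((1UL << PG_uncached) | (1UL << PG_arch_1))

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;			/* matches the "type != -1" check above */
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags;
	unsigned long old_flags, new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	default:
		memtype_flags = _PGMT_DEFAULT;	/* -1: clear tracking */
		break;
	}

	/* Other page flags may change concurrently; update only our bits, atomically. */
	do {
		old_flags = pg->flags;
		new_flags = (old_flags & ~_PGMT_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}

Reusing PG_uncached and PG_arch_1 this way avoids the limitation of the old single-bit PageNonWB scheme, which could not distinguish UC_MINUS from WC.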
@@ -405,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
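One visible behavior change from the new conflict check: reserving a RAM range that is already tracked now fails up front with -EBUSY, and the conflicting type is reported back through *new_type. A hypothetical caller (paddr and size are placeholder names, not from this patch) would see:

/* Hypothetical driver code; paddr/size are placeholders. */
unsigned long got;
int err;

err = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_WC, &got);
if (!err) {
	/* Pages in [paddr, paddr + size) are now tracked as WC. */

	/* A second reservation of any type on the same range... */
	err = reserve_memtype(paddr, paddr + size, _PAGE_CACHE_UC_MINUS, &got);
	/* ...fails: err == -EBUSY, got == _PAGE_CACHE_WC (the current holder). */

	free_memtype(paddr, paddr + size);	/* resets pages to "no memtype" */
}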
@@ -505,10 +508,16 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 
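Both call sites wrap the RAM-page helpers in memtype_lock because each helper is a two-pass walk: check every page, then write every page. The "Caller must hold memtype_lock for atomicity" comment added above is load-bearing; an illustrative (non-kernel) trace of the race the lock prevents:

/*
 * Race prevented by the lock, as a pseudocode interleaving:
 *
 *	CPU0				CPU1
 *	----				----
 *	pass 1: every page is -1
 *					pass 1: every page is -1
 *	pass 2: set pages to WC
 *					pass 2: set pages to UC_MINUS
 *
 * Both callers return 0, yet CPU1 silently overwrote CPU0's memtype.
 * Holding memtype_lock across both passes makes the check-then-set
 * sequence atomic, exactly as the call sites above now do:
 */
spin_lock(&memtype_lock);
err = reserve_ram_pages_type(start, end, req_type, new_type);
spin_unlock(&memtype_lock);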