Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/pat.c | 8 --------
1 file changed, 0 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 951011166ef5..501fc60e5e4d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -190,8 +190,6 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
  * Here we do two pass:
  * - Find the memtype of all the pages in the range, look for any conflicts
  * - In case of no conflicts, set the new memtype for pages in the range
- *
- * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                   unsigned long *new_type)
@@ -297,9 +295,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
         is_range_ram = pat_pagerange_is_ram(start, end);
         if (is_range_ram == 1) {
 
-                spin_lock(&memtype_lock);
                 err = reserve_ram_pages_type(start, end, req_type, new_type);
-                spin_unlock(&memtype_lock);
 
                 return err;
         } else if (is_range_ram < 0) {
@@ -351,9 +347,7 @@ int free_memtype(u64 start, u64 end)
         is_range_ram = pat_pagerange_is_ram(start, end);
         if (is_range_ram == 1) {
 
-                spin_lock(&memtype_lock);
                 err = free_ram_pages_type(start, end);
-                spin_unlock(&memtype_lock);
 
                 return err;
         } else if (is_range_ram < 0) {
@@ -394,10 +388,8 @@ static unsigned long lookup_memtype(u64 paddr)
 
         if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                 struct page *page;
-                spin_lock(&memtype_lock);
                 page = pfn_to_page(paddr >> PAGE_SHIFT);
                 rettype = get_page_memtype(page);
-                spin_unlock(&memtype_lock);
                 /*
                  * -1 from get_page_memtype() implies RAM page is in its
                  * default state and not reserved, and hence of type WB
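
For context on the locking this diff drops: removing memtype_lock around the RAM-page paths is only safe if the per-page memtype state is itself updated atomically. The stand-alone C sketch below is an illustration of that general pattern, not the kernel's actual code -- struct fake_page, reserve_page_memtype(), the MEMTYPE_* values and the flag layout are all invented for the example. It shows a compare-and-swap loop that confines updates to the memtype bits of a flags word, so callers need no external lock.

/*
 * Hypothetical sketch: per-page "memtype" kept in a flags word and updated
 * with compare-and-swap instead of a global spin lock. Names and flag
 * layout are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MEMTYPE_MASK    0x3ul   /* two flag bits reserved for memtype       */
#define MEMTYPE_WB      0x0ul   /* default: write-back, i.e. not reserved   */
#define MEMTYPE_WC      0x1ul   /* write-combining                          */
#define MEMTYPE_UC      0x2ul   /* uncached                                 */

struct fake_page {
        _Atomic unsigned long flags;    /* stand-in for struct page::flags  */
};

/* Reading the memtype bits is a single atomic load; no lock needed. */
static unsigned long fake_get_page_memtype(struct fake_page *p)
{
        return atomic_load(&p->flags) & MEMTYPE_MASK;
}

/*
 * Try to move the page from the default (WB/unreserved) state to req_type.
 * The cmpxchg loop modifies only the memtype bits; a racing update to the
 * flags word simply causes a retry, so callers need no external lock for
 * atomicity.
 */
static int reserve_page_memtype(struct fake_page *p, unsigned long req_type)
{
        unsigned long old_flags, new_flags;

        do {
                old_flags = atomic_load(&p->flags);
                if ((old_flags & MEMTYPE_MASK) != MEMTYPE_WB)
                        return -1;      /* already reserved: conflict */
                new_flags = (old_flags & ~MEMTYPE_MASK) |
                            (req_type & MEMTYPE_MASK);
        } while (!atomic_compare_exchange_weak(&p->flags, &old_flags,
                                               new_flags));

        return 0;
}

int main(void)
{
        struct fake_page page = { .flags = 0 };

        printf("reserve WC: %d\n", reserve_page_memtype(&page, MEMTYPE_WC)); /* 0  */
        printf("reserve UC: %d\n", reserve_page_memtype(&page, MEMTYPE_UC)); /* -1 */
        printf("memtype now: %lu\n", fake_get_page_memtype(&page));          /* 1  */
        return 0;
}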