Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--	arch/x86/mm/pat.c	360
1 file changed, 265 insertions(+), 95 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index b2f7d3e59b86..e78cd0ec2bcf 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -80,6 +81,7 @@ enum {
 void pat_init(void)
 {
 	u64 pat;
+	bool boot_cpu = !boot_pat_state;
 
 	if (!pat_enabled)
 		return;
@@ -121,8 +123,10 @@ void pat_init(void)
 	rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
 
 	wrmsrl(MSR_IA32_CR_PAT, pat);
-	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
-	       smp_processor_id(), boot_pat_state, pat);
+
+	if (boot_cpu)
+		printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
+		       smp_processor_id(), boot_pat_state, pat);
 }
 
 #undef PAT
@@ -148,11 +152,10 @@ static char *cattr_name(unsigned long flags)
  * areas). All the aliases have the same cache attributes of course.
  * Zero attributes are represented as holes.
  *
- * Currently the data structure is a list because the number of mappings
- * are expected to be relatively small. If this should be a problem
- * it could be changed to a rbtree or similar.
+ * The data structure is a list that is also organized as an rbtree
+ * sorted on the start address of memtype range.
  *
- * memtype_lock protects the whole list.
+ * memtype_lock protects both the linear list and rbtree.
  */
 
 struct memtype {
@@ -160,11 +163,53 @@ struct memtype {
 	u64 end;
 	unsigned long type;
 	struct list_head nd;
+	struct rb_node rb;
 };
 
+static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
 
+static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
+{
+	struct rb_node *node = root->rb_node;
+	struct memtype *last_lower = NULL;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		if (data->start < start) {
+			last_lower = data;
+			node = node->rb_right;
+		} else if (data->start > start) {
+			node = node->rb_left;
+		} else
+			return data;
+	}
+
+	/* Will return NULL if there is no entry with its start <= start */
+	return last_lower;
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct memtype *this = container_of(*new, struct memtype, rb);
+
+		parent = *new;
+		if (data->start <= this->start)
+			new = &((*new)->rb_left);
+		else if (data->start > this->start)
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&data->rb, parent, new);
+	rb_insert_color(&data->rb, root);
+}
+
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
  * the resulting memory type as PAT understands it.
@@ -218,9 +263,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 	return -EBUSY;
 }
 
-static struct memtype *cached_entry;
-static u64 cached_start;
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
 	int ram_page = 0, not_rampage = 0;
@@ -249,63 +291,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two pass:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
  *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
@@ -339,6 +379,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type) {
 		if (req_type == -1)
 			*new_type = _PAGE_CACHE_WB;
+		else if (req_type == _PAGE_CACHE_WC)
+			*new_type = _PAGE_CACHE_UC_MINUS;
 		else
 			*new_type = req_type & _PAGE_CACHE_MASK;
 	}
@@ -364,11 +406,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -380,17 +427,11 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	if (cached_entry && start >= cached_start)
-		entry = cached_entry;
-	else
-		entry = list_entry(&memtype_list, struct memtype, nd);
-
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry_continue(entry, &memtype_list, nd) {
+	list_for_each_entry(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
-			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -398,8 +439,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
-				cached_entry = list_entry(where,
-						struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -407,8 +446,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				cached_entry = list_entry(entry->nd.prev,
-						struct memtype, nd);
 
 				/*
 				 * Move to right position in the linked
@@ -436,13 +473,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
-	cached_start = start;
-
 	if (where)
 		list_add(&new->nd, where);
 	else
 		list_add_tail(&new->nd, &memtype_list);
 
+	memtype_rb_insert(&memtype_rbroot, new);
+
 	spin_unlock(&memtype_lock);
 
 	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -454,7 +491,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *entry;
+	struct memtype *entry, *saved_entry;
 	int err = -EINVAL;
 	int is_range_ram;
 
@@ -466,23 +503,58 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
-	list_for_each_entry(entry, &memtype_list, nd) {
+
+	entry = memtype_rb_search(&memtype_rbroot, start);
+	if (unlikely(entry == NULL))
+		goto unlock_ret;
+
+	/*
+	 * Saved entry points to an entry with start same or less than what
+	 * we searched for. Now go through the list in both directions to look
+	 * for the entry that matches with both start and end, with list stored
+	 * in sorted start address
+	 */
+	saved_entry = entry;
+	list_for_each_entry_from(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
-			if (cached_entry == entry || cached_start == start)
-				cached_entry = NULL;
+			rb_erase(&entry->rb, &memtype_rbroot);
+			list_del(&entry->nd);
+			kfree(entry);
+			err = 0;
+			break;
+		} else if (entry->start > start) {
+			break;
+		}
+	}
 
+	if (!err)
+		goto unlock_ret;
+
+	entry = saved_entry;
+	list_for_each_entry_reverse(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			rb_erase(&entry->rb, &memtype_rbroot);
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
 			break;
+		} else if (entry->start < start) {
+			break;
 		}
 	}
+unlock_ret:
 	spin_unlock(&memtype_lock);
 
 	if (err) {
@@ -496,6 +568,101 @@ int free_memtype(u64 start, u64 end)
 }
 
 
+/**
+ * lookup_memtype - Looksup the memory type for a physical address
+ * @paddr: physical address of which memory type needs to be looked up
+ *
+ * Only to be called when PAT is enabled
+ *
+ * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
+ * _PAGE_CACHE_UC
+ */
+static unsigned long lookup_memtype(u64 paddr)
+{
+	int rettype = _PAGE_CACHE_WB;
+	struct memtype *entry;
+
+	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+		return rettype;
+
+	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
+		struct page *page;
+		spin_lock(&memtype_lock);
+		page = pfn_to_page(paddr >> PAGE_SHIFT);
+		rettype = get_page_memtype(page);
+		spin_unlock(&memtype_lock);
+		/*
+		 * -1 from get_page_memtype() implies RAM page is in its
+		 * default state and not reserved, and hence of type WB
+		 */
+		if (rettype == -1)
+			rettype = _PAGE_CACHE_WB;
+
+		return rettype;
+	}
+
+	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, paddr);
+	if (entry != NULL)
+		rettype = entry->type;
+	else
+		rettype = _PAGE_CACHE_UC_MINUS;
+
+	spin_unlock(&memtype_lock);
+	return rettype;
+}
+
+/**
+ * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ * @type: A pointer to memtype, with requested type. On success, requested
+ * or any other compatible type that was available for the region is returned
+ *
+ * On success, returns 0
+ * On failure, returns non-zero
+ */
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type)
+{
+	resource_size_t size = end - start;
+	unsigned long req_type = *type;
+	unsigned long new_type;
+	int ret;
+
+	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
+
+	ret = reserve_memtype(start, end, req_type, &new_type);
+	if (ret)
+		goto out_err;
+
+	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+		goto out_free;
+
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+		goto out_free;
+
+	*type = new_type;
+	return 0;
+
+out_free:
+	free_memtype(start, end);
+	ret = -EBUSY;
+out_err:
+	return ret;
+}
+
+/**
+ * io_free_memtype - Release a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ */
+void io_free_memtype(resource_size_t start, resource_size_t end)
+{
+	free_memtype(start, end);
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
@@ -577,7 +744,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (!pat_enabled || base >= __pa(high_memory))
+	if (base >= __pa(high_memory))
 		return 0;
 
 	id_sz = (__pa(high_memory) < base + size) ?
@@ -612,11 +779,29 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
 	/*
-	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
-	 * behavior with RAM pages by returning success.
+	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
+	 * track of number of mappings of RAM pages. We can assert that
+	 * the type requested matches the type of first page in the range.
 	 */
-	if (is_ram != 0)
+	if (is_ram) {
+		if (!pat_enabled)
+			return 0;
+
+		flags = lookup_memtype(paddr);
+		if (want_flags != flags) {
+			printk(KERN_WARNING
+			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+					     (~_PAGE_CACHE_MASK)) |
+					     flags);
+		}
 		return 0;
+	}
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -678,14 +863,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
@@ -713,23 +890,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
+	unsigned long flags;
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
+	if (!pat_enabled)
+		return 0;
+
+	/* for vm_insert_pfn and friends, we set prot based on lookup */
+	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
 	return 0;
 }
 
@@ -744,14 +922,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;