author		Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com>	2010-02-10 18:26:07 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2010-02-18 18:41:36 -0500
commit		9e41a49aab88a5a6c8f4875bf10a5543bc321f2d (patch)
tree		65a4921f5a7973f017e66b9ca00b6427eb3a6c83 /arch/x86/mm/pat.c
parent		be5a0c126ad1dea2128dc5aef12c87083518d1ab (diff)
x86, pat: Migrate to rbtree only backend for pat memtype management
Move the PAT backend from the existing rbtree and linked list hybrid to a
fully rbtree based implementation. The new rbtree based solution uses
interval trees (augmented rbtrees) in order to store the PAT ranges. The new
code separates out the PAT backend into a pat_rbtree.c file, making it
cleaner. The change also makes the PAT lookup, reserve and free operations
more optimal, as we no longer have to traverse a linear linked list of a few
tens of entries in the normal case.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
LKML-Reference: <20100210232607.GB11465@linux-os.sc.intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
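The interval-tree idea the message refers to: every node in the start-sorted
tree also caches the maximum end address found anywhere in its subtree, so a
lookup can discard whole subtrees that cannot contain an overlap. Below is a
minimal, self-contained sketch of that augmentation on a plain unbalanced
BST; it is illustrative only, does not reproduce the kernel's pat_rbtree.c,
and every name in it is invented.

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch only: the max_end augmentation on an unbalanced BST. The
 * kernel keeps the same invariant inside an rbtree.
 */
struct range_node {
	unsigned long long start, end;		/* [start, end) */
	unsigned long long max_end;		/* max end in this subtree */
	struct range_node *left, *right;
};

static struct range_node *range_insert(struct range_node *n,
				       unsigned long long start,
				       unsigned long long end)
{
	if (!n) {
		n = calloc(1, sizeof(*n));
		n->start = start;
		n->end = n->max_end = end;
		return n;
	}
	if (start <= n->start)
		n->left = range_insert(n->left, start, end);
	else
		n->right = range_insert(n->right, start, end);
	if (end > n->max_end)
		n->max_end = end;		/* propagate the augmentation */
	return n;
}

/* Return any stored range overlapping [start, end), or NULL. */
static struct range_node *range_overlap(struct range_node *n,
					unsigned long long start,
					unsigned long long end)
{
	while (n) {
		if (n->start < end && start < n->end)
			return n;
		if (n->left && n->left->max_end > start)
			n = n->left;	/* an overlap, if any, is on the left */
		else
			n = n->right;
	}
	return NULL;
}

int main(void)
{
	struct range_node *root = NULL;

	root = range_insert(root, 0x1000, 0x2000);
	root = range_insert(root, 0x3000, 0x4000);
	printf("0x1800-0x2800 overlaps: %s\n",
	       range_overlap(root, 0x1800, 0x2800) ? "yes" : "no");
	return 0;
}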
Diffstat (limited to 'arch/x86/mm/pat.c')
-rw-r--r--	arch/x86/mm/pat.c	209
1 file changed, 5 insertions(+), 204 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 628e507b7936..951011166ef5 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -130,65 +130,7 @@ void pat_init(void)
 
 #undef PAT
 
-/*
- * The global memtype list keeps track of memory type for specific
- * physical memory areas. Conflicting memory types in different
- * mappings can cause CPU cache corruption. To avoid this we keep track.
- *
- * The list is sorted based on starting address and can contain multiple
- * entries for each address (this allows reference counting for overlapping
- * areas). All the aliases have the same cache attributes of course.
- * Zero attributes are represented as holes.
- *
- * The data structure is a list that is also organized as an rbtree
- * sorted on the start address of memtype range.
- *
- * memtype_lock protects both the linear list and rbtree.
- */
-
-static struct rb_root memtype_rbroot = RB_ROOT;
-static LIST_HEAD(memtype_list);
-static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
-
-static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
-{
-	struct rb_node *node = root->rb_node;
-	struct memtype *last_lower = NULL;
-
-	while (node) {
-		struct memtype *data = container_of(node, struct memtype, rb);
-
-		if (data->start < start) {
-			last_lower = data;
-			node = node->rb_right;
-		} else if (data->start > start) {
-			node = node->rb_left;
-		} else
-			return data;
-	}
-
-	/* Will return NULL if there is no entry with its start <= start */
-	return last_lower;
-}
-
-static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
-{
-	struct rb_node **new = &(root->rb_node);
-	struct rb_node *parent = NULL;
-
-	while (*new) {
-		struct memtype *this = container_of(*new, struct memtype, rb);
-
-		parent = *new;
-		if (data->start <= this->start)
-			new = &((*new)->rb_left);
-		else if (data->start > this->start)
-			new = &((*new)->rb_right);
-	}
-
-	rb_link_node(&data->rb, parent, new);
-	rb_insert_color(&data->rb, root);
-}
+static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
 
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
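A note on the memtype_rb_search() removed above: it is a floor lookup,
returning the node whose start equals the query or, failing that, the
rightmost node whose start lies below it (the "last_lower"); the old caller
then walked the companion list from there. The same semantics restated over
a sorted array, with floor_index() a hypothetical helper rather than kernel
code:

#include <stdio.h>

static int floor_index(const unsigned long long *starts, int n,
		       unsigned long long key)
{
	int lo = 0, hi = n - 1, best = -1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;

		if (starts[mid] == key)
			return mid;		/* exact start match */
		if (starts[mid] < key) {
			best = mid;		/* rightmost start below key */
			lo = mid + 1;
		} else {
			hi = mid - 1;
		}
	}
	return best;				/* -1: every start is above key */
}

int main(void)
{
	unsigned long long starts[] = { 0x1000, 0x3000, 0x5000 };

	printf("floor(0x4000) -> index %d\n", floor_index(starts, 3, 0x4000));
	return 0;
}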
@@ -216,33 +158,6 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
-static int
-chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
-{
-	if (new->type != entry->type) {
-		if (type) {
-			new->type = entry->type;
-			*type = entry->type;
-		} else
-			goto conflict;
-	}
-
-	/* check overlaps with more than one entry in the list */
-	list_for_each_entry_continue(entry, &memtype_list, nd) {
-		if (new->end <= entry->start)
-			break;
-		else if (new->type != entry->type)
-			goto conflict;
-	}
-	return 0;
-
- conflict:
-	printk(KERN_INFO "%s:%d conflicting memory types "
-	       "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
-	       new->end, cattr_name(new->type), cattr_name(entry->type));
-	return -EBUSY;
-}
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
 	int ram_page = 0, not_rampage = 0;
@@ -328,64 +243,6 @@ static int free_ram_pages_type(u64 start, u64 end)
 	return 0;
 }
 
-static int memtype_check_insert(struct memtype *new, unsigned long *new_type)
-{
-	struct memtype *entry;
-	u64 start, end;
-	unsigned long actual_type;
-	struct list_head *where;
-	int err = 0;
-
-	start = new->start;
-	end = new->end;
-	actual_type = new->type;
-
-	/* Search for existing mapping that overlaps the current range */
-	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
-		if (end <= entry->start) {
-			where = entry->nd.prev;
-			break;
-		} else if (start <= entry->start) { /* end > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-				where = entry->nd.prev;
-			}
-			break;
-		} else if (start < entry->end) { /* start > entry->start */
-			err = chk_conflict(new, entry, new_type);
-			if (!err) {
-				dprintk("Overlap at 0x%Lx-0x%Lx\n",
-					entry->start, entry->end);
-
-				/*
-				 * Move to right position in the linked
-				 * list to add this new entry
-				 */
-				list_for_each_entry_continue(entry,
-							&memtype_list, nd) {
-					if (start <= entry->start) {
-						where = entry->nd.prev;
-						break;
-					}
-				}
-			}
-			break;
-		}
-	}
-	if (!err) {
-		if (where)
-			list_add(&new->nd, where);
-		else
-			list_add_tail(&new->nd, &memtype_list);
-
-		memtype_rb_insert(&memtype_rbroot, new);
-	}
-	return err;
-}
-
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
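The memtype_check_insert()/chk_conflict() pair deleted above enforced a
single rule: registrations may overlap only if every overlapped entry
carries the same cache type. A compact sketch of that rule over a
start-sorted table; the types, names, and data are invented for
illustration:

#include <stdio.h>

struct range {
	unsigned long long start, end;	/* [start, end), sorted by start */
	int type;
};

/* 0 if [start, end) with 'type' may coexist with 'tab', -1 on conflict. */
static int check_conflict(const struct range *tab, int n,
			  unsigned long long start, unsigned long long end,
			  int type)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].start >= end)
			break;		/* sorted: nothing overlaps past here */
		if (tab[i].end > start && tab[i].type != type)
			return -1;	/* overlap with a differing type */
	}
	return 0;
}

int main(void)
{
	struct range tab[] = {
		{ 0x1000, 0x2000, 0 /* WB */ },
		{ 0x3000, 0x4000, 1 /* UC */ },
	};

	printf("reserve WB 0x1800-0x3800: %d\n",
	       check_conflict(tab, 2, 0x1800, 0x3800, 0));
	return 0;
}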
@@ -459,7 +316,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	err = memtype_check_insert(new, new_type);
+	err = rbt_memtype_check_insert(new, new_type);
 	if (err) {
 		printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
 		       "track %s, req %s\n",
@@ -481,7 +338,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *entry, *saved_entry;
 	int err = -EINVAL;
 	int is_range_ram;
 
@@ -505,46 +361,7 @@ int free_memtype(u64 start, u64 end)
 	}
 
 	spin_lock(&memtype_lock);
-
-	entry = memtype_rb_search(&memtype_rbroot, start);
-	if (unlikely(entry == NULL))
-		goto unlock_ret;
-
-	/*
-	 * Saved entry points to an entry with start same or less than what
-	 * we searched for. Now go through the list in both directions to look
-	 * for the entry that matches with both start and end, with list stored
-	 * in sorted start address
-	 */
-	saved_entry = entry;
-	list_for_each_entry_from(entry, &memtype_list, nd) {
-		if (entry->start == start && entry->end == end) {
-			rb_erase(&entry->rb, &memtype_rbroot);
-			list_del(&entry->nd);
-			kfree(entry);
-			err = 0;
-			break;
-		} else if (entry->start > start) {
-			break;
-		}
-	}
-
-	if (!err)
-		goto unlock_ret;
-
-	entry = saved_entry;
-	list_for_each_entry_reverse(entry, &memtype_list, nd) {
-		if (entry->start == start && entry->end == end) {
-			rb_erase(&entry->rb, &memtype_rbroot);
-			list_del(&entry->nd);
-			kfree(entry);
-			err = 0;
-			break;
-		} else if (entry->start < start) {
-			break;
-		}
-	}
-unlock_ret:
+	err = rbt_memtype_erase(start, end);
 	spin_unlock(&memtype_lock);
 
 	if (err) {
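The free path removed above scanned in both directions from the floor node
because reference-counted aliases may share a start address, and only the
entry matching both start and end may be erased. The same exact-match rule,
sketched over a start-sorted array stand-in with hypothetical data:

#include <stdio.h>

struct range {
	unsigned long long start, end;	/* sorted by start, dups allowed */
};

static int find_exact(const struct range *tab, int n,
		      unsigned long long start, unsigned long long end)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].start > start)
			break;			/* sorted: no match further on */
		if (tab[i].start == start && tab[i].end == end)
			return i;
	}
	return -1;				/* caller reports the failure */
}

int main(void)
{
	struct range tab[] = {
		{ 0x1000, 0x2000 },
		{ 0x1000, 0x3000 },	/* alias sharing the same start */
	};

	printf("free 0x1000-0x3000 -> index %d\n",
	       find_exact(tab, 2, 0x1000, 0x3000));
	return 0;
}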
@@ -593,7 +410,7 @@ static unsigned long lookup_memtype(u64 paddr)
 
 	spin_lock(&memtype_lock);
 
-	entry = memtype_rb_search(&memtype_rbroot, paddr);
+	entry = rbt_memtype_lookup(paddr);
 	if (entry != NULL)
 		rettype = entry->type;
 	else
@@ -930,22 +747,6 @@ EXPORT_SYMBOL_GPL(pgprot_writecombine);
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
-/* get Nth element of the linked list */
-static int copy_memtype_nth_element(struct memtype *out, loff_t pos)
-{
-	struct memtype *list_node;
-	int i = 1;
-
-	list_for_each_entry(list_node, &memtype_list, nd) {
-		if (pos == i) {
-			*out = *list_node;
-			return 0;
-		}
-		++i;
-	}
-	return 1;
-}
-
 static struct memtype *memtype_get_idx(loff_t pos)
 {
 	struct memtype *print_entry;
@@ -956,7 +757,7 @@ static struct memtype *memtype_get_idx(loff_t pos)
 		return NULL;
 
 	spin_lock(&memtype_lock);
-	ret = copy_memtype_nth_element(print_entry, pos);
+	ret = rbt_memtype_copy_nth_element(print_entry, pos);
 	spin_unlock(&memtype_lock);
 
 	if (!ret) {
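Finally, the debugfs dump fetches entries by position. Over a tree backend,
rbt_memtype_copy_nth_element() plausibly amounts to a counted in-order walk;
the bare-BST version below is a guess at the idea, not the kernel
implementation, and its node layout is assumed. 'pos' is 1-based, matching
the old list walker above.

#include <stdio.h>

struct node {
	unsigned long long start;
	struct node *left, *right;
};

static struct node *nth_inorder(struct node *n, int *pos)
{
	struct node *found;

	if (!n)
		return NULL;
	found = nth_inorder(n->left, pos);
	if (found)
		return found;
	if (--(*pos) == 0)
		return n;			/* this is the Nth node */
	return nth_inorder(n->right, pos);
}

int main(void)
{
	struct node a = { 0x1000, NULL, NULL };
	struct node c = { 0x5000, NULL, NULL };
	struct node b = { 0x3000, &a, &c };	/* b is the root */
	int pos = 2;

	printf("2nd entry starts at 0x%llx\n", nth_inorder(&b, &pos)->start);
	return 0;
}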