Diffstat (limited to 'arch')

-rw-r--r--   arch/x86/mm/pat.c | 106
1 file changed, 86 insertions, 20 deletions
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 82d097ce3091..c90f2420f56c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -148,11 +149,10 @@ static char *cattr_name(unsigned long flags)
  * areas). All the aliases have the same cache attributes of course.
  * Zero attributes are represented as holes.
  *
- * Currently the data structure is a list because the number of mappings
- * are expected to be relatively small. If this should be a problem
- * it could be changed to a rbtree or similar.
+ * The data structure is a list that is also organized as an rbtree,
+ * sorted on the start address of the memtype range.
+ *
- * memtype_lock protects the whole list.
+ * memtype_lock protects both the linear list and the rbtree.
  */
 
 struct memtype {
@@ -160,11 +160,53 @@ struct memtype {
         u64 end;
         unsigned long type;
         struct list_head nd;
+        struct rb_node rb;
 };
 
+static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);   /* protects memtype list */
 
+static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
+{
+        struct rb_node *node = root->rb_node;
+        struct memtype *last_lower = NULL;
+
+        while (node) {
+                struct memtype *data = container_of(node, struct memtype, rb);
+
+                if (data->start < start) {
+                        last_lower = data;
+                        node = node->rb_right;
+                } else if (data->start > start) {
+                        node = node->rb_left;
+                } else
+                        return data;
+        }
+
+        /* Will return NULL if there is no entry with its start <= start */
+        return last_lower;
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
+{
+        struct rb_node **new = &(root->rb_node);
+        struct rb_node *parent = NULL;
+
+        while (*new) {
+                struct memtype *this = container_of(*new, struct memtype, rb);
+
+                parent = *new;
+                if (data->start <= this->start)
+                        new = &((*new)->rb_left);
+                else if (data->start > this->start)
+                        new = &((*new)->rb_right);
+        }
+
+        rb_link_node(&data->rb, parent, new);
+        rb_insert_color(&data->rb, root);
+}
+
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
  * the resulting memory type as PAT understands it.
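A note on the two helpers added above: memtype_rb_search() is a floor lookup, not an exact-match lookup. While descending it remembers the last node whose start lies below the key, so it returns the entry with the largest start <= the requested start, or NULL when every entry starts above it. The following stand-alone userspace sketch shows the same descent on a plain (unbalanced) binary search tree so it can be compiled and run; the node type, helper names, and test values are invented for the illustration, and the kernel version walks struct rb_node nodes kept balanced by rb_insert_color() under memtype_lock.

/*
 * Userspace sketch of the "floor" descent used by memtype_rb_search().
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        unsigned long long start;
        struct node *left, *right;
};

/* Return the node with the largest start <= key, or NULL if none. */
static struct node *floor_search(struct node *root, unsigned long long key)
{
        struct node *last_lower = NULL;

        while (root) {
                if (root->start < key) {
                        last_lower = root;      /* candidate; look right */
                        root = root->right;
                } else if (root->start > key) {
                        root = root->left;
                } else {
                        return root;            /* exact match */
                }
        }
        return last_lower;
}

/* Unbalanced insert, <= to the left, mirroring memtype_rb_insert(). */
static struct node *insert(struct node *root, unsigned long long start)
{
        if (!root) {
                root = calloc(1, sizeof(*root));
                root->start = start;
        } else if (start <= root->start) {
                root->left = insert(root->left, start);
        } else {
                root->right = insert(root->right, start);
        }
        return root;
}

int main(void)
{
        struct node *root = NULL;
        unsigned long long starts[] = { 0x1000, 0x4000, 0x8000 };
        struct node *hit;
        unsigned int i;

        for (i = 0; i < 3; i++)
                root = insert(root, starts[i]);

        /* 0x5000 has no exact match; the floor entry starts at 0x4000. */
        hit = floor_search(root, 0x5000);
        printf("floor(0x5000) -> 0x%llx\n", hit ? hit->start : 0ULL);
        return 0;
}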
@@ -218,9 +260,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
         return -EBUSY;
 }
 
-static struct memtype *cached_entry;
-static u64 cached_start;
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
         int ram_page = 0, not_rampage = 0;
@@ -382,17 +421,19 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
         spin_lock(&memtype_lock);
 
-        if (cached_entry && start >= cached_start)
-                entry = cached_entry;
-        else
+        entry = memtype_rb_search(&memtype_rbroot, new->start);
+        if (likely(entry != NULL)) {
+                /* To work correctly with list_for_each_entry_continue */
+                entry = list_entry(entry->nd.prev, struct memtype, nd);
+        } else {
                 entry = list_entry(&memtype_list, struct memtype, nd);
+        }
 
         /* Search for existing mapping that overlaps the current range */
         where = NULL;
         list_for_each_entry_continue(entry, &memtype_list, nd) {
                 if (end <= entry->start) {
                         where = entry->nd.prev;
-                        cached_entry = list_entry(where, struct memtype, nd);
                         break;
                 } else if (start <= entry->start) {     /* end > entry->start */
                         err = chk_conflict(new, entry, new_type);
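A subtlety in the hunk above: list_for_each_entry_continue() begins iterating at the element *after* the one it is handed, so the rbtree hit is first stepped back one node via entry->nd.prev, which makes the loop's first iteration the hit itself. When the search returns NULL (every entry starts above new->start), the list-head pseudo-entry is used so the scan covers the whole list. A condensed, comment-annotated restatement of just that seeding step, assuming memtype_lock is held (not a complete function):

/* Seed the list walk from the rbtree floor lookup. */
entry = memtype_rb_search(&memtype_rbroot, new->start);
if (likely(entry != NULL)) {
        /*
         * list_for_each_entry_continue() starts at entry's successor,
         * so back up one node to revisit the floor hit itself.
         */
        entry = list_entry(entry->nd.prev, struct memtype, nd);
} else {
        /* Nothing starts at or below new->start: walk the whole list. */
        entry = list_entry(&memtype_list, struct memtype, nd);
}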
@@ -400,8 +441,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                         dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                 entry->start, entry->end);
                         where = entry->nd.prev;
-                        cached_entry = list_entry(where,
-                                        struct memtype, nd);
                 }
                 break;
         } else if (start < entry->end) {        /* start > entry->start */
@@ -409,8 +448,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                         if (!err) {
                                 dprintk("Overlap at 0x%Lx-0x%Lx\n",
                                         entry->start, entry->end);
-                                cached_entry = list_entry(entry->nd.prev,
-                                                struct memtype, nd);
 
                                 /*
                                  * Move to right position in the linked
@@ -438,13 +475,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                 return err;
         }
 
-        cached_start = start;
-
         if (where)
                 list_add(&new->nd, where);
         else
                 list_add_tail(&new->nd, &memtype_list);
 
+        memtype_rb_insert(&memtype_rbroot, new);
+
         spin_unlock(&memtype_lock);
 
         dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
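Taken together with the removal side in the final hunk below, the invariant is that the two indexes stay in lockstep: every list insertion is mirrored by memtype_rb_insert(), every removal pairs rb_erase() with list_del(), and both always happen under memtype_lock. A condensed sketch of the pairing drawn from the hunks in this patch, with error paths and surrounding code elided:

/* Insert path (reserve_memtype), condensed: */
spin_lock(&memtype_lock);
if (where)
        list_add(&new->nd, where);      /* keep list sorted by start */
else
        list_add_tail(&new->nd, &memtype_list);
memtype_rb_insert(&memtype_rbroot, new);        /* mirror into rbtree */
spin_unlock(&memtype_lock);

/* Remove path (free_memtype), condensed: */
spin_lock(&memtype_lock);
rb_erase(&entry->rb, &memtype_rbroot);  /* erase from both structures */
list_del(&entry->nd);
kfree(entry);
spin_unlock(&memtype_lock);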
@@ -456,7 +493,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-        struct memtype *entry;
+        struct memtype *entry, *saved_entry;
         int err = -EINVAL;
         int is_range_ram;
 
@@ -474,17 +511,46 @@ int free_memtype(u64 start, u64 end)
                 return -EINVAL;
 
         spin_lock(&memtype_lock);
+
+        entry = memtype_rb_search(&memtype_rbroot, start);
+        if (unlikely(entry == NULL))
+                goto unlock_ret;
+
+        /*
+         * saved_entry points to an entry whose start address is the same
+         * as or less than the one we searched for. Now go through the
+         * list in both directions to look for the entry that matches
+         * both start and end, relying on the list being sorted by start.
+         */
+        saved_entry = entry;
         list_for_each_entry(entry, &memtype_list, nd) {
                 if (entry->start == start && entry->end == end) {
-                        if (cached_entry == entry || cached_start == start)
-                                cached_entry = NULL;
+                        rb_erase(&entry->rb, &memtype_rbroot);
+                        list_del(&entry->nd);
+                        kfree(entry);
+                        err = 0;
+                        break;
+                } else if (entry->start > start) {
+                        break;
+                }
+        }
+
+        if (!err)
+                goto unlock_ret;
 
+        entry = saved_entry;
+        list_for_each_entry_reverse(entry, &memtype_list, nd) {
+                if (entry->start == start && entry->end == end) {
+                        rb_erase(&entry->rb, &memtype_rbroot);
                         list_del(&entry->nd);
                         kfree(entry);
                         err = 0;
                         break;
+                } else if (entry->start < start) {
+                        break;
                 }
         }
+unlock_ret:
         spin_unlock(&memtype_lock);
 
         if (err) {
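Why scan in both directions? memtype_rb_insert() sends equal start addresses to the left child, so several entries can share a start, and a search may land on any one of them; the comment in the hunk above says the free path therefore looks both ways for the entry matching both start and end. The stand-alone userspace sketch below illustrates that idea on a plain array sorted by start; the struct, helper name, test values, and the 'idx' parameter (standing in for the floor hit) are all invented for the illustration, and the kernel loops walk the linked list rather than an array.

/*
 * Userspace sketch of the "scan both directions" idea in free_memtype().
 */
#include <stdio.h>

struct range { unsigned long long start, end; };

/* Find the element matching both start and end, or return -1. */
static int find_exact(const struct range *r, int n, int idx,
                      unsigned long long start, unsigned long long end)
{
        int i;

        /* Forward: starts are sorted, so stop once they pass the target. */
        for (i = idx; i < n && r[i].start <= start; i++)
                if (r[i].start == start && r[i].end == end)
                        return i;

        /* Backward: a duplicate start may place the match before idx. */
        for (i = idx - 1; i >= 0 && r[i].start >= start; i--)
                if (r[i].start == start && r[i].end == end)
                        return i;

        return -1;
}

int main(void)
{
        /* Two ranges share start 0x4000; a floor search may hit either. */
        const struct range r[] = {
                { 0x1000, 0x2000 }, { 0x4000, 0x5000 }, { 0x4000, 0x6000 },
        };

        /*
         * Pretend the floor lookup landed on index 2; only the reverse
         * pass finds the (0x4000, 0x5000) entry at index 1.
         */
        printf("match at index %d\n", find_exact(r, 3, 2, 0x4000, 0x5000));
        return 0;
}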