author     Venki Pallipadi <venkatesh.pallipadi@intel.com>	2008-08-19 19:28:01 -0400
committer  Ingo Molnar <mingo@elte.hu>	2008-08-20 06:08:37 -0400
commit     80c5e73d6028e0f03ab0c70e7c4cbf98ac2e0c43 (patch)
tree       b1ae13cc96dd94e7090942e5602df6277a4126e4 /arch/x86
parent     c6744955d0ec0cb485c28c51eeb7185e260f6172 (diff)
x86: fix Xorg startup/shutdown slowdown with PAT
Rene Herman reported significant Xorg startup/shutdown slowdown due
to PAT. It turns out that the memtype list has thousands of entries.
Add a cached_entry to the list-add routine to speed up the
lookup for sequential reserve_memtype() calls.
Reported-by: Rene Herman <rene.herman@keyaccess.nl>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
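
[Editor's note: the caching trick in this patch generalizes to any sorted
linked list that sees mostly-ascending inserts. Below is a minimal,
self-contained user-space C sketch of the same cached-cursor idea, with
hypothetical names (this is not the kernel's memtype code): the insert
routine resumes its scan from the last insertion point whenever the new
range sorts after it, instead of rescanning from the head.]

/*
 * Minimal user-space sketch of the cached-cursor idea (hypothetical
 * names, not the kernel's memtype code): keep a pointer to the last
 * insertion point in a sorted singly linked list, and resume the
 * scan there whenever the next range sorts after it.
 */
#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long long start, end;
	struct range *next;
};

static struct range *head;                 /* sorted by ->start */
static struct range *cached;               /* last inserted node */
static unsigned long long cached_start;    /* its ->start */

static void insert_range(unsigned long long start, unsigned long long end)
{
	struct range *new = malloc(sizeof(*new));
	struct range **link = &head;

	if (!new)
		exit(EXIT_FAILURE);
	new->start = start;
	new->end = end;

	/*
	 * Sequential callers insert in ascending order, so the scan
	 * can usually resume right after the cached node instead of
	 * walking the whole list from the head.
	 */
	if (cached && start >= cached_start)
		link = &cached->next;

	while (*link && (*link)->start < start)
		link = &(*link)->next;

	new->next = *link;
	*link = new;
	cached = new;
	cached_start = start;
}

int main(void)
{
	struct range *r, *tmp;
	unsigned long long i;

	/* Ascending inserts: each lookup starts at the cached node. */
	for (i = 0; i < 8; i++)
		insert_range(i * 0x1000, (i + 1) * 0x1000);

	for (r = head; r; r = tmp) {
		tmp = r->next;
		printf("0x%llx-0x%llx\n", r->start, r->end);
		free(r);
	}
	return 0;
}

[The kernel patch applies the same idea to the doubly linked
memtype_list, and additionally invalidates the cache in free_memtype()
so a freed entry is never reached through a stale pointer.]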
Diffstat (limited to 'arch/x86')
 arch/x86/mm/pat.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 2fe30916d4b6..bb6e8a267bfe 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;