author		Suresh Siddha <suresh.b.siddha@intel.com>	2008-09-24 11:53:33 -0400
committer	Ingo Molnar <mingo@elte.hu>			2008-10-10 13:29:18 -0400
commit		9542ada803198e6eba29d3289abb39ea82047b92 (patch)
tree		3a99406988d1e8643b91840614cfd3340408a42f /arch
parent		ad5ca55f6bdb47c957b681c7358bb3719ba4ee82 (diff)
x86: track memtype for RAM in page struct
Track the memtype for RAM pages in the page struct instead of in the
memtype list. This avoids the explosion in the number of memtype-list
entries (on the order of 20,000 with AGP) and makes PAT tracking
simpler.

We use the PG_arch_1 bit in page->flags for this.

The memtype list is still used for non-RAM pages.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
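
For reference: the PageNonWB()/SetPageNonWB()/ClearPageNonWB() helpers used
in the pat.c hunk below live outside arch/ and are therefore not shown in
this arch-limited diffstat. A minimal sketch of equivalent accessors built
on the PG_arch_1 bit (an illustration of the mechanism, not the exact hunk
from this commit):

	#include <linux/page-flags.h>	/* PG_arch_1 */
	#include <linux/mm_types.h>	/* struct page */
	#include <linux/bitops.h>	/* test_bit(), set_bit(), clear_bit() */

	/* Sketch: what the PageNonWB family amounts to -- test, set and
	 * clear the PG_arch_1 bit in page->flags. */
	static inline int PageNonWB(struct page *page)
	{
		return test_bit(PG_arch_1, &page->flags);
	}

	static inline void SetPageNonWB(struct page *page)
	{
		set_bit(PG_arch_1, &page->flags);
	}

	static inline void ClearPageNonWB(struct page *page)
	{
		clear_bit(PG_arch_1, &page->flags);
	}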
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/ioremap.c	19
-rw-r--r--	arch/x86/mm/pat.c	83
2 files changed, 102 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d4b6e6a29ae3..d03c461e045e 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -83,6 +83,25 @@ int page_is_ram(unsigned long pagenr)
 	return 0;
 }
 
+int pagerange_is_ram(unsigned long start, unsigned long end)
+{
+	int ram_page = 0, not_rampage = 0;
+	unsigned long page_nr;
+
+	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
+	      ++page_nr) {
+		if (page_is_ram(page_nr))
+			ram_page = 1;
+		else
+			not_rampage = 1;
+
+		if (ram_page == not_rampage)
+			return -1;
+	}
+
+	return ram_page;
+}
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
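
pagerange_is_ram() above is deliberately tristate: it returns 1 when every
page in the range is RAM, 0 when none is, and -1 for a mixed range. The
mixed case works because both flags start at 0 and the comparison runs only
after one of them has just been set, so ram_page and not_rampage compare
equal exactly when both kinds of page have been seen. A sketch of the
calling convention the pat.c hunks below rely on:

	/* Caller-side contract (as used by reserve_memtype() below):
	 * mixed ranges are rejected, pure RAM ranges are tracked in
	 * struct page, and pure non-RAM ranges fall through to the
	 * existing memtype list. */
	int is_range_ram = pagerange_is_ram(start, end);
	if (is_range_ram == 1)
		return reserve_ram_pages_type(start, end, req_type, new_type);
	else if (is_range_ram < 0)
		return -EINVAL;
	/* is_range_ram == 0: fall through to the memtype-list path */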
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f049b1d6ebdf..aceb6c7c6dba 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -211,6 +211,75 @@ static struct memtype *cached_entry;
 static u64 cached_start;
 
 /*
+ * RED-PEN: TODO: Add PageReserved() check as well here,
+ * once we add SetPageReserved() to all the drivers using
+ * set_memory_* or set_pages_*.
+ *
+ * This will help prevent accidentally freeing pages
+ * before setting the attribute back to WB.
+ */
+
+/*
+ * For RAM pages, mark the pages as non WB memory type using
+ * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
+ * set_memory_wc() on a RAM page at a time before marking it as WB again.
+ * This is ok, because only one driver will be owning the page and
+ * doing set_memory_*() calls.
+ *
+ * For now, we use PageNonWB to track that the RAM page is being mapped
+ * as non WB. In future, we will have to use one more flag
+ * (or some other mechanism in page_struct) to distinguish between
+ * UC and WC mapping.
+ */
+static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
+				  unsigned long *new_type)
+{
+	struct page *page;
+	u64 pfn, end_pfn;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
+		page = pfn_to_page(pfn);
+		if (page_mapped(page) || PageNonWB(page))
+			goto out;
+
+		SetPageNonWB(page);
+	}
+	return 0;
+
+out:
+	end_pfn = pfn;
+	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+		page = pfn_to_page(pfn);
+		ClearPageNonWB(page);
+	}
+
+	return -EINVAL;
+}
+
+static int free_ram_pages_type(u64 start, u64 end)
+{
+	struct page *page;
+	u64 pfn, end_pfn;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
+		page = pfn_to_page(pfn);
+		if (page_mapped(page) || !PageNonWB(page))
+			goto out;
+
+		ClearPageNonWB(page);
+	}
+	return 0;
+
+out:
+	end_pfn = pfn;
+	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+		page = pfn_to_page(pfn);
+		SetPageNonWB(page);
+	}
+	return -EINVAL;
+}
+
+/*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
  * - _PAGE_CACHE_WC
@@ -232,6 +301,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	unsigned long actual_type;
 	struct list_head *where;
 	int err = 0;
+	int is_range_ram;
 
 	BUG_ON(start >= end); /* end is exclusive */
 
@@ -270,6 +340,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		actual_type = pat_x_mtrr_type(start, end,
 					      req_type & _PAGE_CACHE_MASK);
 
+	is_range_ram = pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return reserve_ram_pages_type(start, end, req_type, new_type);
+	else if (is_range_ram < 0)
+		return -EINVAL;
+
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
 		return -ENOMEM;
@@ -358,6 +434,7 @@ int free_memtype(u64 start, u64 end)
 {
 	struct memtype *entry;
 	int err = -EINVAL;
+	int is_range_ram;
 
 	if (!pat_enabled)
 		return 0;
@@ -366,6 +443,12 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
+	is_range_ram = pagerange_is_ram(start, end);
+	if (is_range_ram == 1)
+		return free_ram_pages_type(start, end);
+	else if (is_range_ram < 0)
+		return -EINVAL;
+
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
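
Taken together, the effect is visible from a driver's point of view:
changing caching attributes on RAM it owns now toggles PG_arch_1 through
reserve_memtype()/free_memtype() instead of adding and removing
memtype-list entries. A usage sketch (the driver function and buffer are
hypothetical; set_memory_uc()/set_memory_wb() are the existing x86 API
that calls into reserve_memtype()/free_memtype() when PAT is enabled):

	#include <asm/cacheflush.h>	/* set_memory_uc(), set_memory_wb() */

	/* Hypothetical driver fragment: make one RAM page uncached,
	 * use it, then restore write-back. With this patch the
	 * underlying reserve_memtype()/free_memtype() calls set and
	 * clear PageNonWB on the page instead of growing the global
	 * memtype list. */
	static int example_use_uncached(unsigned long vaddr)
	{
		int ret;

		ret = set_memory_uc(vaddr, 1);	/* 1 page, now tracked via PageNonWB */
		if (ret)
			return ret;

		/* ... access the buffer with UC semantics ... */

		return set_memory_wb(vaddr, 1);	/* page is WB (and safely freeable) again */
	}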