author    Jaswinder Singh Rajput <jaswinder@kernel.org>  2009-07-03 22:21:32 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-07-04 05:10:46 -0400
commit    63f9600fadb10ea739108ae93e3e842d9843c58b (patch)
tree      7005ff149340bf472183694fa0e2b52d0070e32a
parent    6c4caa1ab737502190e416b76e6c10d2bf24276a (diff)
x86: Clean up mtrr/cleanup.c
Fix trivial style problems:

  WARNING: Use #include <linux/uaccess.h> instead of <asm/uaccess.h>
  WARNING: Use #include <linux/kvm_para.h> instead of <asm/kvm_para.h>

Also, nr_mtrr_spare_reg should be unsigned long.

arch/x86/kernel/cpu/mtrr/cleanup.o:

   text	   data	    bss	    dec	    hex	filename
   6241	   8992	   2056	  17289	   4389	cleanup.o.before
   6241	   8992	   2056	  17289	   4389	cleanup.o.after

The md5 has changed:

   1a7a27513aef1825236daf29110fe657  cleanup.o.before.asm
   bcea358efa2532b6020e338e158447af  cleanup.o.after.asm

Because a WARN_ON()'s __LINE__ value changed by 3 lines.

Suggested-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Jaswinder Singh Rajput <jaswinderrajput@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <20090703164225.GA21447@elte.hu>
[ Did lots of other cleanups to make the code look more consistent. ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/mtrr/cleanup.c	350
1 file changed, 176 insertions(+), 174 deletions(-)
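The md5 delta above is expected even though the section sizes are identical: WARN_ON() bakes the current __LINE__ into the object file, so shifting a call site moves the embedded constant. A minimal userspace sketch of the effect — MY_WARN_ON is a hypothetical stand-in, not the kernel's actual macro:

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's WARN_ON(): the source line is
 * captured via __LINE__, so moving this call site by three lines changes
 * the integer baked into the binary even though behaviour is identical. */
#define MY_WARN_ON(cond)						\
	do {								\
		if (cond)						\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
	} while (0)

int main(void)
{
	MY_WARN_ON(1);	/* prints the line number of this statement */
	return 0;
}
```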
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 1d584a18a50d..b8aba811b60e 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -1,51 +1,52 @@
-/* MTRR (Memory Type Range Register) cleanup
-
-   Copyright (C) 2009	Yinghai Lu
-
-   This library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Library General Public
-   License as published by the Free Software Foundation; either
-   version 2 of the License, or (at your option) any later version.
-
-   This library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Library General Public License for more details.
-
-   You should have received a copy of the GNU Library General Public
-   License along with this library; if not, write to the Free
-   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
+/*
+ * MTRR (Memory Type Range Register) cleanup
+ *
+ * Copyright (C) 2009 Yinghai Lu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
-#include <linux/mutex.h>
 #include <linux/sort.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_para.h>
 
+#include <asm/processor.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/kvm_para.h>
+
 #include "mtrr.h"
 
-/* should be related to MTRR_VAR_RANGES nums */
-#define RANGE_NUM 256
+/* Should be related to MTRR_VAR_RANGES nums */
+#define RANGE_NUM				256
 
 struct res_range {
-	unsigned long start;
-	unsigned long end;
+	unsigned long	start;
+	unsigned long	end;
 };
 
 static int __init
-add_range(struct res_range *range, int nr_range, unsigned long start,
-	  unsigned long end)
+add_range(struct res_range *range, int nr_range,
+	  unsigned long start, unsigned long end)
 {
-	/* out of slots */
+	/* Out of slots: */
 	if (nr_range >= RANGE_NUM)
 		return nr_range;
 
@@ -58,12 +59,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start,
 }
 
 static int __init
-add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
-		     unsigned long end)
+add_range_with_merge(struct res_range *range, int nr_range,
+		     unsigned long start, unsigned long end)
 {
 	int i;
 
-	/* try to merge it with old one */
+	/* Try to merge it with old one: */
 	for (i = 0; i < nr_range; i++) {
 		unsigned long final_start, final_end;
 		unsigned long common_start, common_end;
@@ -84,7 +85,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
 		return nr_range;
 	}
 
-	/* need to add that */
+	/* Need to add it: */
 	return add_range(range, nr_range, start, end);
 }
 
@@ -117,7 +118,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end)
 		}
 
 		if (start > range[j].start && end < range[j].end) {
-			/* find the new spare */
+			/* Find the new spare: */
 			for (i = 0; i < RANGE_NUM; i++) {
 				if (range[i].end == 0)
 					break;
@@ -147,13 +148,19 @@ static int __init cmp_range(const void *x1, const void *x2)
 }
 
 struct var_mtrr_range_state {
-	unsigned long base_pfn;
-	unsigned long size_pfn;
-	mtrr_type type;
+	unsigned long	base_pfn;
+	unsigned long	size_pfn;
+	mtrr_type	type;
 };
 
 static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
+
 static int __initdata debug_print;
+#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+
+
+#define BIOS_BUG_MSG KERN_WARNING \
+	"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
 
 static int __init
 x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
@@ -180,7 +187,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
 				range[i].start, range[i].end + 1);
 	}
 
-	/* take out UC ranges */
+	/* Take out UC ranges: */
 	for (i = 0; i < num_var_ranges; i++) {
 		type = range_state[i].type;
 		if (type != MTRR_TYPE_UNCACHABLE &&
@@ -244,10 +251,9 @@ static int __initdata nr_range;
 
 static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
 {
-	unsigned long sum;
+	unsigned long sum = 0;
 	int i;
 
-	sum = 0;
 	for (i = 0; i < nr_range; i++)
 		sum += range[i].end + 1 - range[i].start;
 
@@ -288,7 +294,7 @@ struct var_mtrr_state {
 
 static void __init
 set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
-		unsigned char type, unsigned int address_bits)
+	     unsigned char type, unsigned int address_bits)
 {
 	u32 base_lo, base_hi, mask_lo, mask_hi;
 	u64 base, mask;
@@ -301,7 +307,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
 	mask = (1ULL << address_bits) - 1;
 	mask &= ~((((u64)sizek) << 10) - 1);
 
-	base  = ((u64)basek) << 10;
+	base = ((u64)basek) << 10;
 
 	base |= type;
 	mask |= 0x800;
@@ -317,15 +323,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
 
 static void __init
 save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
-		unsigned char type)
+	      unsigned char type)
 {
 	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
 	range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
 	range_state[reg].type = type;
 }
 
-static void __init
-set_var_mtrr_all(unsigned int address_bits)
+static void __init set_var_mtrr_all(unsigned int address_bits)
 {
 	unsigned long basek, sizek;
 	unsigned char type;
@@ -342,11 +347,11 @@ set_var_mtrr_all(unsigned int address_bits)
 
 static unsigned long to_size_factor(unsigned long sizek, char *factorp)
 {
-	char factor;
 	unsigned long base = sizek;
+	char factor;
 
 	if (base & ((1<<10) - 1)) {
-		/* not MB alignment */
+		/* Not MB-aligned: */
 		factor = 'K';
 	} else if (base & ((1<<20) - 1)) {
 		factor = 'M';
@@ -372,11 +377,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 	unsigned long max_align, align;
 	unsigned long sizek;
 
-	/* Compute the maximum size I can make a range */
+	/* Compute the maximum size with which we can make a range: */
 	if (range_startk)
 		max_align = ffs(range_startk) - 1;
 	else
 		max_align = 32;
+
 	align = fls(range_sizek) - 1;
 	if (align > max_align)
 		align = max_align;
@@ -386,11 +392,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
 		char start_factor = 'K', size_factor = 'K';
 		unsigned long start_base, size_base;
 
-		start_base = to_size_factor(range_startk,
-					    &start_factor),
-		size_base = to_size_factor(sizek, &size_factor),
+		start_base = to_size_factor(range_startk, &start_factor);
+		size_base = to_size_factor(sizek, &size_factor);
 
-		printk(KERN_DEBUG "Setting variable MTRR %d, "
+		Dprintk("Setting variable MTRR %d, "
 			"base: %ld%cB, range: %ld%cB, type %s\n",
 			reg, start_base, start_factor,
 			size_base, size_factor,
@@ -425,10 +430,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 	chunk_sizek = state->chunk_sizek;
 	gran_sizek = state->gran_sizek;
 
-	/* align with gran size, prevent small block used up MTRRs */
+	/* Align with gran size, prevent small block used up MTRRs: */
 	range_basek = ALIGN(state->range_startk, gran_sizek);
 	if ((range_basek > basek) && basek)
 		return second_sizek;
+
 	state->range_sizek -= (range_basek - state->range_startk);
 	range_sizek = ALIGN(state->range_sizek, gran_sizek);
 
@@ -439,22 +445,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 	}
 	state->range_sizek = range_sizek;
 
-	/* try to append some small hole */
+	/* Try to append some small hole: */
 	range0_basek = state->range_startk;
 	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
 
-	/* no increase */
+	/* No increase: */
 	if (range0_sizek == state->range_sizek) {
-		if (debug_print)
-			printk(KERN_DEBUG "rangeX: %016lx - %016lx\n",
-				range0_basek<<10,
-				(range0_basek + state->range_sizek)<<10);
+		Dprintk("rangeX: %016lx - %016lx\n",
+			range0_basek<<10,
+			(range0_basek + state->range_sizek)<<10);
 		state->reg = range_to_mtrr(state->reg, range0_basek,
 				state->range_sizek, MTRR_TYPE_WRBACK);
 		return 0;
 	}
 
-	/* only cut back, when it is not the last */
+	/* Only cut back when it is not the last: */
 	if (sizek) {
 		while (range0_basek + range0_sizek > (basek + sizek)) {
 			if (range0_sizek >= chunk_sizek)
@@ -470,16 +475,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
 second_try:
 	range_basek = range0_basek + range0_sizek;
 
-	/* one hole in the middle */
+	/* One hole in the middle: */
 	if (range_basek > basek && range_basek <= (basek + sizek))
 		second_sizek = range_basek - basek;
 
 	if (range0_sizek > state->range_sizek) {
 
-		/* one hole in middle or at end */
+		/* One hole in middle or at the end: */
 		hole_sizek = range0_sizek - state->range_sizek - second_sizek;
 
-		/* hole size should be less than half of range0 size */
+		/* Hole size should be less than half of range0 size: */
 		if (hole_sizek >= (range0_sizek >> 1) &&
 		    range0_sizek >= chunk_sizek) {
 			range0_sizek -= chunk_sizek;
@@ -491,32 +496,30 @@ second_try:
 	}
 
 	if (range0_sizek) {
-		if (debug_print)
-			printk(KERN_DEBUG "range0: %016lx - %016lx\n",
-				range0_basek<<10,
-				(range0_basek + range0_sizek)<<10);
+		Dprintk("range0: %016lx - %016lx\n",
+			range0_basek<<10,
+			(range0_basek + range0_sizek)<<10);
 		state->reg = range_to_mtrr(state->reg, range0_basek,
 				range0_sizek, MTRR_TYPE_WRBACK);
 	}
 
 	if (range0_sizek < state->range_sizek) {
-		/* need to handle left over */
+		/* Need to handle left over range: */
 		range_sizek = state->range_sizek - range0_sizek;
 
-		if (debug_print)
-			printk(KERN_DEBUG "range: %016lx - %016lx\n",
-				range_basek<<10,
-				(range_basek + range_sizek)<<10);
+		Dprintk("range: %016lx - %016lx\n",
+			range_basek<<10,
+			(range_basek + range_sizek)<<10);
+
 		state->reg = range_to_mtrr(state->reg, range_basek,
 				range_sizek, MTRR_TYPE_WRBACK);
 	}
 
 	if (hole_sizek) {
 		hole_basek = range_basek - hole_sizek - second_sizek;
-		if (debug_print)
-			printk(KERN_DEBUG "hole: %016lx - %016lx\n",
-				hole_basek<<10,
-				(hole_basek + hole_sizek)<<10);
+		Dprintk("hole: %016lx - %016lx\n",
+			hole_basek<<10,
+			(hole_basek + hole_sizek)<<10);
 		state->reg = range_to_mtrr(state->reg, hole_basek,
 				hole_sizek, MTRR_TYPE_UNCACHABLE);
 	}
@@ -537,23 +540,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
 	basek = base_pfn << (PAGE_SHIFT - 10);
 	sizek = size_pfn << (PAGE_SHIFT - 10);
 
-	/* See if I can merge with the last range */
+	/* See if I can merge with the last range: */
 	if ((basek <= 1024) ||
 	    (state->range_startk + state->range_sizek == basek)) {
 		unsigned long endk = basek + sizek;
 		state->range_sizek = endk - state->range_startk;
 		return;
 	}
-	/* Write the range mtrrs */
+	/* Write the range mtrrs: */
 	if (state->range_sizek != 0)
 		second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
 
-	/* Allocate an msr */
+	/* Allocate an msr: */
 	state->range_startk = basek + second_sizek;
 	state->range_sizek = sizek - second_sizek;
 }
 
-/* mininum size of mtrr block that can take hole */
+/* Mininum size of mtrr block that can take hole: */
 static u64 mtrr_chunk_size __initdata = (256ULL<<20);
 
 static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -565,7 +568,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p)
 }
 early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
 
-/* granity of mtrr of block */
+/* Granularity of mtrr of block: */
 static u64 mtrr_gran_size __initdata;
 
 static int __init parse_mtrr_gran_size_opt(char *p)
@@ -577,7 +580,7 @@ static int __init parse_mtrr_gran_size_opt(char *p)
 }
 early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
 
-static int nr_mtrr_spare_reg __initdata =
+static unsigned long nr_mtrr_spare_reg __initdata =
 				 CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
 
 static int __init parse_mtrr_spare_reg(char *arg)
@@ -586,7 +589,6 @@ static int __init parse_mtrr_spare_reg(char *arg)
 		nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
 	return 0;
 }
-
 early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
 
 static int __init
@@ -594,8 +596,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
 		     u64 chunk_size, u64 gran_size)
 {
 	struct var_mtrr_state var_state;
-	int i;
 	int num_reg;
+	int i;
 
 	var_state.range_startk = 0;
 	var_state.range_sizek = 0;
@@ -605,17 +607,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
 
 	memset(range_state, 0, sizeof(range_state));
 
-	/* Write the range etc */
-	for (i = 0; i < nr_range; i++)
+	/* Write the range: */
+	for (i = 0; i < nr_range; i++) {
 		set_var_mtrr_range(&var_state, range[i].start,
 				   range[i].end - range[i].start + 1);
+	}
 
-	/* Write the last range */
+	/* Write the last range: */
 	if (var_state.range_sizek != 0)
 		range_to_mtrr_with_hole(&var_state, 0, 0);
 
 	num_reg = var_state.reg;
-	/* Clear out the extra MTRR's */
+	/* Clear out the extra MTRR's: */
 	while (var_state.reg < num_var_ranges) {
 		save_var_mtrr(var_state.reg, 0, 0, 0);
 		var_state.reg++;
@@ -625,11 +628,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
 }
 
 struct mtrr_cleanup_result {
-	unsigned long gran_sizek;
-	unsigned long chunk_sizek;
-	unsigned long lose_cover_sizek;
-	unsigned int num_reg;
-	int bad;
+	unsigned long	gran_sizek;
+	unsigned long	chunk_sizek;
+	unsigned long	lose_cover_sizek;
+	unsigned int	num_reg;
+	int		bad;
 };
 
 /*
@@ -645,10 +648,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
 
 static void __init print_out_mtrr_range_state(void)
 {
-	int i;
 	char start_factor = 'K', size_factor = 'K';
 	unsigned long start_base, size_base;
 	mtrr_type type;
+	int i;
 
 	for (i = 0; i < num_var_ranges; i++) {
 
@@ -676,10 +679,10 @@ static int __init mtrr_need_cleanup(void)
 	int i;
 	mtrr_type type;
 	unsigned long size;
-	/* extra one for all 0 */
+	/* Extra one for all 0: */
 	int num[MTRR_NUM_TYPES + 1];
 
-	/* check entries number */
+	/* Check entries number: */
 	memset(num, 0, sizeof(num));
 	for (i = 0; i < num_var_ranges; i++) {
 		type = range_state[i].type;
@@ -693,88 +696,86 @@ static int __init mtrr_need_cleanup(void)
 			num[type]++;
 	}
 
-	/* check if we got UC entries */
+	/* Check if we got UC entries: */
 	if (!num[MTRR_TYPE_UNCACHABLE])
 		return 0;
 
-	/* check if we only had WB and UC */
+	/* Check if we only had WB and UC */
 	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
 	    num_var_ranges - num[MTRR_NUM_TYPES])
 		return 0;
 
 	return 1;
 }
 
 static unsigned long __initdata range_sums;
-static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
-					 unsigned long extra_remove_base,
-					 unsigned long extra_remove_size,
-					 int i)
+
+static void __init
+mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
+		      unsigned long x_remove_base,
+		      unsigned long x_remove_size, int i)
 {
-	int num_reg;
 	static struct res_range range_new[RANGE_NUM];
-	static int nr_range_new;
 	unsigned long range_sums_new;
+	static int nr_range_new;
+	int num_reg;
 
-	/* convert ranges to var ranges state */
-	num_reg = x86_setup_var_mtrrs(range, nr_range,
-				      chunk_size, gran_size);
+	/* Convert ranges to var ranges state: */
+	num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
 
-	/* we got new setting in range_state, check it */
+	/* We got new setting in range_state, check it: */
 	memset(range_new, 0, sizeof(range_new));
 	nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
-			extra_remove_base, extra_remove_size);
+				x_remove_base, x_remove_size);
 	range_sums_new = sum_ranges(range_new, nr_range_new);
 
 	result[i].chunk_sizek = chunk_size >> 10;
 	result[i].gran_sizek = gran_size >> 10;
 	result[i].num_reg = num_reg;
+
 	if (range_sums < range_sums_new) {
-		result[i].lose_cover_sizek =
-			(range_sums_new - range_sums) << PSHIFT;
+		result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
 		result[i].bad = 1;
-	} else
-		result[i].lose_cover_sizek =
-			(range_sums - range_sums_new) << PSHIFT;
+	} else {
+		result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
+	}
 
-	/* double check it */
+	/* Double check it: */
 	if (!result[i].bad && !result[i].lose_cover_sizek) {
-		if (nr_range_new != nr_range ||
-			memcmp(range, range_new, sizeof(range)))
-				result[i].bad = 1;
+		if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
+			result[i].bad = 1;
 	}
 
-	if (!result[i].bad && (range_sums - range_sums_new <
-			       min_loss_pfn[num_reg])) {
-		min_loss_pfn[num_reg] =
-			range_sums - range_sums_new;
-	}
+	if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
+		min_loss_pfn[num_reg] = range_sums - range_sums_new;
 }
 
 static void __init mtrr_print_out_one_result(int i)
 {
-	char gran_factor, chunk_factor, lose_factor;
 	unsigned long gran_base, chunk_base, lose_base;
+	char gran_factor, chunk_factor, lose_factor;
 
 	gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
 	chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
 	lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
-	printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t",
-			result[i].bad ? "*BAD*" : " ",
-			gran_base, gran_factor, chunk_base, chunk_factor);
-	printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n",
-			result[i].num_reg, result[i].bad ? "-" : "",
-			lose_base, lose_factor);
+
+	pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
+		result[i].bad ? "*BAD*" : " ",
+		gran_base, gran_factor, chunk_base, chunk_factor);
+	pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
+		result[i].num_reg, result[i].bad ? "-" : "",
+		lose_base, lose_factor);
 }
 
 static int __init mtrr_search_optimal_index(void)
 {
-	int i;
 	int num_reg_good;
 	int index_good;
+	int i;
 
 	if (nr_mtrr_spare_reg >= num_var_ranges)
 		nr_mtrr_spare_reg = num_var_ranges - 1;
+
 	num_reg_good = -1;
 	for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
 		if (!min_loss_pfn[i])
@@ -796,24 +797,24 @@ static int __init mtrr_search_optimal_index(void)
 	return index_good;
 }
 
-
 int __init mtrr_cleanup(unsigned address_bits)
 {
-	unsigned long extra_remove_base, extra_remove_size;
+	unsigned long x_remove_base, x_remove_size;
 	unsigned long base, size, def, dummy;
-	mtrr_type type;
 	u64 chunk_size, gran_size;
+	mtrr_type type;
 	int index_good;
 	int i;
 
 	if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
 		return 0;
+
 	rdmsr(MSR_MTRRdefType, def, dummy);
 	def &= 0xff;
 	if (def != MTRR_TYPE_UNCACHABLE)
 		return 0;
 
-	/* get it and store it aside */
+	/* Get it and store it aside: */
 	memset(range_state, 0, sizeof(range_state));
 	for (i = 0; i < num_var_ranges; i++) {
 		mtrr_if->get(i, &base, &size, &type);
@@ -822,29 +823,28 @@ int __init mtrr_cleanup(unsigned address_bits)
 		range_state[i].type = type;
 	}
 
-	/* check if we need handle it and can handle it */
+	/* Check if we need handle it and can handle it: */
 	if (!mtrr_need_cleanup())
 		return 0;
 
-	/* print original var MTRRs at first, for debugging: */
+	/* Print original var MTRRs at first, for debugging: */
 	printk(KERN_DEBUG "original variable MTRRs\n");
 	print_out_mtrr_range_state();
 
 	memset(range, 0, sizeof(range));
-	extra_remove_size = 0;
-	extra_remove_base = 1 << (32 - PAGE_SHIFT);
+	x_remove_size = 0;
+	x_remove_base = 1 << (32 - PAGE_SHIFT);
 	if (mtrr_tom2)
-		extra_remove_size =
-			(mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base;
-	nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base,
-					  extra_remove_size);
+		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
+
+	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
-	 * [0, 1M) should always be coverred by var mtrr with WB
-	 * and fixed mtrrs should take effective before var mtrr for it
+	 * [0, 1M) should always be covered by var mtrr with WB
+	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
 	nr_range = add_range_with_merge(range, nr_range, 0,
 					(1ULL<<(20 - PAGE_SHIFT)) - 1);
-	/* sort the ranges */
+	/* Sort the ranges: */
 	sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
 
 	range_sums = sum_ranges(range, nr_range);
@@ -854,7 +854,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 	if (mtrr_chunk_size && mtrr_gran_size) {
 		i = 0;
 		mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
-				      extra_remove_base, extra_remove_size, i);
+				      x_remove_base, x_remove_size, i);
 
 		mtrr_print_out_one_result(i);
 
@@ -880,7 +880,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 				continue;
 
 			mtrr_calc_range_state(chunk_size, gran_size,
-					      extra_remove_base, extra_remove_size, i);
+					      x_remove_base, x_remove_size, i);
 			if (debug_print) {
 				mtrr_print_out_one_result(i);
 				printk(KERN_INFO "\n");
@@ -890,7 +890,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 		}
 	}
 
-	/* try to find the optimal index */
+	/* Try to find the optimal index: */
 	index_good = mtrr_search_optimal_index();
 
 	if (index_good != -1) {
@@ -898,7 +898,7 @@ int __init mtrr_cleanup(unsigned address_bits)
 		i = index_good;
 		mtrr_print_out_one_result(i);
 
-		/* convert ranges to var ranges state */
+		/* Convert ranges to var ranges state: */
 		chunk_size = result[i].chunk_sizek;
 		chunk_size <<= 10;
 		gran_size = result[i].gran_sizek;
@@ -941,8 +941,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
  * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
  * apply to are wrong, but so far we don't know of any such case in the wild.
  */
-#define Tom2Enabled (1U << 21)
-#define Tom2ForceMemTypeWB (1U << 22)
+#define Tom2Enabled		(1U << 21)
+#define Tom2ForceMemTypeWB	(1U << 22)
 
 int __init amd_special_default_mtrr(void)
 {
@@ -952,7 +952,7 @@ int __init amd_special_default_mtrr(void)
 		return 0;
 	if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
 		return 0;
-	/* In case some hypervisor doesn't pass SYSCFG through */
+	/* In case some hypervisor doesn't pass SYSCFG through: */
 	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
 		return 0;
 	/*
@@ -965,19 +965,21 @@ int __init amd_special_default_mtrr(void)
 	return 0;
 }
 
-static u64 __init real_trim_memory(unsigned long start_pfn,
-				   unsigned long limit_pfn)
+static u64 __init
+real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
 {
 	u64 trim_start, trim_size;
+
 	trim_start = start_pfn;
 	trim_start <<= PAGE_SHIFT;
+
 	trim_size = limit_pfn;
 	trim_size <<= PAGE_SHIFT;
 	trim_size -= trim_start;
 
-	return e820_update_range(trim_start, trim_size, E820_RAM,
-				 E820_RESERVED);
+	return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED);
 }
+
 /**
  * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
  * @end_pfn: ending page frame number
@@ -985,7 +987,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
  * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
  * memory configurations. This routine checks that the highest MTRR matches
  * the end of memory, to make sure the MTRRs having a write back type cover
- * all of the memory the kernel is intending to use.  If not, it'll trim any
+ * all of the memory the kernel is intending to use. If not, it'll trim any
  * memory off the end by adjusting end_pfn, removing it from the kernel's
  * allocation pools, warning the user with an obnoxious message.
  */
@@ -994,21 +996,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	unsigned long i, base, size, highest_pfn = 0, def, dummy;
 	mtrr_type type;
 	u64 total_trim_size;
-
 	/* extra one for all 0 */
 	int num[MTRR_NUM_TYPES + 1];
+
 	/*
 	 * Make sure we only trim uncachable memory on machines that
 	 * support the Intel MTRR architecture:
 	 */
 	if (!is_cpu(INTEL) || disable_mtrr_trim)
 		return 0;
+
 	rdmsr(MSR_MTRRdefType, def, dummy);
 	def &= 0xff;
 	if (def != MTRR_TYPE_UNCACHABLE)
 		return 0;
 
-	/* get it and store it aside */
+	/* Get it and store it aside: */
 	memset(range_state, 0, sizeof(range_state));
 	for (i = 0; i < num_var_ranges; i++) {
 		mtrr_if->get(i, &base, &size, &type);
@@ -1017,7 +1020,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		range_state[i].type = type;
 	}
 
-	/* Find highest cached pfn */
+	/* Find highest cached pfn: */
 	for (i = 0; i < num_var_ranges; i++) {
 		type = range_state[i].type;
 		if (type != MTRR_TYPE_WRBACK)
@@ -1028,13 +1031,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 			highest_pfn = base + size;
 	}
 
-	/* kvm/qemu doesn't have mtrr set right, don't trim them all */
+	/* kvm/qemu doesn't have mtrr set right, don't trim them all: */
 	if (!highest_pfn) {
 		printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
 		return 0;
 	}
 
-	/* check entries number */
+	/* Check entries number: */
 	memset(num, 0, sizeof(num));
 	for (i = 0; i < num_var_ranges; i++) {
 		type = range_state[i].type;
@@ -1046,11 +1049,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 		num[type]++;
 	}
 
-	/* no entry for WB? */
+	/* No entry for WB? */
 	if (!num[MTRR_TYPE_WRBACK])
 		return 0;
 
-	/* check if we only had WB and UC */
+	/* Check if we only had WB and UC: */
 	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
 	    num_var_ranges - num[MTRR_NUM_TYPES])
 		return 0;
@@ -1066,31 +1069,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 	}
 	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
 
+	/* Check the head: */
 	total_trim_size = 0;
-	/* check the head */
 	if (range[0].start)
 		total_trim_size += real_trim_memory(0, range[0].start);
-	/* check the holes */
+
+	/* Check the holes: */
 	for (i = 0; i < nr_range - 1; i++) {
 		if (range[i].end + 1 < range[i+1].start)
 			total_trim_size += real_trim_memory(range[i].end + 1,
 							    range[i+1].start);
 	}
-	/* check the top */
+
+	/* Check the top: */
 	i = nr_range - 1;
 	if (range[i].end + 1 < end_pfn)
 		total_trim_size += real_trim_memory(range[i].end + 1,
 						    end_pfn);
 
 	if (total_trim_size) {
-		printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
-			" all of memory, losing %lluMB of RAM.\n",
-			total_trim_size >> 20);
+		pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
 
 		if (!changed_by_mtrr_cleanup)
 			WARN_ON(1);
 
-		printk(KERN_INFO "update e820 for mtrr\n");
+		pr_info("update e820 for mtrr\n");
 		update_e820();
 
 		return 1;
@@ -1098,4 +1101,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
 	return 0;
 }
-
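The patch also folds the repeated `if (debug_print) printk(KERN_DEBUG ...)` sequences into the new Dprintk() macro. A self-contained sketch of that guarded-debug idiom, with userspace printf standing in for printk:

```c
#include <stdio.h>

static int debug_print;	/* set non-zero to enable debug output */

/* Same shape as the Dprintk() added above: the do { } while (0) wrapper
 * makes the macro behave like a single statement after if/else. */
#define Dprintk(x...) do { if (debug_print) printf(x); } while (0)

int main(void)
{
	Dprintk("suppressed: debug_print == %d\n", debug_print);
	debug_print = 1;
	Dprintk("printed: debug_print == %d\n", debug_print);
	return 0;
}
```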