diff options
author | Yinghai Lu <yinghai@kernel.org> | 2010-02-10 04:20:13 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2010-02-10 20:47:17 -0500 |
commit | e9a0064ad03b899938059bb576615ad9ed0f27f9 (patch) | |
tree | b1c7dd0c7844fb4b8f56bb991684ece56bcd6e1c /arch/x86/kernel/cpu/mtrr | |
parent | 284f933d45a1e60404328440910bc2651c0fb51d (diff) |
x86: Change range end to start+size
So make interface more consistent with early_res.
Later we can share some code with early_res.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1265793639-15071-10-git-send-email-yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel/cpu/mtrr')
-rw-r--r-- | arch/x86/kernel/cpu/mtrr/cleanup.c | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 669da09ab9a8..06130b52f012 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -78,13 +78,13 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
78 | base = range_state[i].base_pfn; | 78 | base = range_state[i].base_pfn; |
79 | size = range_state[i].size_pfn; | 79 | size = range_state[i].size_pfn; |
80 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, | 80 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, |
81 | base, base + size - 1); | 81 | base, base + size); |
82 | } | 82 | } |
83 | if (debug_print) { | 83 | if (debug_print) { |
84 | printk(KERN_DEBUG "After WB checking\n"); | 84 | printk(KERN_DEBUG "After WB checking\n"); |
85 | for (i = 0; i < nr_range; i++) | 85 | for (i = 0; i < nr_range; i++) |
86 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 86 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", |
87 | range[i].start, range[i].end + 1); | 87 | range[i].start, range[i].end); |
88 | } | 88 | } |
89 | 89 | ||
90 | /* Take out UC ranges: */ | 90 | /* Take out UC ranges: */ |
@@ -106,11 +106,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
106 | size -= (1<<(20-PAGE_SHIFT)) - base; | 106 | size -= (1<<(20-PAGE_SHIFT)) - base; |
107 | base = 1<<(20-PAGE_SHIFT); | 107 | base = 1<<(20-PAGE_SHIFT); |
108 | } | 108 | } |
109 | subtract_range(range, RANGE_NUM, base, base + size - 1); | 109 | subtract_range(range, RANGE_NUM, base, base + size); |
110 | } | 110 | } |
111 | if (extra_remove_size) | 111 | if (extra_remove_size) |
112 | subtract_range(range, RANGE_NUM, extra_remove_base, | 112 | subtract_range(range, RANGE_NUM, extra_remove_base, |
113 | extra_remove_base + extra_remove_size - 1); | 113 | extra_remove_base + extra_remove_size); |
114 | 114 | ||
115 | if (debug_print) { | 115 | if (debug_print) { |
116 | printk(KERN_DEBUG "After UC checking\n"); | 116 | printk(KERN_DEBUG "After UC checking\n"); |
@@ -118,7 +118,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
118 | if (!range[i].end) | 118 | if (!range[i].end) |
119 | continue; | 119 | continue; |
120 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 120 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", |
121 | range[i].start, range[i].end + 1); | 121 | range[i].start, range[i].end); |
122 | } | 122 | } |
123 | } | 123 | } |
124 | 124 | ||
@@ -128,7 +128,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range, | |||
128 | printk(KERN_DEBUG "After sorting\n"); | 128 | printk(KERN_DEBUG "After sorting\n"); |
129 | for (i = 0; i < nr_range; i++) | 129 | for (i = 0; i < nr_range; i++) |
130 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", | 130 | printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n", |
131 | range[i].start, range[i].end + 1); | 131 | range[i].start, range[i].end); |
132 | } | 132 | } |
133 | 133 | ||
134 | return nr_range; | 134 | return nr_range; |
@@ -142,7 +142,7 @@ static unsigned long __init sum_ranges(struct range *range, int nr_range) | |||
142 | int i; | 142 | int i; |
143 | 143 | ||
144 | for (i = 0; i < nr_range; i++) | 144 | for (i = 0; i < nr_range; i++) |
145 | sum += range[i].end + 1 - range[i].start; | 145 | sum += range[i].end - range[i].start; |
146 | 146 | ||
147 | return sum; | 147 | return sum; |
148 | } | 148 | } |
@@ -489,7 +489,7 @@ x86_setup_var_mtrrs(struct range *range, int nr_range, | |||
489 | /* Write the range: */ | 489 | /* Write the range: */ |
490 | for (i = 0; i < nr_range; i++) { | 490 | for (i = 0; i < nr_range; i++) { |
491 | set_var_mtrr_range(&var_state, range[i].start, | 491 | set_var_mtrr_range(&var_state, range[i].start, |
492 | range[i].end - range[i].start + 1); | 492 | range[i].end - range[i].start); |
493 | } | 493 | } |
494 | 494 | ||
495 | /* Write the last range: */ | 495 | /* Write the last range: */ |
@@ -720,7 +720,7 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
720 | * and fixed mtrrs should take effect before var mtrr for it: | 720 | * and fixed mtrrs should take effect before var mtrr for it: |
721 | */ | 721 | */ |
722 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, | 722 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, |
723 | (1ULL<<(20 - PAGE_SHIFT)) - 1); | 723 | 1ULL<<(20 - PAGE_SHIFT)); |
724 | /* Sort the ranges: */ | 724 | /* Sort the ranges: */ |
725 | sort_range(range, nr_range); | 725 | sort_range(range, nr_range); |
726 | 726 | ||
@@ -939,9 +939,9 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
939 | nr_range = 0; | 939 | nr_range = 0; |
940 | if (mtrr_tom2) { | 940 | if (mtrr_tom2) { |
941 | range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); | 941 | range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT)); |
942 | range[nr_range].end = (mtrr_tom2 >> PAGE_SHIFT) - 1; | 942 | range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT; |
943 | if (highest_pfn < range[nr_range].end + 1) | 943 | if (highest_pfn < range[nr_range].end) |
944 | highest_pfn = range[nr_range].end + 1; | 944 | highest_pfn = range[nr_range].end; |
945 | nr_range++; | 945 | nr_range++; |
946 | } | 946 | } |
947 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); | 947 | nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); |
@@ -953,15 +953,15 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
953 | 953 | ||
954 | /* Check the holes: */ | 954 | /* Check the holes: */ |
955 | for (i = 0; i < nr_range - 1; i++) { | 955 | for (i = 0; i < nr_range - 1; i++) { |
956 | if (range[i].end + 1 < range[i+1].start) | 956 | if (range[i].end < range[i+1].start) |
957 | total_trim_size += real_trim_memory(range[i].end + 1, | 957 | total_trim_size += real_trim_memory(range[i].end, |
958 | range[i+1].start); | 958 | range[i+1].start); |
959 | } | 959 | } |
960 | 960 | ||
961 | /* Check the top: */ | 961 | /* Check the top: */ |
962 | i = nr_range - 1; | 962 | i = nr_range - 1; |
963 | if (range[i].end + 1 < end_pfn) | 963 | if (range[i].end < end_pfn) |
964 | total_trim_size += real_trim_memory(range[i].end + 1, | 964 | total_trim_size += real_trim_memory(range[i].end, |
965 | end_pfn); | 965 | end_pfn); |
966 | 966 | ||
967 | if (total_trim_size) { | 967 | if (total_trim_size) { |