author	Jaswinder Singh Rajput <jaswinder@kernel.org>	2009-07-03 22:26:28 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-07-04 05:19:55 -0400
commit	dbd51be026eaf84088fdee7fab9f38fa92eef26d (patch)
tree	d3f2f2084c76627a2c9c86707658b051e5228f3f
parent	09b22c85d59dd935fdfa71655a443785e3f99c18 (diff)
x86: Clean up mtrr/main.c
Fix following trivial style problems:

  ERROR: trailing whitespace X 25
  WARNING: Use #include <linux/uaccess.h> instead of <asm/uaccess.h>
  WARNING: Use #include <linux/kvm_para.h> instead of <asm/kvm_para.h>
  ERROR: do not initialise externals to 0 or NULL X 2
  ERROR: "foo * bar" should be "foo *bar" X 5
  ERROR: do not use assignment in if condition X 2
  WARNING: line over 80 characters X 8
  ERROR: return is not a function, parentheses are not required
  WARNING: braces {} are not necessary for any arm of this statement
  ERROR: space required before the open parenthesis '(' X 2
  ERROR: open brace '{' following function declarations go on the next line
  ERROR: space required after that ',' (ctx:VxV) X 8
  ERROR: space required before the open parenthesis '(' X 3
  ERROR: else should follow close brace '}'
  WARNING: space prohibited between function name and open parenthesis '('
  WARNING: EXPORT_SYMBOL(foo); should immediately follow its function/variable X 2

Also use pr_debug and pr_warning where possible.

total: 50 errors, 14 warnings

arch/x86/kernel/cpu/mtrr/main.o:

   text	   data	    bss	    dec	    hex	filename
   3668	    116	   4156	   7940	   1f04	main.o.before
   3668	    116	   4156	   7940	   1f04	main.o.after

md5:
   e01af2fd28deef77c8d01e71acfbd365  main.o.before.asm
   e01af2fd28deef77c8d01e71acfbd365  main.o.after.asm

Suggested-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Jaswinder Singh Rajput <jaswinderrajput@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <20090703164225.GA21447@elte.hu>
Cc: Avi Kivity <avi@redhat.com> # Avi, please have a look at the kvm_para.h bit
[ More cleanups ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/mtrr/main.c	455
1 files changed, 242 insertions, 213 deletions
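
Two recurring changes in the diff below are worth illustrating. First, the printk(KERN_*) calls become the shorter pr_*() helpers; second, the new "#define DEBUG" at the top of the file is what makes the converted pr_debug() call actually emit output (pr_debug() compiles to nothing unless DEBUG or dynamic debug is enabled). A minimal before/after sketch of the pattern — the function here is hypothetical, not taken from the patch:

	#include <linux/kernel.h>

	/* Hypothetical helper showing the logging conversion applied in main.c. */
	static int mtrr_reject_zero(unsigned long size)
	{
		if (!size) {
			/* Before: log level macro spliced into the format string */
			printk(KERN_WARNING "mtrr: zero sized request\n");
			/*
			 * After: pr_warning() expands to the same printk(),
			 * but is shorter and easier to keep under 80 columns.
			 */
			pr_warning("mtrr: zero sized request\n");
			return -EINVAL;
		}
		return 0;
	}
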
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 8fc248b5aeaf..7af0f88a4163 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -25,43 +25,48 @@
     Operating System Writer's Guide" (Intel document number 242692),
     section 11.11.7
 
     This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
     on 6-7 March 2002.
     Source: Intel Architecture Software Developers Manual, Volume 3:
     System Programming Guide; Section 9.11. (1997 edition - PPro).
 */
 
+#define DEBUG
+
+#include <linux/types.h> /* FIXME: kvm_para.h needs this */
+
+#include <linux/kvm_para.h>
+#include <linux/uaccess.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/init.h>
+#include <linux/sort.h>
+#include <linux/cpu.h>
 #include <linux/pci.h>
 #include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/mutex.h>
-#include <linux/sort.h>
 
+#include <asm/processor.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/kvm_para.h>
+
 #include "mtrr.h"
 
-u32 num_var_ranges = 0;
+u32 num_var_ranges;
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
 
-static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {};
+static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
 
-struct mtrr_ops * mtrr_if = NULL;
+struct mtrr_ops *mtrr_if;
 
 static void set_mtrr(unsigned int reg, unsigned long base,
 		     unsigned long size, mtrr_type type);
 
-void set_mtrr_ops(struct mtrr_ops * ops)
+void set_mtrr_ops(struct mtrr_ops *ops)
 {
 	if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
 		mtrr_ops[ops->vendor] = ops;
@@ -72,30 +77,36 @@ static int have_wrcomb(void)
 {
 	struct pci_dev *dev;
 	u8 rev;
 
-	if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) {
-		/* ServerWorks LE chipsets < rev 6 have problems with write-combining
-		   Don't allow it and leave room for other chipsets to be tagged */
+	dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
+	if (dev != NULL) {
+		/*
+		 * ServerWorks LE chipsets < rev 6 have problems with
+		 * write-combining. Don't allow it and leave room for other
+		 * chipsets to be tagged
+		 */
 		if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
 		    dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
 			pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
 			if (rev <= 5) {
-				printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
+				pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
 				pci_dev_put(dev);
 				return 0;
 			}
 		}
-		/* Intel 450NX errata # 23. Non ascending cacheline evictions to
-		   write combining memory may resulting in data corruption */
+		/*
+		 * Intel 450NX errata # 23. Non ascending cacheline evictions to
+		 * write combining memory may resulting in data corruption
+		 */
 		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
 		    dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
-			printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
+			pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
 			pci_dev_put(dev);
 			return 0;
 		}
 		pci_dev_put(dev);
 	}
-	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+	return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
 }
 
 /* This function returns the number of variable MTRRs */
@@ -103,12 +114,13 @@ static void __init set_num_var_ranges(void)
 {
 	unsigned long config = 0, dummy;
 
-	if (use_intel()) {
+	if (use_intel())
 		rdmsr(MSR_MTRRcap, config, dummy);
-	} else if (is_cpu(AMD))
+	else if (is_cpu(AMD))
 		config = 2;
 	else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
 		config = 8;
+
 	num_var_ranges = config & 0xff;
 }
 
@@ -130,10 +142,12 @@ struct set_mtrr_data {
 	mtrr_type	smp_type;
 };
 
+/**
+ * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
+ *
+ * Returns nothing.
+ */
 static void ipi_handler(void *info)
-/*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
-    [RETURNS] Nothing.
-*/
 {
 #ifdef CONFIG_SMP
 	struct set_mtrr_data *data = info;
@@ -142,18 +156,19 @@ static void ipi_handler(void *info)
 	local_irq_save(flags);
 
 	atomic_dec(&data->count);
-	while(!atomic_read(&data->gate))
+	while (!atomic_read(&data->gate))
 		cpu_relax();
 
 	/* The master has cleared me to execute */
-	if (data->smp_reg != ~0U)
+	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	else
+	} else {
 		mtrr_if->set_all();
+	}
 
 	atomic_dec(&data->count);
-	while(atomic_read(&data->gate))
+	while (atomic_read(&data->gate))
 		cpu_relax();
 
 	atomic_dec(&data->count);
@@ -161,7 +176,8 @@ static void ipi_handler(void *info)
 #endif
 }
 
-static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
+static inline int types_compatible(mtrr_type type1, mtrr_type type2)
+{
 	return type1 == MTRR_TYPE_UNCACHABLE ||
 	       type2 == MTRR_TYPE_UNCACHABLE ||
 	       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
@@ -176,10 +192,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
  * @type: mtrr type
  *
  * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
- * 
+ *
  * 1. Send IPI to do the following:
  * 2. Disable Interrupts
  * 3. Wait for all procs to do so
  * 4. Enter no-fill cache mode
  * 5. Flush caches
  * 6. Clear PGE bit
@@ -189,26 +205,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 * 10. Enable all range registers
 * 11. Flush all TLBs and caches again
 * 12. Enter normal cache mode and reenable caching
 * 13. Set PGE
 * 14. Wait for buddies to catch up
 * 15. Enable interrupts.
 *
 * What does that mean for us? Well, first we set data.count to the number
 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
- * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it
- * differently, so we call mtrr_if->set() callback and let them take care of it.
- * When they're done, they again decrement data->count and wait for data.gate to
- * be reset.
- * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
+ * CPU goes through the transition of updating MTRRs.
+ * The CPU vendors may each do it differently,
+ * so we call mtrr_if->set() callback and let them take care of it.
+ * When they're done, they again decrement data->count and wait for data.gate
+ * to be reset.
+ * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag
 * Everyone then enables interrupts and we all continue on.
 *
 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
 * becomes nops.
 */
-static void set_mtrr(unsigned int reg, unsigned long base,
-		     unsigned long size, mtrr_type type)
+static void
+set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
 {
 	struct set_mtrr_data data;
 	unsigned long flags;
@@ -218,121 +235,122 @@ static void set_mtrr(unsigned int reg, unsigned long base,
 	data.smp_size = size;
 	data.smp_type = type;
 	atomic_set(&data.count, num_booting_cpus() - 1);
-	/* make sure data.count is visible before unleashing other CPUs */
+
+	/* Make sure data.count is visible before unleashing other CPUs */
 	smp_wmb();
-	atomic_set(&data.gate,0);
+	atomic_set(&data.gate, 0);
 
 	/* Start the ball rolling on other CPUs */
 	if (smp_call_function(ipi_handler, &data, 0) != 0)
 		panic("mtrr: timed out waiting for other CPUs\n");
 
 	local_irq_save(flags);
 
-	while(atomic_read(&data.count))
+	while (atomic_read(&data.count))
 		cpu_relax();
 
-	/* ok, reset count and toggle gate */
+	/* Ok, reset count and toggle gate */
 	atomic_set(&data.count, num_booting_cpus() - 1);
 	smp_wmb();
-	atomic_set(&data.gate,1);
+	atomic_set(&data.gate, 1);
 
-	/* do our MTRR business */
+	/* Do our MTRR business */
 
-	/* HACK!
+	/*
+	 * HACK!
 	 * We use this same function to initialize the mtrrs on boot.
 	 * The state of the boot cpu's mtrrs has been saved, and we want
 	 * to replicate across all the APs.
 	 * If we're doing that @reg is set to something special...
 	 */
 	if (reg != ~0U)
-		mtrr_if->set(reg,base,size,type);
+		mtrr_if->set(reg, base, size, type);
 
-	/* wait for the others */
-	while(atomic_read(&data.count))
+	/* Wait for the others */
+	while (atomic_read(&data.count))
 		cpu_relax();
 
 	atomic_set(&data.count, num_booting_cpus() - 1);
 	smp_wmb();
-	atomic_set(&data.gate,0);
+	atomic_set(&data.gate, 0);
 
 	/*
 	 * Wait here for everyone to have seen the gate change
 	 * So we're the last ones to touch 'data'
 	 */
-	while(atomic_read(&data.count))
+	while (atomic_read(&data.count))
 		cpu_relax();
 
 	local_irq_restore(flags);
 }
 
 /**
  * mtrr_add_page - Add a memory type region
  * @base: Physical base address of region in pages (in units of 4 kB!)
  * @size: Physical size of region in pages (4 kB)
  * @type: Type of MTRR desired
  * @increment: If this is true do usage counting on the region
  *
  * Memory type region registers control the caching on newer Intel and
  * non Intel processors. This function allows drivers to request an
  * MTRR is added. The details and hardware specifics of each processor's
  * implementation are hidden from the caller, but nevertheless the
  * caller should expect to need to provide a power of two size on an
  * equivalent power of two boundary.
  *
  * If the region cannot be added either because all regions are in use
  * or the CPU cannot support it a negative value is returned. On success
  * the register number for this entry is returned, but should be treated
  * as a cookie only.
  *
  * On a multiprocessor machine the changes are made to all processors.
  * This is required on x86 by the Intel processors.
  *
  * The available types are
  *
  * %MTRR_TYPE_UNCACHABLE - No caching
  *
  * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
  *
  * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
  *
  * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
  *
  * BUGS: Needs a quiet flag for the cases where drivers do not mind
  * failures and do not wish system log messages to be sent.
  */
-
-int mtrr_add_page(unsigned long base, unsigned long size,
+int mtrr_add_page(unsigned long base, unsigned long size,
 		  unsigned int type, bool increment)
 {
+	unsigned long lbase, lsize;
 	int i, replace, error;
 	mtrr_type ltype;
-	unsigned long lbase, lsize;
 
 	if (!mtrr_if)
 		return -ENXIO;
 
-	if ((error = mtrr_if->validate_add_page(base,size,type)))
+	error = mtrr_if->validate_add_page(base, size, type);
+	if (error)
 		return error;
 
 	if (type >= MTRR_NUM_TYPES) {
-		printk(KERN_WARNING "mtrr: type: %u invalid\n", type);
+		pr_warning("mtrr: type: %u invalid\n", type);
 		return -EINVAL;
 	}
 
 	/* If the type is WC, check that this processor supports it */
 	if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
-		printk(KERN_WARNING
-		       "mtrr: your processor doesn't support write-combining\n");
+		pr_warning("mtrr: your processor doesn't support write-combining\n");
 		return -ENOSYS;
 	}
 
 	if (!size) {
-		printk(KERN_WARNING "mtrr: zero sized request\n");
+		pr_warning("mtrr: zero sized request\n");
 		return -EINVAL;
 	}
 
 	if (base & size_or_mask || size & size_or_mask) {
-		printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
+		pr_warning("mtrr: base or size exceeds the MTRR width\n");
 		return -EINVAL;
 	}
 
@@ -341,36 +359,40 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 
 	/* No CPU hotplug when we change MTRR entries */
 	get_online_cpus();
-	/* Search for existing MTRR */
+
+	/* Search for existing MTRR */
 	mutex_lock(&mtrr_mutex);
 	for (i = 0; i < num_var_ranges; ++i) {
 		mtrr_if->get(i, &lbase, &lsize, &ltype);
-		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
+		if (!lsize || base > lbase + lsize - 1 ||
+		    base + size - 1 < lbase)
 			continue;
-		/* At this point we know there is some kind of overlap/enclosure */
+		/*
+		 * At this point we know there is some kind of
+		 * overlap/enclosure
+		 */
 		if (base < lbase || base + size - 1 > lbase + lsize - 1) {
-			if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
+			if (base <= lbase &&
+			    base + size - 1 >= lbase + lsize - 1) {
 				/* New region encloses an existing region */
 				if (type == ltype) {
 					replace = replace == -1 ? i : -2;
 					continue;
-				}
-				else if (types_compatible(type, ltype))
+				} else if (types_compatible(type, ltype))
 					continue;
 			}
-			printk(KERN_WARNING
-			       "mtrr: 0x%lx000,0x%lx000 overlaps existing"
-			       " 0x%lx000,0x%lx000\n", base, size, lbase,
-			       lsize);
+			pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+				   " 0x%lx000,0x%lx000\n", base, size, lbase,
+				   lsize);
 			goto out;
 		}
 		/* New region is enclosed by an existing region */
 		if (ltype != type) {
 			if (types_compatible(type, ltype))
 				continue;
-			printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+			pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
 			     base, size, mtrr_attrib_to_str(ltype),
 			     mtrr_attrib_to_str(type));
 			goto out;
 		}
 		if (increment)
@@ -378,7 +400,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 		error = i;
 		goto out;
 	}
-	/* Search for an empty MTRR */ 
+	/* Search for an empty MTRR */
 	i = mtrr_if->get_free_region(base, size, replace);
 	if (i >= 0) {
 		set_mtrr(i, base, size, type);
@@ -393,8 +415,9 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 				mtrr_usage_table[replace] = 0;
 			}
 		}
-	} else
-		printk(KERN_INFO "mtrr: no more MTRRs available\n");
+	} else {
+		pr_info("mtrr: no more MTRRs available\n");
+	}
 	error = i;
  out:
 	mutex_unlock(&mtrr_mutex);
@@ -405,10 +428,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 static int mtrr_check(unsigned long base, unsigned long size)
 {
 	if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-		printk(KERN_WARNING
-			"mtrr: size and base must be multiples of 4 kiB\n");
-		printk(KERN_DEBUG
-			"mtrr: size: 0x%lx base: 0x%lx\n", size, base);
+		pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+		pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
 		dump_stack();
 		return -1;
 	}
@@ -416,66 +437,64 @@ static int mtrr_check(unsigned long base, unsigned long size)
 }
 
 /**
  * mtrr_add - Add a memory type region
  * @base: Physical base address of region
  * @size: Physical size of region
  * @type: Type of MTRR desired
  * @increment: If this is true do usage counting on the region
  *
  * Memory type region registers control the caching on newer Intel and
  * non Intel processors. This function allows drivers to request an
  * MTRR is added. The details and hardware specifics of each processor's
  * implementation are hidden from the caller, but nevertheless the
  * caller should expect to need to provide a power of two size on an
  * equivalent power of two boundary.
  *
  * If the region cannot be added either because all regions are in use
  * or the CPU cannot support it a negative value is returned. On success
  * the register number for this entry is returned, but should be treated
  * as a cookie only.
  *
  * On a multiprocessor machine the changes are made to all processors.
  * This is required on x86 by the Intel processors.
  *
  * The available types are
  *
  * %MTRR_TYPE_UNCACHABLE - No caching
  *
  * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
  *
  * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
  *
  * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
  *
  * BUGS: Needs a quiet flag for the cases where drivers do not mind
  * failures and do not wish system log messages to be sent.
  */
-
-int
-mtrr_add(unsigned long base, unsigned long size, unsigned int type,
-	 bool increment)
+int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
+	     bool increment)
 {
 	if (mtrr_check(base, size))
 		return -EINVAL;
 	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
 			     increment);
 }
+EXPORT_SYMBOL(mtrr_add);
 
 /**
  * mtrr_del_page - delete a memory type region
  * @reg: Register returned by mtrr_add
  * @base: Physical base address
  * @size: Size of region
  *
  * If register is supplied then base and size are ignored. This is
  * how drivers should call it.
  *
  * Releases an MTRR region. If the usage count drops to zero the
  * register is freed and the region returns to default state.
  * On success the register is returned, on failure a negative error
  * code.
  */
-
 int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 {
 	int i, max;
@@ -500,22 +519,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 			}
 		}
 		if (reg < 0) {
-			printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
-			       size);
+			pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
+				 base, size);
 			goto out;
 		}
 	}
 	if (reg >= max) {
-		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
+		pr_warning("mtrr: register: %d too big\n", reg);
 		goto out;
 	}
 	mtrr_if->get(reg, &lbase, &lsize, &ltype);
 	if (lsize < 1) {
-		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
+		pr_warning("mtrr: MTRR %d not used\n", reg);
 		goto out;
 	}
 	if (mtrr_usage_table[reg] < 1) {
-		printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
+		pr_warning("mtrr: reg: %d has count=0\n", reg);
 		goto out;
 	}
 	if (--mtrr_usage_table[reg] < 1)
@@ -526,33 +545,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	put_online_cpus();
 	return error;
 }
+
 /**
  * mtrr_del - delete a memory type region
  * @reg: Register returned by mtrr_add
  * @base: Physical base address
  * @size: Size of region
  *
  * If register is supplied then base and size are ignored. This is
  * how drivers should call it.
  *
  * Releases an MTRR region. If the usage count drops to zero the
  * register is freed and the region returns to default state.
  * On success the register is returned, on failure a negative error
  * code.
  */
-
-int
-mtrr_del(int reg, unsigned long base, unsigned long size)
+int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
 	if (mtrr_check(base, size))
 		return -EINVAL;
 	return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
 }
-
-EXPORT_SYMBOL(mtrr_add);
 EXPORT_SYMBOL(mtrr_del);
 
-/* HACK ALERT!
+/*
+ * HACK ALERT!
  * These should be called implicitly, but we can't yet until all the initcall
  * stuff is done...
  */
@@ -576,29 +593,28 @@ struct mtrr_value {
 
 static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
 
-static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
+static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
 {
 	int i;
 
 	for (i = 0; i < num_var_ranges; i++) {
-		mtrr_if->get(i,
-			     &mtrr_value[i].lbase,
-			     &mtrr_value[i].lsize,
-			     &mtrr_value[i].ltype);
+		mtrr_if->get(i, &mtrr_value[i].lbase,
+				&mtrr_value[i].lsize,
+				&mtrr_value[i].ltype);
 	}
 	return 0;
 }
 
-static int mtrr_restore(struct sys_device * sysdev)
+static int mtrr_restore(struct sys_device *sysdev)
 {
 	int i;
 
 	for (i = 0; i < num_var_ranges; i++) {
-		if (mtrr_value[i].lsize)
-			set_mtrr(i,
-				 mtrr_value[i].lbase,
-				 mtrr_value[i].lsize,
-				 mtrr_value[i].ltype);
+		if (mtrr_value[i].lsize) {
+			set_mtrr(i, mtrr_value[i].lbase,
+				    mtrr_value[i].lsize,
+				    mtrr_value[i].ltype);
+		}
 	}
 	return 0;
 }
@@ -615,26 +631,29 @@ int __initdata changed_by_mtrr_cleanup;
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
  * This needs to be called early; before any of the other CPUs are
  * initialized (i.e. before smp_init()).
  *
  */
 void __init mtrr_bp_init(void)
 {
 	u32 phys_addr;
+
 	init_ifs();
 
 	phys_addr = 32;
 
 	if (cpu_has_mtrr) {
 		mtrr_if = &generic_mtrr_ops;
 		size_or_mask = 0xff000000;	/* 36 bits */
 		size_and_mask = 0x00f00000;
 		phys_addr = 36;
 
-		/* This is an AMD specific MSR, but we assume(hope?) that
-		   Intel will implement it to when they extend the address
-		   bus of the Xeon. */
+		/*
+		 * This is an AMD specific MSR, but we assume(hope?) that
+		 * Intel will implement it to when they extend the address
+		 * bus of the Xeon.
+		 */
 		if (cpuid_eax(0x80000000) >= 0x80000008) {
 			phys_addr = cpuid_eax(0x80000008) & 0xff;
 			/* CPUID workaround for Intel 0F33/0F34 CPU */
@@ -649,9 +668,11 @@ void __init mtrr_bp_init(void)
 			size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 		} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
 			   boot_cpu_data.x86 == 6) {
-			/* VIA C* family have Intel style MTRRs, but
-			   don't support PAE */
-			size_or_mask = 0xfff00000;	/* 32 bits */
+			/*
+			 * VIA C* family have Intel style MTRRs,
+			 * but don't support PAE
+			 */
+			size_or_mask = 0xfff00000;	/* 32 bits */
 			size_and_mask = 0;
 			phys_addr = 32;
 		}
@@ -694,7 +715,6 @@ void __init mtrr_bp_init(void)
 				changed_by_mtrr_cleanup = 1;
 				mtrr_if->set_all();
 			}
-
 		}
 	}
 }
@@ -706,12 +726,17 @@ void mtrr_ap_init(void)
 	if (!mtrr_if || !use_intel())
 		return;
 	/*
-	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
-	 * but this routine will be called in cpu boot time, holding the lock
-	 * breaks it. This routine is called in two cases: 1.very earily time
-	 * of software resume, when there absolutely isn't mtrr entry changes;
-	 * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to
-	 * prevent mtrr entry changes
+	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
+	 * changed, but this routine will be called in cpu boot time,
+	 * holding the lock breaks it.
+	 *
+	 * This routine is called in two cases:
+	 *
+	 *   1. very earily time of software resume, when there absolutely
+	 *      isn't mtrr entry changes;
+	 *
+	 *   2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
+	 *      lock to prevent mtrr entry changes
 	 */
 	local_irq_save(flags);
 
@@ -732,19 +757,23 @@ static int __init mtrr_init_finialize(void)
 {
 	if (!mtrr_if)
 		return 0;
+
 	if (use_intel()) {
 		if (!changed_by_mtrr_cleanup)
 			mtrr_state_warn();
-	} else {
-		/* The CPUs haven't MTRR and seem to not support SMP. They have
-		 * specific drivers, we use a tricky method to support
-		 * suspend/resume for them.
-		 * TBD: is there any system with such CPU which supports
-		 * suspend/resume? if no, we should remove the code.
-		 */
-		sysdev_driver_register(&cpu_sysdev_class,
-				       &mtrr_sysdev_driver);
+		return 0;
 	}
+
+	/*
+	 * The CPU has no MTRR and seems to not support SMP. They have
+	 * specific drivers, we use a tricky method to support
+	 * suspend/resume for them.
+	 *
+	 * TBD: is there any system with such CPU which supports
+	 * suspend/resume? If no, we should remove the code.
+	 */
+	sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
+
 	return 0;
 }
 subsys_initcall(mtrr_init_finialize);
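
The long comment above set_mtrr() describes a counter-and-gate rendezvous: the master arms data.count, the other CPUs decrement it at each synchronisation point, and the master toggles data.gate to release them. A rough userspace analogue of that handshake, using C11 atomics and pthreads — the names and busy-wait structure are illustrative only, not the kernel's API:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NCPUS 4

	static atomic_int count;
	static atomic_int gate;

	/* What each "other CPU" does, mirroring ipi_handler(). */
	static void *worker(void *arg)
	{
		atomic_fetch_sub(&count, 1);	/* "interrupts disabled" */
		while (!atomic_load(&gate))	/* wait to be unleashed */
			;
		/* ... would program this CPU's MTRRs here ... */
		atomic_fetch_sub(&count, 1);	/* done with the update */
		while (atomic_load(&gate))	/* wait for the master to finish */
			;
		atomic_fetch_sub(&count, 1);	/* last touch of shared data */
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NCPUS - 1];
		int i;

		atomic_store(&count, NCPUS - 1);
		atomic_store(&gate, 0);
		for (i = 0; i < NCPUS - 1; i++)
			pthread_create(&tid[i], NULL, worker, NULL);

		while (atomic_load(&count))	/* wait for all procs to check in */
			;
		atomic_store(&count, NCPUS - 1);
		atomic_store(&gate, 1);		/* unleash the workers */

		/* ... master would program its own MTRRs here ... */

		while (atomic_load(&count))	/* wait for the others */
			;
		atomic_store(&count, NCPUS - 1);
		atomic_store(&gate, 0);

		while (atomic_load(&count))	/* everyone has seen the gate drop */
			;
		for (i = 0; i < NCPUS - 1; i++)
			pthread_join(tid[i], NULL);
		puts("rendezvous complete");
		return 0;
	}

The three decrements per worker correspond to the three synchronisation points in ipi_handler(): interrupts disabled, MTRRs written, and gate observed clear; only then is the master free to let 'data' go out of scope.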