author    Ingo Molnar <mingo@elte.hu>    2009-09-07 02:19:51 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-09-07 02:19:51 -0400
commit    a1922ed661ab2c1637d0b10cde933bd9cd33d965 (patch)
tree      0f1777542b385ebefd30b3586d830fd8ed6fda5b /arch/x86/include
parent    75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf (diff)
parent    d28daf923ac5e4a0d7cecebae56f3e339189366b (diff)
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
arch/Kconfig
kernel/trace/trace.h
Merge reason: resolve the conflicts, plus adapt to the new
ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include')
40 files changed, 290 insertions, 433 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 4518dc500903..20d1465a2ab0 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -144,6 +144,7 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 
 #else /* !CONFIG_ACPI */
 
+#define acpi_disabled 1
 #define acpi_lapic 0
 #define acpi_ioapic 0
 static inline void acpi_noirq_set(void) { }
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index 262e02820049..bdf96f119f06 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -29,9 +29,11 @@ extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
 extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_shutdown(void);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
+static inline void amd_iommu_shutdown(void) { }
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
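The #else branch above illustrates a kernel-wide idiom worth noting: when a feature is configured out, an empty static inline stub with the same signature keeps every call site free of #ifdef clutter and compiles away to nothing. A minimal user-space sketch of the pattern, with hypothetical CONFIG_FEATURE_FOO and feature_foo_shutdown() names standing in for real kernel symbols:

#include <stdio.h>

#define CONFIG_FEATURE_FOO 0	/* pretend the feature is compiled out */

#if CONFIG_FEATURE_FOO
extern void feature_foo_shutdown(void);	/* real implementation elsewhere */
#else
static inline void feature_foo_shutdown(void) { }	/* optimizes away */
#endif

int main(void)
{
	feature_foo_shutdown();	/* no #ifdef needed at the call site */
	puts("ok");
	return 0;
}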
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 8cb9c814e120..dc5a667ff791 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -19,7 +19,10 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}
 
 /**
  * atomic_set - set atomic variable
@@ -28,7 +31,10 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)	(((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
 
 /**
  * atomic_add - add integer to atomic variable
@@ -200,8 +206,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	return atomic_add_return(-i, v);
 }
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -250,67 +263,22 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 /* An 64bit atomic type */
 
 typedef struct {
-	unsigned long long counter;
+	u64 __aligned(8) counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val)	{ (val) }
 
-/**
- * atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
- *
- * Atomically reads the value of @v.
- * Doesn't imply a read memory barrier.
- */
-#define __atomic64_read(ptr)		((ptr)->counter)
-
-static inline unsigned long long
-cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new)
-{
-	asm volatile(
-
-		LOCK_PREFIX "cmpxchg8b (%[ptr])\n"
-
-		     : "=A" (old)
-
-		     : [ptr] "D" (ptr),
-		       "A" (old),
-		       "b" (ll_low(new)),
-		       "c" (ll_high(new))
-
-		     : "memory");
-
-	return old;
-}
-
-static inline unsigned long long
-atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
-		 unsigned long long new_val)
-{
-	return cmpxchg8b(&ptr->counter, old_val, new_val);
-}
+extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
 
 /**
  * atomic64_xchg - xchg atomic64 variable
  * @ptr:      pointer to type atomic64_t
  * @new_val:  value to assign
- * @old_val:  old value that was there
  *
  * Atomically xchgs the value of @ptr to @new_val and returns
  * the old value.
  */
-
-static inline unsigned long long
-atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
-{
-	unsigned long long old_val;
-
-	do {
-		old_val = atomic_read(ptr);
-	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
-	return old_val;
-}
+extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
 
 /**
  * atomic64_set - set atomic64 variable
@@ -319,10 +287,7 @@ atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
  *
  * Atomically sets the value of @ptr to @new_val.
  */
-static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
-{
-	atomic64_xchg(ptr, new_val);
-}
+extern void atomic64_set(atomic64_t *ptr, u64 new_val);
 
 /**
  * atomic64_read - read atomic64 variable
@@ -330,17 +295,30 @@ static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
  *
  * Atomically reads the value of @ptr and returns it.
  */
-static inline unsigned long long atomic64_read(atomic64_t *ptr)
+static inline u64 atomic64_read(atomic64_t *ptr)
 {
-	unsigned long long curr_val;
-
-	do {
-		curr_val = __atomic64_read(ptr);
-	} while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val);
-
-	return curr_val;
+	u64 res;
+
+	/*
+	 * Note, we inline this atomic64_t primitive because
+	 * it only clobbers EAX/EDX and leaves the others
+	 * untouched. We also (somewhat subtly) rely on the
+	 * fact that cmpxchg8b returns the current 64-bit value
+	 * of the memory location we are touching:
+	 */
+	asm volatile(
+		"mov %%ebx, %%eax\n\t"
+		"mov %%ecx, %%edx\n\t"
+		LOCK_PREFIX "cmpxchg8b %1\n"
+			: "=&A" (res)
+			: "m" (*ptr)
+		);
+
+	return res;
 }
 
+extern u64 atomic64_read(atomic64_t *ptr);
+
 /**
  * atomic64_add_return - add and return
  * @delta: integer value to add
@@ -348,34 +326,14 @@ static inline unsigned long long atomic64_read(atomic64_t *ptr)
  *
  * Atomically adds @delta to @ptr and returns @delta + *@ptr
  */
-static inline unsigned long long
-atomic64_add_return(unsigned long long delta, atomic64_t *ptr)
-{
-	unsigned long long old_val, new_val;
-
-	do {
-		old_val = atomic_read(ptr);
-		new_val = old_val + delta;
-
-	} while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
-
-	return new_val;
-}
-
-static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr)
-{
-	return atomic64_add_return(-delta, ptr);
-}
+extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
 
-static inline long atomic64_inc_return(atomic64_t *ptr)
-{
-	return atomic64_add_return(1, ptr);
-}
-
-static inline long atomic64_dec_return(atomic64_t *ptr)
-{
-	return atomic64_sub_return(1, ptr);
-}
+/*
+ * Other variants with different arithmetic operators:
+ */
+extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
+extern u64 atomic64_inc_return(atomic64_t *ptr);
+extern u64 atomic64_dec_return(atomic64_t *ptr);
 
 /**
  * atomic64_add - add integer to atomic64 variable
@@ -384,10 +342,7 @@ static inline long atomic64_dec_return(atomic64_t *ptr)
  *
  * Atomically adds @delta to @ptr.
  */
-static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
-{
-	atomic64_add_return(delta, ptr);
-}
+extern void atomic64_add(u64 delta, atomic64_t *ptr);
 
 /**
  * atomic64_sub - subtract the atomic64 variable
@@ -396,10 +351,7 @@ static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
  *
  * Atomically subtracts @delta from @ptr.
  */
-static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
-{
-	atomic64_add(-delta, ptr);
-}
+extern void atomic64_sub(u64 delta, atomic64_t *ptr);
 
 /**
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -410,13 +362,7 @@ static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int
-atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
-{
-	unsigned long long old_val = atomic64_sub_return(delta, ptr);
-
-	return old_val == 0;
-}
+extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
 
 /**
  * atomic64_inc - increment atomic64 variable
@@ -424,10 +370,7 @@ atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
  *
  * Atomically increments @ptr by 1.
  */
-static inline void atomic64_inc(atomic64_t *ptr)
-{
-	atomic64_add(1, ptr);
-}
+extern void atomic64_inc(atomic64_t *ptr);
 
 /**
  * atomic64_dec - decrement atomic64 variable
@@ -435,10 +378,7 @@ static inline void atomic64_inc(atomic64_t *ptr)
  *
  * Atomically decrements @ptr by 1.
  */
-static inline void atomic64_dec(atomic64_t *ptr)
-{
-	atomic64_sub(1, ptr);
-}
+extern void atomic64_dec(atomic64_t *ptr);
 
 /**
  * atomic64_dec_and_test - decrement and test
@@ -448,10 +388,7 @@ static inline void atomic64_dec(atomic64_t *ptr)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *ptr)
-{
-	return atomic64_sub_and_test(1, ptr);
-}
+extern int atomic64_dec_and_test(atomic64_t *ptr);
 
 /**
  * atomic64_inc_and_test - increment and test
@@ -461,10 +398,7 @@ static inline int atomic64_dec_and_test(atomic64_t *ptr)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *ptr)
-{
-	return atomic64_sub_and_test(-1, ptr);
-}
+extern int atomic64_inc_and_test(atomic64_t *ptr);
 
 /**
  * atomic64_add_negative - add and test if negative
@@ -475,13 +409,7 @@ static inline int atomic64_inc_and_test(atomic64_t *ptr)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int
-atomic64_add_negative(unsigned long long delta, atomic64_t *ptr)
-{
-	long long old_val = atomic64_add_return(delta, ptr);
-
-	return old_val < 0;
-}
+extern int atomic64_add_negative(u64 delta, atomic64_t *ptr);
 
 #include <asm-generic/atomic-long.h>
 #endif /* _ASM_X86_ATOMIC_32_H */
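The inline atomic64_read() above is the subtle part of this conversion. On 32-bit x86 a plain 64-bit load is not atomic, so the code leans on lock cmpxchg8b: it seeds the comparison value EDX:EAX from EBX:ECX, and whether the compare matches (storing the identical EBX:ECX back) or mismatches (loading the memory value into EDX:EAX), the register pair ends up holding the current 64-bit contents. A user-space sketch of the same trick, assuming a 32-bit build (gcc -m32) and a hypothetical atomic64_read_demo() name:

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uint64_t __attribute__((aligned(8))) counter;	/* cmpxchg8b wants 8-byte alignment */
} my_atomic64_t;

static inline uint64_t atomic64_read_demo(my_atomic64_t *ptr)
{
	uint64_t res;

	/* Seed EDX:EAX from EBX:ECX. On a match, cmpxchg8b stores
	 * ECX:EBX (the same value) back, so memory never changes;
	 * on a mismatch it loads the current value into EDX:EAX.
	 * Either way EDX:EAX holds the atomically read value. */
	asm volatile("mov %%ebx, %%eax\n\t"
		     "mov %%ecx, %%edx\n\t"
		     "lock; cmpxchg8b %1\n"
		     : "=&A" (res)
		     : "m" (*ptr));
	return res;
}

int main(void)
{
	my_atomic64_t v = { 0x123456789abcdef0ULL };

	printf("%llx\n", (unsigned long long)atomic64_read_demo(&v));
	return 0;
}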
diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
index 0d6360220007..d605dc268e79 100644
--- a/arch/x86/include/asm/atomic_64.h
+++ b/arch/x86/include/asm/atomic_64.h
@@ -18,7 +18,10 @@
  *
  * Atomically reads the value of @v.
  */
-#define atomic_read(v)		((v)->counter)
+static inline int atomic_read(const atomic_t *v)
+{
+	return v->counter;
+}
 
 /**
  * atomic_set - set atomic variable
@@ -27,7 +30,10 @@
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic_set(v, i)	(((v)->counter) = (i))
+static inline void atomic_set(atomic_t *v, int i)
+{
+	v->counter = i;
+}
 
 /**
  * atomic_add - add integer to atomic variable
@@ -192,7 +198,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
  */
-#define atomic64_read(v)	((v)->counter)
+static inline long atomic64_read(const atomic64_t *v)
+{
+	return v->counter;
+}
 
 /**
  * atomic64_set - set atomic64 variable
@@ -201,7 +210,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
  *
  * Atomically sets the value of @v to @i.
  */
-#define atomic64_set(v, i)	(((v)->counter) = (i))
+static inline void atomic64_set(atomic64_t *v, long i)
+{
+	v->counter = i;
+}
 
 /**
  * atomic64_add - add integer to atomic64 variable
@@ -355,11 +367,25 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic64_xchg(atomic64_t *v, long new)
+{
+	return xchg(&v->counter, new);
+}
 
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	return cmpxchg(&v->counter, old, new);
+}
+
+static inline long atomic_xchg(atomic_t *v, int new)
+{
+	return xchg(&v->counter, new);
+}
 
 /**
  * atomic_add_unless - add unless the number is a given value
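In both headers the macro-to-inline conversion buys compile-time type checking: a function insists that its argument really is an atomic_t pointer, where the old macro accepted anything with a .counter field. A self-contained illustration with hypothetical my_atomic_* names, not the kernel's:

#include <stdio.h>

typedef struct { int counter; } atomic_t;
typedef struct { long counter; } atomic64_t;

/* Old style: any struct with a .counter member slips through. */
#define my_atomic_read_macro(v)	((v)->counter)

/* New style: the argument type is enforced by the compiler. */
static inline int my_atomic_read(const atomic_t *v)
{
	return v->counter;
}

int main(void)
{
	atomic_t a = { 42 };
	atomic64_t b = { 1L << 40 };

	printf("%d\n", my_atomic_read(&a));		/* fine */
	printf("%ld\n", my_atomic_read_macro(&b));	/* macro checks nothing */
	/* my_atomic_read(&b) would now draw a compiler diagnostic. */
	return 0;
}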
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 418e632d4a80..7a1065958ba9 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -8,7 +8,7 @@
 
 #ifdef __KERNEL__
 
-#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 
 /* Physical address where kernel should be loaded. */
 #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -16,10 +16,10 @@
 				& ~(CONFIG_PHYSICAL_ALIGN - 1))
 
 /* Minimum kernel alignment, as a power of two */
-#ifdef CONFIG_x86_64
+#ifdef CONFIG_X86_64
 #define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
 #else
-#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT+1)
+#define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_ORDER)
 #endif
 #define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)
 
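LOAD_PHYSICAL_ADDR above is the classic power-of-two align-up idiom, (x + a - 1) & ~(a - 1). A small demonstration, with made-up values standing in for CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN:

#include <stdio.h>

/* Round x up to the next multiple of align (a power of two). */
static unsigned long align_up(unsigned long x, unsigned long align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	/* e.g. CONFIG_PHYSICAL_START=0x100000, CONFIG_PHYSICAL_ALIGN=0x200000 */
	printf("%#lx\n", align_up(0x100000UL, 0x200000UL));	/* 0x200000 */
	printf("%#lx\n", align_up(0x200000UL, 0x200000UL));	/* unchanged */
	return 0;
}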
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c45f415ce315..c993e9e0fed4 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_DESC_H
 #define _ASM_X86_DESC_H
 
-#ifndef __ASSEMBLY__
 #include <asm/desc_defs.h>
 #include <asm/ldt.h>
 #include <asm/mmu.h>
@@ -380,29 +379,4 @@ static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
 	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
 }
 
-#else
-/*
- * GET_DESC_BASE reads the descriptor base of the specified segment.
- *
- * Args:
- *    idx - descriptor index
- *    gdt - GDT pointer
- *    base - 32bit register to which the base will be written
- *    lo_w - lo word of the "base" register
- *    lo_b - lo byte of the "base" register
- *    hi_b - hi byte of the low word of the "base" register
- *
- * Example:
- *    GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
- *    Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
- */
-#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
-	movb idx * 8 + 4(gdt), lo_b;			\
-	movb idx * 8 + 7(gdt), hi_b;			\
-	shll $16, base;					\
-	movw idx * 8 + 2(gdt), lo_w;
-
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_X86_DESC_H */
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index b93405b228b4..1c3f9435f1c9 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -33,6 +33,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 #endif
 }
 
+#include <asm-generic/dma-mapping-common.h>
+
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -53,177 +55,6 @@ extern int dma_set_mask(struct device *dev, u64 mask);
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 					dma_addr_t *dma_addr, gfp_t flag);
 
-static inline dma_addr_t
-dma_map_single(struct device *hwdev, void *ptr, size_t size,
-	       enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(ptr, size);
-	addr = ops->map_page(hwdev, virt_to_page(ptr),
-			     (unsigned long)ptr & ~PAGE_MASK, size,
-			     dir, NULL);
-	debug_dma_map_page(hwdev, virt_to_page(ptr),
-			   (unsigned long)ptr & ~PAGE_MASK, size,
-			   dir, addr, true);
-	return addr;
-}
-
-static inline void
-dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
-		 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, true);
-}
-
-static inline int
-dma_map_sg(struct device *hwdev, struct scatterlist *sg,
-	   int nents, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-	int ents;
-	struct scatterlist *s;
-	int i;
-
-	BUG_ON(!valid_dma_direction(dir));
-	for_each_sg(sg, s, nents, i)
-		kmemcheck_mark_initialized(sg_virt(s), s->length);
-	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
-	debug_dma_map_sg(hwdev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void
-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	     enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(hwdev, sg, nents, dir);
-	if (ops->unmap_sg)
-		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
-}
-
-static inline void
-dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
-			   size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
-		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
-	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_cpu)
-		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-					       size, dir);
-	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
-					    offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_range_for_device)
-		ops->sync_single_range_for_device(hwdev, dma_handle,
-						  offset, size, dir);
-	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
-					       offset, size, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-		    int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
-	flush_write_buffers();
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(hwdev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
-
-	flush_write_buffers();
-}
-
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-				      size_t offset, size_t size,
-				      enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	kmemcheck_mark_initialized(page_address(page) + offset, size);
-	addr = ops->map_page(dev, page, offset, size, dir, NULL);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
-				  size_t size, enum dma_data_direction dir)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
-	debug_dma_unmap_page(dev, addr, size, dir, false);
-}
-
 static inline void
 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction dir)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index edc90f23e708..8406ed7f9926 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -33,7 +33,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
 	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size) ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -84,7 +84,8 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
+extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
+				 u32 type);
 
 #endif /* CONFIG_X86_32 */
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 2d81af3974a0..7b2d71df39a6 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -111,12 +111,9 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
 	FIX_PARAVIRT_BOOTMAP,
 #endif
-	FIX_TEXT_POKE0,	/* reserve 2 pages for text_poke() */
-	FIX_TEXT_POKE1,
+	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
+	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
 	__end_of_permanent_fixed_addresses,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-	FIX_OHCI1394_BASE,
-#endif
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
@@ -129,6 +126,9 @@ enum fixed_addresses {
 	FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
 			(__end_of_permanent_fixed_addresses & 255),
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	FIX_OHCI1394_BASE,
+#endif
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 #endif
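The swapped FIX_TEXT_POKE ordering makes sense once you recall that fixmap addresses grow downward: fix_to_virt(idx) subtracts from FIXADDR_TOP, so the higher enum index is the lower virtual address, and text_poke() wants POKE0 to be the first of two ascending pages. A sketch with illustrative, not real, FIXADDR_TOP and index values:

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xfffff000UL	/* illustrative only */

/* Same shape as the kernel's __fix_to_virt(): top minus index. */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	enum { FIX_TEXT_POKE1 = 100, FIX_TEXT_POKE0 };	/* POKE0 == 101 */

	/* POKE0 lands one page below POKE1, giving text_poke() a
	 * contiguous, ascending two-page window starting at POKE0. */
	printf("POKE0 %#lx\nPOKE1 %#lx\n",
	       fix_to_virt(FIX_TEXT_POKE0), fix_to_virt(FIX_TEXT_POKE1));
	return 0;
}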
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index bd2c6511c887..db24c2278be0 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,13 +28,6 @@
 
 #endif
 
-/* FIXME: I don't want to stay hardcoded */
-#ifdef CONFIG_X86_64
-# define FTRACE_SYSCALL_MAX     296
-#else
-# define FTRACE_SYSCALL_MAX     333
-#endif
-
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index daf866ed0612..330ee807f89e 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -161,6 +161,7 @@ extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		struct io_apic_irq_attr *irq_attr);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
+extern void ioapic_insert_resources(void);
 
 extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
 extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
@@ -180,6 +181,7 @@ extern void ioapic_write_entry(int apic, int pin,
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
 static inline void ioapic_init_mappings(void)	{ }
+static inline void ioapic_insert_resources(void) { }
 
 static inline void probe_nr_irqs_gsi(void)	{ }
 #endif
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index af326a2975b5..fd6d21bbee6c 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
 extern struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
+extern int iommu_pass_through;
 
 /* 10 seconds */
 #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 2bdab21f0898..c6ccbe7e81ad 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -12,9 +12,15 @@ static inline unsigned long native_save_fl(void)
 {
 	unsigned long flags;
 
+	/*
+	 * Note: this needs to be "=r" not "=rm", because we have the
+	 * stack offset from what gcc expects at the time the "pop" is
+	 * executed, and so a memory reference with respect to the stack
+	 * would end up using the wrong address.
+	 */
 	asm volatile("# __raw_save_flags\n\t"
 		     "pushf ; pop %0"
-		     : "=g" (flags)
+		     : "=r" (flags)
 		     : /* no input */
 		     : "memory");
 
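The constraint change is easy to reproduce outside the kernel. Between the push and the pop the stack pointer sits one word below what the compiler assumes, so a memory operand (which "=g" permits) addressed relative to the stack could hit the wrong slot; "=r" forces a register. A user-space sketch of the same pushf/pop pattern:

#include <stdio.h>

static inline unsigned long save_flags(void)
{
	unsigned long flags;

	/* Must be "=r": a stack-relative memory operand could be
	 * mis-addressed while the pushed word is still on the stack. */
	asm volatile("pushf ; pop %0"
		     : "=r" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}

int main(void)
{
	unsigned long flags = save_flags();

	/* Bit 9 is IF, the interrupt-enable flag; set in user space. */
	printf("flags=%#lx IF=%lu\n", flags, (flags >> 9) & 1);
	return 0;
}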
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 313389cd50d2..5136dad57cbb 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -17,8 +17,7 @@
 /* Pages for switcher itself, then two pages per cpu */
 #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)
 
-/* We map at -4M (-2M when PAE is activated) for ease of mapping
- * into the guest (one PTE page). */
+/* We map at -4M (-2M for PAE) for ease of mapping (one PTE page). */
 #ifdef CONFIG_X86_PAE
 #define SWITCHER_ADDR 0xFFE00000
 #else
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index d31c4a684078..ba0eed8aa1a6 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -30,27 +30,27 @@
 #include <asm/hw_irq.h>
 #include <asm/kvm_para.h>
 
-/*G:031 But first, how does our Guest contact the Host to ask for privileged
+/*G:030
+ * But first, how does our Guest contact the Host to ask for privileged
  * operations? There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism. Seventeen hypercalls are
- * available: the hypercall number is put in the %eax register, and the
- * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
- * If a return value makes sense, it's returned in %eax.
+ * We use the KVM hypercall mechanism, though completely different hypercall
+ * numbers. Seventeen hypercalls are available: the hypercall number is put in
+ * the %eax register, and the arguments (when required) are placed in %ebx,
+ * %ecx, %edx and %esi. If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure. This reflects Winston Churchill's
- * definition of a gentleman: "someone who is only rude intentionally". */
-/*:*/
+ * definition of a gentleman: "someone who is only rude intentionally".
+:*/
 
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
 
 #define LHCALL_RING_SIZE 64
 struct hcall_args {
-	/* These map directly onto eax, ebx, ecx, edx and esi
-	 * in struct lguest_regs */
+	/* These map directly onto eax/ebx/ecx/edx/esi in struct lguest_regs */
	unsigned long arg0, arg1, arg2, arg3, arg4;
 };
 
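The register convention the rewritten comment describes (number in %eax, arguments in %ebx, %ecx, %edx, %esi, result in %eax) looks roughly like the sketch below. It is modeled on lguest's hcall(), not a verbatim copy; KVM_HYPERCALL comes from the <asm/kvm_para.h> include already present in this header, and the hcall_sketch name is hypothetical:

static inline unsigned long
hcall_sketch(unsigned long call, unsigned long arg1, unsigned long arg2,
	     unsigned long arg3, unsigned long arg4)
{
	/* KVM_HYPERCALL emits the instruction that traps to the Host. */
	asm volatile(KVM_HYPERCALL
		     : "=a" (call)
		     : "a" (call), "b" (arg1), "c" (arg2),
		       "d" (arg3), "S" (arg4)
		     : "memory");
	return call;	/* hypercall result, if the call returns one */
}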
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 540a466e50f5..5cdd8d100ec9 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -102,15 +102,39 @@ struct mce_log {
 
 #ifdef __KERNEL__
 
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+
 extern int mce_disabled;
+extern int mce_p5_enabled;
 
-#include <asm/atomic.h>
-#include <linux/percpu.h>
+#ifdef CONFIG_X86_MCE
+void mcheck_init(struct cpuinfo_x86 *c);
+#else
+static inline void mcheck_init(struct cpuinfo_x86 *c) {}
+#endif
+
+#ifdef CONFIG_X86_OLD_MCE
+extern int nr_mce_banks;
+void amd_mcheck_init(struct cpuinfo_x86 *c);
+void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
+void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
+#endif
+
+#ifdef CONFIG_X86_ANCIENT_MCE
+void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
+void winchip_mcheck_init(struct cpuinfo_x86 *c);
+static inline void enable_p5_mce(void) { mce_p5_enabled = 1; }
+#else
+static inline void intel_p5_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
+static inline void enable_p5_mce(void) {}
+#endif
 
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
-extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
 /*
  * To support more than 128 would need to escape the predefined
@@ -145,12 +169,8 @@ int mce_available(struct cpuinfo_x86 *c);
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
 
-void mce_log_therm_throt_event(__u64 status);
-
 extern atomic_t mce_entry;
 
-void do_machine_check(struct pt_regs *, long);
-
 typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
 DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
 
@@ -167,13 +187,32 @@ void mce_notify_process(void);
 DECLARE_PER_CPU(struct mce, injectm);
 extern struct file_operations mce_chrdev_ops;
 
-#ifdef CONFIG_X86_MCE
-void mcheck_init(struct cpuinfo_x86 *c);
-#else
-#define mcheck_init(c) do { } while (0)
-#endif
+/*
+ * Exception handler
+ */
+
+/* Call the installed machine check handler for this CPU setup. */
+extern void (*machine_check_vector)(struct pt_regs *, long error_code);
+void do_machine_check(struct pt_regs *, long);
+
+/*
+ * Threshold handler
+ */
 
 extern void (*mce_threshold_vector)(void);
+extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
+
+/*
+ * Thermal handler
+ */
+
+void intel_init_thermal(struct cpuinfo_x86 *c);
+
+#ifdef CONFIG_X86_NEW_MCE
+void mce_log_therm_throt_event(__u64 status);
+#else
+static inline void mce_log_therm_throt_event(__u64 status) {}
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1692fb5050e3..6be7fc254b59 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -246,10 +246,6 @@
 #define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
 #define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
 
-/* Intel Model 6 */
-#define MSR_P6_EVNTSEL0			0x00000186
-#define MSR_P6_EVNTSEL1			0x00000187
-
 /* P4/Xeon+ specific */
 #define MSR_IA32_MCG_EAX		0x00000180
 #define MSR_IA32_MCG_EBX		0x00000181
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 22603764e7db..48ad9d29484a 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -3,13 +3,10 @@
 
 #include <asm/msr-index.h>
 
-#ifndef __ASSEMBLY__
-# include <linux/types.h>
-#endif
-
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/cpumask.h>
@@ -264,6 +261,4 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 #endif /* CONFIG_SMP */
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
-
-
 #endif /* _ASM_X86_MSR_H */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c97264409934..c86e5ed4af51 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -72,7 +72,6 @@ void lapic_watchdog_stop(void);
 int lapic_watchdog_init(unsigned nmi_hz);
 int lapic_wd_event(unsigned nmi_hz);
 unsigned lapic_adjust_nmi_hz(unsigned hz);
-int lapic_watchdog_ok(void);
 void disable_lapic_nmi_watchdog(void);
 void enable_lapic_nmi_watchdog(void);
 void stop_nmi(void);
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 8d382d3abf38..7639dbf5d223 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -41,7 +41,7 @@
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
 #define __PHYSICAL_MASK_SHIFT	46
-#define __VIRTUAL_MASK_SHIFT	48
+#define __VIRTUAL_MASK_SHIFT	47
 
 /*
  * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index b51a1e8b0baf..1ff685ca221c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -91,7 +91,7 @@ extern void pci_iommu_alloc(void);
 
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_DMA_API_DEBUG)
+#if defined(CONFIG_X86_64) || defined(CONFIG_DMAR) || defined(CONFIG_DMA_API_DEBUG)
 
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
@@ -130,6 +130,7 @@ extern void pci_iommu_alloc(void);
 
 /* generic pci stuff */
 #include <asm-generic/pci.h>
+#define PCIBIOS_MAX_MEM_32 0xffffffff
 
 #ifdef CONFIG_NUMA
 /* Returns the node based on pci bus */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index e60fd3e14bdf..b399988eee3a 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -121,6 +121,9 @@ extern int __init pcibios_init(void);
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
 
+extern struct acpi_mcfg_allocation *pci_mmcfg_config;
+extern int pci_mmcfg_config_num;
+
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
  * on their northbrige except through the * %eax register. As such, you MUST
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 02ecb30982a3..103f1ddb0d85 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -42,6 +42,7 @@
 
 #else /* ...!ASSEMBLY */
 
+#include <linux/kernel.h>
 #include <linux/stringify.h>
 
 #ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do {							\
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+	return NULL;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 876ed97147b3..fa64e401589d 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,14 +84,12 @@ union cpuid10_edx {
 #define MSR_ARCH_PERFMON_FIXED_CTR2			0x30b
 #define X86_PMC_IDX_FIXED_BUS_CYCLES			(X86_PMC_IDX_FIXED + 2)
 
-extern void set_perf_counter_pending(void);
-
-#define clear_perf_counter_pending()	do { } while (0)
-#define test_perf_counter_pending()	(0)
-
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(void);
+
+#define PERF_COUNTER_INDEX_OFFSET			0
+
 #else
 static inline void init_hw_perf_counters(void)		{ }
 static inline void perf_counters_lapic_init(void)	{ }
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index dd14c54ac718..0e8c2a0fd922 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -46,7 +46,13 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
 	__free_page(pte);
 }
 
-extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
+				  unsigned long address)
+{
+	___pte_free_tlb(tlb, pte);
+}
 
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
@@ -78,7 +84,13 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
-extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
+
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long adddress)
+{
+	___pmd_free_tlb(tlb, pmd);
+}
 
 #ifdef CONFIG_X86_PAE
 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
@@ -108,7 +120,14 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 	free_page((unsigned long)pud);
 }
 
-extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
+
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	___pud_free_tlb(tlb, pud);
+}
+
 #endif	/* PAGETABLE_LEVELS > 3 */
 #endif	/* PAGETABLE_LEVELS > 2 */
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3cc06e3fceb8..16748077559a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_X86_PGTABLE_H | 2 | #define _ASM_X86_PGTABLE_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <asm/e820.h> | ||
5 | 6 | ||
6 | #include <asm/pgtable_types.h> | 7 | #include <asm/pgtable_types.h> |
7 | 8 | ||
@@ -269,10 +270,17 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |||
269 | 270 | ||
270 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) | 271 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
271 | 272 | ||
272 | static inline int is_new_memtype_allowed(unsigned long flags, | 273 | static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, |
273 | unsigned long new_flags) | 274 | unsigned long flags, |
275 | unsigned long new_flags) | ||
274 | { | 276 | { |
275 | /* | 277 | /* |
278 | * PAT type is always WB for ISA. So no need to check. | ||
279 | */ | ||
280 | if (is_ISA_range(paddr, paddr + size - 1)) | ||
281 | return 1; | ||
282 | |||
283 | /* | ||
276 | * Certain new memtypes are not allowed with certain | 284 | * Certain new memtypes are not allowed with certain |
277 | * requested memtype: | 285 | * requested memtype: |
278 | * - request is uncached, return cannot be write-back | 286 | * - request is uncached, return cannot be write-back |
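
is_new_memtype_allowed() now receives the physical range so it can special-case the legacy ISA window: PAT attributes below 1MB are always write-back, so any requested/returned combination is accepted there before the usual compatibility rules are checked. A hedged caller sketch (simplified shape of a reserve_memtype()-style user; the function name here is illustrative):

/* Illustrative caller: the memtype tracker may grant a different
 * type than requested; this helper decides whether that combination
 * is acceptable for the given physical range. */
static int try_reserve(u64 paddr, unsigned long size,
                       unsigned long want_flags, unsigned long got_flags)
{
        if (!is_new_memtype_allowed(paddr, size, want_flags, got_flags))
                return -EINVAL;         /* e.g. UC wanted, WB granted */
        return 0;
}
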
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 31bd120cf2a2..01fd9461d323 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h | |||
@@ -49,13 +49,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t); | |||
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | #if defined(CONFIG_HIGHPTE) | 51 | #if defined(CONFIG_HIGHPTE) |
52 | #define __KM_PTE \ | ||
53 | (in_nmi() ? KM_NMI_PTE : \ | ||
54 | in_irq() ? KM_IRQ_PTE : \ | ||
55 | KM_PTE0) | ||
52 | #define pte_offset_map(dir, address) \ | 56 | #define pte_offset_map(dir, address) \ |
53 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) + \ | 57 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) + \ |
54 | pte_index((address))) | 58 | pte_index((address))) |
55 | #define pte_offset_map_nested(dir, address) \ | 59 | #define pte_offset_map_nested(dir, address) \ |
56 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ | 60 | ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) + \ |
57 | pte_index((address))) | 61 | pte_index((address))) |
58 | #define pte_unmap(pte) kunmap_atomic((pte), KM_PTE0) | 62 | #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE) |
59 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) | 63 | #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1) |
60 | #else | 64 | #else |
61 | #define pte_offset_map(dir, address) \ | 65 | #define pte_offset_map(dir, address) \ |
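
The __KM_PTE selector closes a real corruption window with CONFIG_HIGHPTE: pte_offset_map() pins its pte page through a kmap_atomic slot, and an NMI or IRQ handler that walks page tables while KM_PTE0 is in use would silently reuse that slot. Choosing the slot by context keeps the three nesting levels apart. The same selection, restated as a function for clarity (illustrative only):

/* Restates the __KM_PTE macro: pick a kmap_atomic slot per context
 * so an NMI that interrupts an in-flight pte_offset_map() cannot
 * clobber the interrupted mapping. */
static inline enum km_type km_pte_slot(void)
{
        if (in_nmi())
                return KM_NMI_PTE;
        if (in_irq())
                return KM_IRQ_PTE;
        return KM_PTE0;
}
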
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index abde308fdb0f..c57a30117149 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -165,10 +165,7 @@ extern void cleanup_highmap(void); | |||
165 | 165 | ||
166 | /* fs/proc/kcore.c */ | 166 | /* fs/proc/kcore.c */ |
167 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) | 167 | #define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK) |
168 | #define kc_offset_to_vaddr(o) \ | 168 | #define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK) |
169 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT - 1))) \ | ||
170 | ? ((o) | ~__VIRTUAL_MASK) \ | ||
171 | : (o)) | ||
172 | 169 | ||
173 | #define __HAVE_ARCH_PTE_SAME | 170 | #define __HAVE_ARCH_PTE_SAME |
174 | #endif /* !__ASSEMBLY__ */ | 171 | #endif /* !__ASSEMBLY__ */ |
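
The kc_offset_to_vaddr() simplification relies on /proc/kcore only ever handing out offsets derived from kernel-half addresses, so the sign extension can be unconditional. A worked round trip, assuming __VIRTUAL_MASK_SHIFT == 47 on x86-64:

/* Worked example (assumes __VIRTUAL_MASK_SHIFT == 47, so
 * __VIRTUAL_MASK == 0x00007fffffffffffUL):
 *
 *   vaddr  = 0xffff880000100000          (direct-map kernel address)
 *   offset = vaddr & __VIRTUAL_MASK      = 0x0000080000100000
 *   back   = offset | ~__VIRTUAL_MASK    = 0xffff880000100000
 *
 * The dropped conditional only changed the result for user-half
 * offsets, which kcore never produces. */
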
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h index 49fb3ecf3bb3..621f56d73121 100644 --- a/arch/x86/include/asm/proto.h +++ b/arch/x86/include/asm/proto.h | |||
@@ -22,7 +22,14 @@ extern int reboot_force; | |||
22 | 22 | ||
23 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); | 23 | long do_arch_prctl(struct task_struct *task, int code, unsigned long addr); |
24 | 24 | ||
25 | #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1)) | 25 | /* |
26 | #define round_down(x, y) ((x) & ~((y) - 1)) | 26 | * This looks more complex than it should be. But we need to |
27 | * get the type for the ~ right in round_down (it needs to be | ||
28 | * as wide as the result!), and we want to evaluate the macro | ||
29 | * arguments just once each. | ||
30 | */ | ||
31 | #define __round_mask(x,y) ((__typeof__(x))((y)-1)) | ||
32 | #define round_up(x,y) ((((x)-1) | __round_mask(x,y))+1) | ||
33 | #define round_down(x,y) ((x) & ~__round_mask(x,y)) | ||
27 | 34 | ||
28 | #endif /* _ASM_X86_PROTO_H */ | 35 | #endif /* _ASM_X86_PROTO_H */ |
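
The __round_mask() indirection matters whenever x is wider than y: with the old macros, ~((y)-1) was computed in y's type, so a 32-bit unsigned y zero-extended and wiped the upper half of a 64-bit x. A worked example of the failure mode (illustrative values):

/* Failure mode of the old round_down() when x is wider than y: */
u64 x    = 0x123456789abcULL;
u64 bad  = x & ~(0x1000u - 1);             /* mask is the 32-bit value
                                            * 0xfffff000u and zero-
                                            * extends: 0x56789000, wrong */
u64 good = x & ~__round_mask(x, 0x1000u);  /* mask widened to x's type
                                            * first: 0x123456789000, right */
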
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index b7e5db876399..4e77853321db 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -302,4 +302,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
302 | #define _raw_read_relax(lock) cpu_relax() | 302 | #define _raw_read_relax(lock) cpu_relax() |
303 | #define _raw_write_relax(lock) cpu_relax() | 303 | #define _raw_write_relax(lock) cpu_relax() |
304 | 304 | ||
305 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | ||
306 | static inline void smp_mb__after_lock(void) { } | ||
307 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | ||
308 | |||
305 | #endif /* _ASM_X86_SPINLOCK_H */ | 309 | #endif /* _ASM_X86_SPINLOCK_H */ |
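
smp_mb__after_lock() lets generic code ask for a full memory barrier immediately after taking a lock without paying twice on architectures where the lock already is one: on x86 the locked instruction behind spin_lock() is a full barrier, so the helper compiles to nothing, while an architecture without that guarantee would define it as smp_mb(). A hedged usage sketch (struct my_queue and its fields are illustrative):

/* Illustrative pattern: a full barrier is needed between taking the
 * lock and reading state written by another CPU; free on x86,
 * an smp_mb() elsewhere. */
static void notify_waiters(struct my_queue *q)
{
        spin_lock(&q->lock);
        smp_mb__after_lock();           /* no-op on x86 */
        if (waitqueue_active(&q->wait))
                wake_up_interruptible(&q->wait);
        spin_unlock(&q->lock);
}
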
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index f517944b2b17..cf86a5e73815 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | extern int kstack_depth_to_print; | 4 | extern int kstack_depth_to_print; |
5 | 5 | ||
6 | int x86_is_stack_id(int id, char *name); | ||
7 | |||
6 | /* Generic stack tracer with callbacks */ | 8 | /* Generic stack tracer with callbacks */ |
7 | 9 | ||
8 | struct stacktrace_ops { | 10 | struct stacktrace_ops { |
diff --git a/arch/x86/include/asm/therm_throt.h b/arch/x86/include/asm/therm_throt.h deleted file mode 100644 index c62349ee7860..000000000000 --- a/arch/x86/include/asm/therm_throt.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _ASM_X86_THERM_THROT_H | ||
2 | #define _ASM_X86_THERM_THROT_H | ||
3 | |||
4 | #include <asm/atomic.h> | ||
5 | |||
6 | extern atomic_t therm_throt_en; | ||
7 | int therm_throt_process(int curr); | ||
8 | |||
9 | #endif /* _ASM_X86_THERM_THROT_H */ | ||
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index b0783520988b..6f7786aea4fc 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
@@ -49,7 +49,7 @@ struct thread_info { | |||
49 | .exec_domain = &default_exec_domain, \ | 49 | .exec_domain = &default_exec_domain, \ |
50 | .flags = 0, \ | 50 | .flags = 0, \ |
51 | .cpu = 0, \ | 51 | .cpu = 0, \ |
52 | .preempt_count = 1, \ | 52 | .preempt_count = INIT_PREEMPT_COUNT, \ |
53 | .addr_limit = KERNEL_DS, \ | 53 | .addr_limit = KERNEL_DS, \ |
54 | .restart_block = { \ | 54 | .restart_block = { \ |
55 | .fn = do_no_restart_syscall, \ | 55 | .fn = do_no_restart_syscall, \ |
@@ -95,7 +95,7 @@ struct thread_info { | |||
95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ | 95 | #define TIF_DEBUGCTLMSR 25 /* uses thread_struct.debugctlmsr */ |
96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ | 96 | #define TIF_DS_AREA_MSR 26 /* uses thread_struct.ds_area_msr */ |
97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ | 97 | #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */ |
98 | #define TIF_SYSCALL_FTRACE 28 /* for ftrace syscall instrumentation */ | 98 | #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */ |
99 | 99 | ||
100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 100 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 101 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
@@ -118,17 +118,17 @@ struct thread_info { | |||
118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) | 118 | #define _TIF_DEBUGCTLMSR (1 << TIF_DEBUGCTLMSR) |
119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) | 119 | #define _TIF_DS_AREA_MSR (1 << TIF_DS_AREA_MSR) |
120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) | 120 | #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES) |
121 | #define _TIF_SYSCALL_FTRACE (1 << TIF_SYSCALL_FTRACE) | 121 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
122 | 122 | ||
123 | /* work to do in syscall_trace_enter() */ | 123 | /* work to do in syscall_trace_enter() */ |
124 | #define _TIF_WORK_SYSCALL_ENTRY \ | 124 | #define _TIF_WORK_SYSCALL_ENTRY \ |
125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_FTRACE | \ | 125 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ |
126 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | _TIF_SINGLESTEP) | 126 | _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) |
127 | 127 | ||
128 | /* work to do in syscall_trace_leave() */ | 128 | /* work to do in syscall_trace_leave() */ |
129 | #define _TIF_WORK_SYSCALL_EXIT \ | 129 | #define _TIF_WORK_SYSCALL_EXIT \ |
130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ | 130 | (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ |
131 | _TIF_SYSCALL_FTRACE) | 131 | _TIF_SYSCALL_TRACEPOINT) |
132 | 132 | ||
133 | /* work to do on interrupt/exception return */ | 133 | /* work to do on interrupt/exception return */ |
134 | #define _TIF_WORK_MASK \ | 134 | #define _TIF_WORK_MASK \ |
@@ -137,7 +137,8 @@ struct thread_info { | |||
137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) | 137 | _TIF_SINGLESTEP|_TIF_SECCOMP|_TIF_SYSCALL_EMU)) |
138 | 138 | ||
139 | /* work to do on any return to user space */ | 139 | /* work to do on any return to user space */ |
140 | #define _TIF_ALLWORK_MASK ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_FTRACE) | 140 | #define _TIF_ALLWORK_MASK \ |
141 | ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) | ||
141 | 142 | ||
142 | /* Only used for 64 bit */ | 143 | /* Only used for 64 bit */ |
143 | #define _TIF_DO_NOTIFY_MASK \ | 144 | #define _TIF_DO_NOTIFY_MASK \ |
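
The TIF_SYSCALL_FTRACE to TIF_SYSCALL_TRACEPOINT rename tracks the move from an ftrace-private hook to generic syscall tracepoints; the flag stays in both work masks so the slow entry and exit paths run whenever a tracer is attached. Assumed shape of the entry-side test (simplified from the ptrace path):

/* Simplified, assumed shape of the entry hook: with the flag set,
 * the syscall slow path fires the generic tracepoint. */
long syscall_trace_enter(struct pt_regs *regs)
{
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);
        /* ... remaining _TIF_WORK_SYSCALL_ENTRY handling ... */
        return regs->orig_ax;
}
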
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h index bd37ed444a21..20ca9c4d4686 100644 --- a/arch/x86/include/asm/timer.h +++ b/arch/x86/include/asm/timer.h | |||
@@ -45,12 +45,16 @@ extern int no_timer_check; | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | DECLARE_PER_CPU(unsigned long, cyc2ns); | 47 | DECLARE_PER_CPU(unsigned long, cyc2ns); |
48 | DECLARE_PER_CPU(unsigned long long, cyc2ns_offset); | ||
48 | 49 | ||
49 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | 50 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
50 | 51 | ||
51 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) | 52 | static inline unsigned long long __cycles_2_ns(unsigned long long cyc) |
52 | { | 53 | { |
53 | return cyc * per_cpu(cyc2ns, smp_processor_id()) >> CYC2NS_SCALE_FACTOR; | 54 | int cpu = smp_processor_id(); |
55 | unsigned long long ns = per_cpu(cyc2ns_offset, cpu); | ||
56 | ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR; | ||
57 | return ns; | ||
54 | } | 58 | } |
55 | 59 | ||
56 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) | 60 | static inline unsigned long long cycles_2_ns(unsigned long long cyc) |
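
The new per-cpu cyc2ns_offset turns the conversion into ns = offset + (cyc * cyc2ns >> 10), which lets the offset be re-based whenever cpufreq rescales cyc2ns, keeping sched_clock() continuous across frequency changes. Worked numbers (illustrative):

/* Illustrative arithmetic: a 2 GHz TSC runs at 0.5 ns per cycle, so
 * per_cpu(cyc2ns) ~= 0.5 * 2^CYC2NS_SCALE_FACTOR = 512.
 *
 *   cyc = 4000000
 *   ns  = cyc2ns_offset + ((4000000 * 512) >> 10)
 *       = cyc2ns_offset + 2000000            (2 ms worth of cycles)
 *
 * On a frequency change, cyc2ns_offset is recomputed so the old and
 * new scales agree at the switchover point. */
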
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index b685ece89d5c..d2c6c930b491 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) | 25 | #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) |
26 | 26 | ||
27 | #define KERNEL_DS MAKE_MM_SEG(-1UL) | 27 | #define KERNEL_DS MAKE_MM_SEG(-1UL) |
28 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | 28 | #define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX) |
29 | 29 | ||
30 | #define get_ds() (KERNEL_DS) | 30 | #define get_ds() (KERNEL_DS) |
31 | #define get_fs() (current_thread_info()->addr_limit) | 31 | #define get_fs() (current_thread_info()->addr_limit) |
@@ -212,9 +212,9 @@ extern int __get_user_bad(void); | |||
212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") | 212 | : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") |
213 | #else | 213 | #else |
214 | #define __put_user_asm_u64(x, ptr, retval, errret) \ | 214 | #define __put_user_asm_u64(x, ptr, retval, errret) \ |
215 | __put_user_asm(x, ptr, retval, "q", "", "Zr", errret) | 215 | __put_user_asm(x, ptr, retval, "q", "", "er", errret) |
216 | #define __put_user_asm_ex_u64(x, addr) \ | 216 | #define __put_user_asm_ex_u64(x, addr) \ |
217 | __put_user_asm_ex(x, addr, "q", "", "Zr") | 217 | __put_user_asm_ex(x, addr, "q", "", "er") |
218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) | 218 | #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu) |
219 | #endif | 219 | #endif |
220 | 220 | ||
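
The constraint change from "Zr" to "er" (and from "ir" to "er" in uaccess_64.h below) narrows which immediates GCC may feed to a 64-bit store: movq only encodes a sign-extended 32-bit immediate, which is exactly the 'e' machine constraint, whereas 'i' allows any constant and 'Z' the zero-extended 32-bit form. Illustrative encodings:

/* Why 'e' ("32-bit sign-extended immediate") is the right constraint:
 *
 *   movq $-1, (%rax)              -- encodable: -1 fits in a
 *                                    sign-extended imm32
 *   movq $0xffffffff, (%rax)      -- not encodable as intended: the
 *                                    imm32 would sign-extend to
 *                                    0xffffffffffffffff
 *
 * With "Zr"/"ir", GCC could legally pick such a constant and the
 * assembler would reject the instruction. */
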
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 8cc687326eb8..db24b215fc50 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h | |||
@@ -88,11 +88,11 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) | |||
88 | ret, "l", "k", "ir", 4); | 88 | ret, "l", "k", "ir", 4); |
89 | return ret; | 89 | return ret; |
90 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, | 90 | case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst, |
91 | ret, "q", "", "ir", 8); | 91 | ret, "q", "", "er", 8); |
92 | return ret; | 92 | return ret; |
93 | case 10: | 93 | case 10: |
94 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, | 94 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
95 | ret, "q", "", "ir", 10); | 95 | ret, "q", "", "er", 10); |
96 | if (unlikely(ret)) | 96 | if (unlikely(ret)) |
97 | return ret; | 97 | return ret; |
98 | asm("":::"memory"); | 98 | asm("":::"memory"); |
@@ -101,12 +101,12 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) | |||
101 | return ret; | 101 | return ret; |
102 | case 16: | 102 | case 16: |
103 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, | 103 | __put_user_asm(*(u64 *)src, (u64 __user *)dst, |
104 | ret, "q", "", "ir", 16); | 104 | ret, "q", "", "er", 16); |
105 | if (unlikely(ret)) | 105 | if (unlikely(ret)) |
106 | return ret; | 106 | return ret; |
107 | asm("":::"memory"); | 107 | asm("":::"memory"); |
108 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, | 108 | __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst, |
109 | ret, "q", "", "ir", 8); | 109 | ret, "q", "", "er", 8); |
110 | return ret; | 110 | return ret; |
111 | default: | 111 | default: |
112 | return copy_user_generic((__force void *)dst, src, size); | 112 | return copy_user_generic((__force void *)dst, src, size); |
@@ -157,7 +157,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size) | |||
157 | ret, "q", "", "=r", 8); | 157 | ret, "q", "", "=r", 8); |
158 | if (likely(!ret)) | 158 | if (likely(!ret)) |
159 | __put_user_asm(tmp, (u64 __user *)dst, | 159 | __put_user_asm(tmp, (u64 __user *)dst, |
160 | ret, "q", "", "ir", 8); | 160 | ret, "q", "", "er", 8); |
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | default: | 163 | default: |
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h index 732a30706153..8deaada61bc8 100644 --- a/arch/x86/include/asm/unistd_32.h +++ b/arch/x86/include/asm/unistd_32.h | |||
@@ -345,6 +345,8 @@ | |||
345 | 345 | ||
346 | #ifdef __KERNEL__ | 346 | #ifdef __KERNEL__ |
347 | 347 | ||
348 | #define NR_syscalls 337 | ||
349 | |||
348 | #define __ARCH_WANT_IPC_PARSE_VERSION | 350 | #define __ARCH_WANT_IPC_PARSE_VERSION |
349 | #define __ARCH_WANT_OLD_READDIR | 351 | #define __ARCH_WANT_OLD_READDIR |
350 | #define __ARCH_WANT_OLD_STAT | 352 | #define __ARCH_WANT_OLD_STAT |
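
NR_syscalls gives kernel-internal code an authoritative syscall count (337 on 32-bit x86 at this point); the syscall tracepoint code uses it to size per-syscall metadata tables and to bounds-check numbers. A hedged sketch of that pattern:

/* Assumed shape of an NR_syscalls consumer: a metadata table indexed
 * by syscall number, with an explicit bounds check. */
static struct syscall_metadata *syscalls_metadata[NR_syscalls];

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (nr < 0 || nr >= NR_syscalls)
                return NULL;
        return syscalls_metadata[nr];
}
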
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h index 900e1617e672..b9f3c60de5f7 100644 --- a/arch/x86/include/asm/unistd_64.h +++ b/arch/x86/include/asm/unistd_64.h | |||
@@ -688,6 +688,12 @@ __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) | |||
688 | #endif /* __NO_STUBS */ | 688 | #endif /* __NO_STUBS */ |
689 | 689 | ||
690 | #ifdef __KERNEL__ | 690 | #ifdef __KERNEL__ |
691 | |||
692 | #ifndef COMPILE_OFFSETS | ||
693 | #include <asm/asm-offsets.h> | ||
694 | #define NR_syscalls (__NR_syscall_max + 1) | ||
695 | #endif | ||
696 | |||
691 | /* | 697 | /* |
692 | * "Conditional" syscalls | 698 | * "Conditional" syscalls |
693 | * | 699 | * |
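
On 64-bit the count is derived rather than hardcoded: __NR_syscall_max comes out of the generated asm-offsets.h, and the #ifndef COMPILE_OFFSETS guard breaks the circular dependency, since the file that generates asm-offsets.h is itself built with COMPILE_OFFSETS defined and must not include the not-yet-existing header. Sketch of the generation side (assumed shape of the asm-offsets trick):

/* Assumed shape of the generator: redefine __SYSCALL() so that
 * including the syscall table builds an array whose size is the
 * highest syscall number plus one. */
#define __NO_STUBS 1
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = 1,
static char syscalls[] = {
#include <asm/unistd.h>
};
/* ... DEFINE(__NR_syscall_max, sizeof(syscalls) - 1); ... */
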
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index bddd44f2f0ab..80e2984f521c 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -133,7 +133,7 @@ struct bau_msg_payload { | |||
133 | * see table 4.2.3.0.1 in broadcast_assist spec. | 133 | * see table 4.2.3.0.1 in broadcast_assist spec. |
134 | */ | 134 | */ |
135 | struct bau_msg_header { | 135 | struct bau_msg_header { |
136 | unsigned int dest_subnodeid:6; /* must be zero */ | 136 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
137 | /* bits 5:0 */ | 137 | /* bits 5:0 */ |
138 | unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ | 138 | unsigned int base_dest_nodeid:15; /* nasid>>1 (pnode) of */ |
139 | /* bits 20:6 */ /* first bit in node_map */ | 139 | /* bits 20:6 */ /* first bit in node_map */ |
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h index 341070f7ad5c..77a68505419a 100644 --- a/arch/x86/include/asm/uv/uv_hub.h +++ b/arch/x86/include/asm/uv/uv_hub.h | |||
@@ -175,7 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); | |||
175 | #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) | 175 | #define UV_GLOBAL_MMR32_PNODE_BITS(p) ((p) << (UV_GLOBAL_MMR32_PNODE_SHIFT)) |
176 | 176 | ||
177 | #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ | 177 | #define UV_GLOBAL_MMR64_PNODE_BITS(p) \ |
178 | ((unsigned long)(UV_PNODE_TO_GNODE(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) | 178 | (((unsigned long)(p)) << UV_GLOBAL_MMR64_PNODE_SHIFT) |
179 | 179 | ||
180 | #define UV_APIC_PNODE_SHIFT 6 | 180 | #define UV_APIC_PNODE_SHIFT 6 |
181 | 181 | ||
@@ -327,6 +327,7 @@ struct uv_blade_info { | |||
327 | unsigned short nr_possible_cpus; | 327 | unsigned short nr_possible_cpus; |
328 | unsigned short nr_online_cpus; | 328 | unsigned short nr_online_cpus; |
329 | unsigned short pnode; | 329 | unsigned short pnode; |
330 | short memory_nid; | ||
330 | }; | 331 | }; |
331 | extern struct uv_blade_info *uv_blade_info; | 332 | extern struct uv_blade_info *uv_blade_info; |
332 | extern short *uv_node_to_blade; | 333 | extern short *uv_node_to_blade; |
@@ -363,6 +364,12 @@ static inline int uv_blade_to_pnode(int bid) | |||
363 | return uv_blade_info[bid].pnode; | 364 | return uv_blade_info[bid].pnode; |
364 | } | 365 | } |
365 | 366 | ||
367 | /* Nid of memory node on blade. -1 if no blade-local memory */ | ||
368 | static inline int uv_blade_to_memory_nid(int bid) | ||
369 | { | ||
370 | return uv_blade_info[bid].memory_nid; | ||
371 | } | ||
372 | |||
366 | /* Determine the number of possible cpus on a blade */ | 373 | /* Determine the number of possible cpus on a blade */ |
367 | static inline int uv_blade_nr_possible_cpus(int bid) | 374 | static inline int uv_blade_nr_possible_cpus(int bid) |
368 | { | 375 | { |
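
memory_nid records, per blade, the NUMA node backing any blade-local memory (or -1 when there is none), and uv_blade_to_memory_nid() exposes it. A hypothetical consumer sketch (the function name is illustrative):

/* Hypothetical consumer: prefer blade-local memory when setting up a
 * per-blade buffer, falling back to any node. */
static struct page *alloc_on_blade(int bid, int order)
{
        int nid = uv_blade_to_memory_nid(bid);

        if (nid < 0)                    /* no blade-local memory */
                return alloc_pages(GFP_KERNEL, order);
        return alloc_pages_node(nid, GFP_KERNEL, order);
}
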