Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/pgtable.h | 11 +++++++++--
-rw-r--r--  arch/x86/include/asm/preempt.h | 11 +++++++++++
2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..bbc8b12fa443 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
 }
 
 #define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 {
-	return pte_flags(a) & _PAGE_PRESENT;
+	if (pte_flags(a) & _PAGE_PRESENT)
+		return true;
+
+	if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+			mm_tlb_flush_pending(mm))
+		return true;
+
+	return false;
 }
 
 static inline int pte_hidden(pte_t pte)
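Why pte_accessible() now takes an mm: callers that decide whether a TLB flush is needed must treat a PROTNONE/NUMA pte as still live while another thread's flush is pending, otherwise a remote CPU can keep using a stale, writable TLB entry. The sketch below is modeled on ptep_clear_flush() in mm/pgtable-generic.c; example_clear_flush is an invented name for illustration only:

/*
 * Sketch of the kind of caller this signature change serves. Without the
 * mm_tlb_flush_pending() check added above, a pte already made PROTNONE
 * by change_protection_range() would look inaccessible here and the TLB
 * flush would be skipped too early.
 */
static pte_t example_clear_flush(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte = ptep_get_and_clear(mm, address, ptep);

	if (pte_accessible(mm, pte))	/* now takes mm to see pending flushes */
		flush_tlb_page(vma, address);
	return pte;
}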
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
 DECLARE_PER_CPU(int, __preempt_count);
 
 /*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
+
+/*
  * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
  * that think a non-zero value indicates we cannot preempt.
  */
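A note on the inverted-bit scheme introduced in this hunk: PREEMPT_ENABLED is not zero but the PREEMPT_NEED_RESCHED bit itself, so the counter only reaches zero once preemption is enabled *and* the (inverted) resched bit has been cleared. Below is a minimal userspace sketch of the arithmetic, assuming the top-bit value 0x80000000 used by the x86 code of this period:

#include <stdbool.h>
#include <stdio.h>

/* Assumed value: the x86 implementation of this era uses the top bit. */
#define PREEMPT_NEED_RESCHED	0x80000000u
#define PREEMPT_ENABLED		(0u + PREEMPT_NEED_RESCHED)

static unsigned int preempt_count = PREEMPT_ENABLED;

/* Inverted sense: *clearing* the bit marks a reschedule as pending. */
static void set_preempt_need_resched(void)
{
	preempt_count &= ~PREEMPT_NEED_RESCHED;
}

static void preempt_disable(void)
{
	preempt_count += 1;
}

/* Models "decl; je": one decrement tests count==0 and resched-pending at once. */
static bool preempt_count_dec_and_test(void)
{
	return --preempt_count == 0;
}

int main(void)
{
	preempt_disable();		/* count = 0x80000001 */
	set_preempt_need_resched();	/* count = 0x00000001 */
	printf("should reschedule: %d\n", preempt_count_dec_and_test()); /* 1 */
	return 0;
}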
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
 	__this_cpu_add_4(__preempt_count, -val);
 }
 
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
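For comparison, the asm-generic fallback from the same preempt_count rework expresses this dec-and-test in plain C, while the x86 GEN_UNARY_RMWcc() macro above folds the zero test into the Z flag of a single per-CPU decl instruction:

/* Plain-C equivalent, as in asm-generic/preempt.h of this series. */
static __always_inline bool __preempt_count_dec_and_test(void)
{
	return !--*preempt_count_ptr();
}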