Diffstat (limited to 'include/asm-generic')

 include/asm-generic/local.h   |  2 +-
 include/asm-generic/pgtable.h | 21 +++++++++++----------
 2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index dbd6150763e9..fc218444e315 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,7 +42,7 @@ typedef struct
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
-#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
 
 /* Non-atomic variants, ie. preemption disabled and won't be touched
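
The rename above fixes a macro-hygiene bug: the old parameter was named 'a', the same token as the local_t member accessed by (&(l)->a), so the caller's argument was substituted into the member access during expansion. Below is a minimal user-space sketch of the hazard and the fix; the struct and add_unless() are simplified stand-ins for local_t and atomic_long_add_unless(), not kernel code.

#include <stdio.h>

typedef struct { long a; } local_t;	/* stand-in for local_t */

/* stand-in for atomic_long_add_unless() */
static int add_unless(long *p, long delta, long unless)
{
	if (*p == unless)
		return 0;
	*p += delta;
	return 1;
}

/*
 * Old form: #define local_add_unless(l, a, u) add_unless(&(l)->a, (a), (u))
 * The parameter 'a' is substituted into the member access '(l)->a' as
 * well, so local_add_unless(&v, 5, 10) expands to add_unless(&(&v)->5, ...)
 * and fails to compile.  Renaming the parameter avoids the capture:
 */
#define local_add_unless(l, _a, u) add_unless(&(l)->a, (_a), (u))

int main(void)
{
	local_t v = { .a = 3 };
	printf("%d %ld\n", local_add_unless(&v, 5, 10), v.a);	/* prints "1 8" */
	return 0;
}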
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e6d0ca70aba..e410f602cab1 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -280,17 +280,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #endif
 
 /*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
  */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev)	do {} while (0)
 #endif
 
 #ifndef __HAVE_PFNMAP_TRACKING
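
For context, the new no-op above is meant to be called unconditionally from the generic context-switch path; an architecture that wants batching opts in by defining __HAVE_ARCH_START_CONTEXT_SWITCH and supplying its own hook, with the matching exit issued from its own switch_to() code, as the new comment explains. The following is a self-contained sketch of that override pattern, not kernel code: the task_struct, context_switch() body, and paravirt_begin_batch() are invented stand-ins for illustration.

#include <stdio.h>

struct task_struct { int pid; };	/* stand-in for the real task_struct */

#ifdef __HAVE_ARCH_START_CONTEXT_SWITCH
/* An opting-in architecture supplies its own hook, and pairs it with
 * an "end" in its own switch_to() path. */
static void paravirt_begin_batch(struct task_struct *prev)
{
	printf("batching state updates for pid %d\n", prev->pid);
}
#define arch_start_context_switch(prev) paravirt_begin_batch(prev)
#else
/* The generic fallback, as in pgtable.h: a no-op. */
#define arch_start_context_switch(prev) do {} while (0)
#endif

static void context_switch(struct task_struct *prev, struct task_struct *next)
{
	arch_start_context_switch(prev);	/* open the batched region */
	/* ... switch_mm()/switch_to() would run here; the matching exit
	 * lives in architecture-specific code, so no generic
	 * arch_end_context_switch() is needed. */
	(void)next;
}

int main(void)
{
	struct task_struct a = { .pid = 1 }, b = { .pid = 2 };
	context_switch(&a, &b);
	return 0;
}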