diff options
| author | David S. Miller <davem@davemloft.net> | 2019-09-15 08:17:27 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-09-15 08:17:27 -0400 |
| commit | aa2eaa8c272a3211dec07ce9c6c863a7e355c10e (patch) | |
| tree | 8454a23d36b2ff36133c276ee0ba80eabc00850e /arch/powerpc | |
| parent | a3d3c74da49c65fc63a937fa559186b0e16adca3 (diff) | |
| parent | 1609d7604b847a9820e63393d1a3b6cac7286d40 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/powerpc')
| -rw-r--r-- | arch/powerpc/kernel/process.c | 21 | ||||
| -rw-r--r-- | arch/powerpc/mm/nohash/tlb.c | 1 |
2 files changed, 4 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8fc4de0d22b4..7a84c9f1778e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk) | |||
| 101 | } | 101 | } |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | static bool tm_active_with_fp(struct task_struct *tsk) | ||
| 105 | { | ||
| 106 | return MSR_TM_ACTIVE(tsk->thread.regs->msr) && | ||
| 107 | (tsk->thread.ckpt_regs.msr & MSR_FP); | ||
| 108 | } | ||
| 109 | |||
| 110 | static bool tm_active_with_altivec(struct task_struct *tsk) | ||
| 111 | { | ||
| 112 | return MSR_TM_ACTIVE(tsk->thread.regs->msr) && | ||
| 113 | (tsk->thread.ckpt_regs.msr & MSR_VEC); | ||
| 114 | } | ||
| 115 | #else | 104 | #else |
| 116 | static inline void check_if_tm_restore_required(struct task_struct *tsk) { } | 105 | static inline void check_if_tm_restore_required(struct task_struct *tsk) { } |
| 117 | static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; } | ||
| 118 | static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; } | ||
| 119 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ | 106 | #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ |
| 120 | 107 | ||
| 121 | bool strict_msr_control; | 108 | bool strict_msr_control; |
| @@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp); | |||
| 252 | 239 | ||
| 253 | static int restore_fp(struct task_struct *tsk) | 240 | static int restore_fp(struct task_struct *tsk) |
| 254 | { | 241 | { |
| 255 | if (tsk->thread.load_fp || tm_active_with_fp(tsk)) { | 242 | if (tsk->thread.load_fp) { |
| 256 | load_fp_state(¤t->thread.fp_state); | 243 | load_fp_state(¤t->thread.fp_state); |
| 257 | current->thread.load_fp++; | 244 | current->thread.load_fp++; |
| 258 | return 1; | 245 | return 1; |
| @@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); | |||
| 334 | 321 | ||
| 335 | static int restore_altivec(struct task_struct *tsk) | 322 | static int restore_altivec(struct task_struct *tsk) |
| 336 | { | 323 | { |
| 337 | if (cpu_has_feature(CPU_FTR_ALTIVEC) && | 324 | if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) { |
| 338 | (tsk->thread.load_vec || tm_active_with_altivec(tsk))) { | ||
| 339 | load_vr_state(&tsk->thread.vr_state); | 325 | load_vr_state(&tsk->thread.vr_state); |
| 340 | tsk->thread.used_vr = 1; | 326 | tsk->thread.used_vr = 1; |
| 341 | tsk->thread.load_vec++; | 327 | tsk->thread.load_vec++; |
| @@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk) | |||
| 497 | if (!tsk->thread.regs) | 483 | if (!tsk->thread.regs) |
| 498 | return; | 484 | return; |
| 499 | 485 | ||
| 486 | check_if_tm_restore_required(tsk); | ||
| 487 | |||
| 500 | usermsr = tsk->thread.regs->msr; | 488 | usermsr = tsk->thread.regs->msr; |
| 501 | 489 | ||
| 502 | if ((usermsr & msr_all_available) == 0) | 490 | if ((usermsr & msr_all_available) == 0) |
| 503 | return; | 491 | return; |
| 504 | 492 | ||
| 505 | msr_check_and_set(msr_all_available); | 493 | msr_check_and_set(msr_all_available); |
| 506 | check_if_tm_restore_required(tsk); | ||
| 507 | 494 | ||
| 508 | WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); | 495 | WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC))); |
| 509 | 496 | ||
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c index d4acf6fa0596..bf60983a58c7 100644 --- a/arch/powerpc/mm/nohash/tlb.c +++ b/arch/powerpc/mm/nohash/tlb.c | |||
| @@ -630,7 +630,6 @@ static void early_init_this_mmu(void) | |||
| 630 | #ifdef CONFIG_PPC_FSL_BOOK3E | 630 | #ifdef CONFIG_PPC_FSL_BOOK3E |
| 631 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { | 631 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { |
| 632 | unsigned int num_cams; | 632 | unsigned int num_cams; |
| 633 | int __maybe_unused cpu = smp_processor_id(); | ||
| 634 | bool map = true; | 633 | bool map = true; |
| 635 | 634 | ||
| 636 | /* use a quarter of the TLBCAM for bolted linear map */ | 635 | /* use a quarter of the TLBCAM for bolted linear map */ |
