Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--  arch/mips/include/asm/atomic.h                  22
-rw-r--r--  arch/mips/include/asm/mach-ath79/ar933x_uart.h   4
-rw-r--r--  arch/mips/include/asm/page.h                     3
-rw-r--r--  arch/mips/include/asm/pgalloc.h                 33
-rw-r--r--  arch/mips/include/asm/pgtable.h                  3
-rw-r--r--  arch/mips/include/asm/ptrace.h                   5
-rw-r--r--  arch/mips/include/asm/switch_to.h                4
7 files changed, 20 insertions, 54 deletions
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 94096299fc56..9a82dd11c0e9 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -254,10 +254,10 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 #define atomic64_set(v, i)      WRITE_ONCE((v)->counter, (i))
 
 #define ATOMIC64_OP(op, c_op, asm_op)                                         \
-static __inline__ void atomic64_##op(long i, atomic64_t * v)                  \
+static __inline__ void atomic64_##op(s64 i, atomic64_t * v)                   \
 {                                                                             \
         if (kernel_uses_llsc) {                                               \
-                long temp;                                                    \
+                s64 temp;                                                     \
                                                                               \
                 loongson_llsc_mb();                                           \
                 __asm__ __volatile__(                                         \
@@ -280,12 +280,12 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op)                                  \
-static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
+static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)   \
 {                                                                             \
-        long result;                                                          \
+        s64 result;                                                           \
                                                                               \
         if (kernel_uses_llsc) {                                               \
-                long temp;                                                    \
+                s64 temp;                                                     \
                                                                               \
                 loongson_llsc_mb();                                           \
                 __asm__ __volatile__(                                         \
@@ -314,12 +314,12 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op, asm_op)                                   \
-static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)    \
 {                                                                             \
-        long result;                                                          \
+        s64 result;                                                           \
                                                                               \
         if (kernel_uses_llsc) {                                               \
-                long temp;                                                    \
+                s64 temp;                                                     \
                                                                               \
                 loongson_llsc_mb();                                           \
                 __asm__ __volatile__(                                         \
@@ -386,14 +386,14 @@ ATOMIC64_OPS(xor, ^=, xor)
  * Atomically test @v and subtract @i if @v is greater or equal than @i.
  * The function returns the old value of @v minus @i.
  */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
 {
-        long result;
+        s64 result;
 
         smp_mb__before_llsc();
 
         if (kernel_uses_llsc) {
-                long temp;
+                s64 temp;
 
                 __asm__ __volatile__(
                 "       .set    push                                    \n"
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index b8f8af7dc47c..cacf3545e018 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -24,8 +24,8 @@
 #define AR933X_UART_CS_PARITY_S         0
 #define AR933X_UART_CS_PARITY_M         0x3
 #define   AR933X_UART_CS_PARITY_NONE    0
-#define   AR933X_UART_CS_PARITY_ODD     1
-#define   AR933X_UART_CS_PARITY_EVEN    2
+#define   AR933X_UART_CS_PARITY_ODD     2
+#define   AR933X_UART_CS_PARITY_EVEN    3
 #define AR933X_UART_CS_IF_MODE_S        2
 #define AR933X_UART_CS_IF_MODE_M        0x3
 #define   AR933X_UART_CS_IF_MODE_NONE   0
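
The corrected values read as though the two-bit parity field uses bit 1 as a parity-enable bit and bit 0 to select even: 0 stays "none" while odd becomes 2 and even becomes 3. That interpretation is an inference from the values, not from the datasheet. A hypothetical helper showing how a driver might program the field with these masks and shifts:

    #include <stdint.h>
    #include <stdio.h>

    #define AR933X_UART_CS_PARITY_S      0
    #define AR933X_UART_CS_PARITY_M      0x3
    #define AR933X_UART_CS_PARITY_NONE   0
    #define AR933X_UART_CS_PARITY_ODD    2
    #define AR933X_UART_CS_PARITY_EVEN   3

    /* Hypothetical helper: replace the parity bits in a CS register value. */
    static uint32_t ar933x_uart_cs_set_parity(uint32_t cs, uint32_t mode)
    {
        cs &= ~(AR933X_UART_CS_PARITY_M << AR933X_UART_CS_PARITY_S);
        cs |= (mode & AR933X_UART_CS_PARITY_M) << AR933X_UART_CS_PARITY_S;
        return cs;
    }

    int main(void)
    {
        /* With the old value 1, odd parity never set bit 1 (the assumed
         * enable bit). */
        printf("cs=0x%x\n",
               ar933x_uart_cs_set_parity(0, AR933X_UART_CS_PARITY_ODD));
        return 0;
    }
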
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index a25643d258cb..0ba4ce6e2bf3 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -258,9 +258,6 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
         ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) |  \
                                  VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
-#define UNCAC_ADDR(addr)        (UNCAC_BASE + __pa(addr))
-#define CAC_ADDR(addr)          ((unsigned long)__va((addr) - UNCAC_BASE))
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
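
UNCAC_ADDR() and CAC_ADDR() translated a cached kernel virtual address into its uncached alias and back; they are dropped from this header here. For reference, a standalone sketch of the arithmetic they performed, with illustrative stand-in constants (the real bases depend on the platform's segment layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for one possible 64-bit layout. */
    #define MY_UNCAC_BASE   0x9000000000000000ull  /* uncached window */
    #define MY_PAGE_OFFSET  0x9800000000000000ull  /* cached window   */

    static uint64_t my_pa(uint64_t vaddr) { return vaddr - MY_PAGE_OFFSET; }

    /* What UNCAC_ADDR(addr) computed: the same physical page, seen
     * through the uncached window. */
    static uint64_t my_uncac_addr(uint64_t vaddr)
    {
        return MY_UNCAC_BASE + my_pa(vaddr);
    }

    /* What CAC_ADDR(addr) computed: the inverse translation. */
    static uint64_t my_cac_addr(uint64_t uaddr)
    {
        return (uaddr - MY_UNCAC_BASE) + MY_PAGE_OFFSET;
    }

    int main(void)
    {
        uint64_t cached = MY_PAGE_OFFSET + 0x1000;

        /* Round trip should be the identity. */
        printf("%d\n", my_cac_addr(my_uncac_addr(cached)) == cached);
        return 0;
    }
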
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 27808d9461f4..aa16b85ddffc 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -13,6 +13,8 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 
+#include <asm-generic/pgalloc.h>        /* for pte_{alloc,free}_one */
+
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
         pte_t *pte)
 {
@@ -50,37 +52,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
         free_pages((unsigned long)pgd, PGD_ORDER);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
-{
-        return (pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, PTE_ORDER);
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
-{
-        struct page *pte;
-
-        pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
-        if (!pte)
-                return NULL;
-        clear_highpage(pte);
-        if (!pgtable_page_ctor(pte)) {
-                __free_page(pte);
-                return NULL;
-        }
-        return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-        free_pages((unsigned long)pte, PTE_ORDER);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-        pgtable_page_dtor(pte);
-        __free_pages(pte, PTE_ORDER);
-}
-
 #define __pte_free_tlb(tlb,pte,address)                 \
 do {                                                    \
         pgtable_page_dtor(pte);                         \
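
The deleted pte_alloc_one*()/pte_free*() bodies match the shapes the generic header now supplies, which is why one #include replaces some thirty lines. The invariant they encoded survives in the generic versions: a user PTE page must be zeroed and must pass its constructor before use, and a constructor failure must unwind the allocation. A userspace analogue of that pattern (my_* names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MY_PAGE_SIZE 4096

    /* Stand-in for pgtable_page_ctor(); nonzero on success. */
    static int my_ctor(void *page) { (void)page; return 1; }

    /* Analogue of the removed pte_alloc_one(): allocate, zero,
     * construct, and unwind cleanly if construction fails. */
    static void *my_pte_alloc_one(void)
    {
        void *pte = malloc(MY_PAGE_SIZE);

        if (!pte)
            return NULL;
        memset(pte, 0, MY_PAGE_SIZE);  /* clear_highpage() analogue */
        if (!my_ctor(pte)) {
            free(pte);                 /* __free_page() analogue */
            return NULL;
        }
        return pte;
    }

    int main(void)
    {
        void *pte = my_pte_alloc_one();

        printf("%s\n", pte ? "allocated" : "failed");
        free(pte);
        return 0;
    }
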
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 4ccb465ef3f2..7d27194e3b45 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -20,6 +20,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/pgtable-bits.h>
+#include <asm/cpu-features.h>
 
 struct mm_struct;
 struct vm_area_struct;
@@ -626,6 +627,8 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
 
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+#define gup_fast_permitted(start, end)  (!cpu_has_dc_aliases)
+
 #include <asm-generic/pgtable.h>
 
 /*
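
gup_fast_permitted() is the hook the generic get_user_pages_fast() code consults before walking page tables locklessly; defining it as !cpu_has_dc_aliases opts out of the fast path on CPUs whose data caches can alias, which is also why the cpu-features.h include is added above. A toy model of the gate (the _sim names are stand-ins, not kernel symbols):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for cpu_has_dc_aliases; the real value comes from
     * asm/cpu-features.h. */
    static bool cpu_has_dc_aliases_sim = false;

    /* Mirrors the new hook: lockless fast GUP only when the data
     * cache cannot alias. */
    static bool gup_fast_permitted_sim(unsigned long start, unsigned long end)
    {
        (void)start;
        (void)end;
        return !cpu_has_dc_aliases_sim;
    }

    int main(void)
    {
        printf("fast path: %s\n",
               gup_fast_permitted_sim(0, 4096) ? "yes" : "no, take slow path");
        return 0;
    }
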
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index b6578611dddb..1e76774b36dd 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -56,11 +56,6 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
         return regs->regs[31];
 }
 
-/*
- * Don't use asm-generic/ptrace.h it defines FP accessors that don't make
- * sense on MIPS. We rather want an error if they get invoked.
- */
-
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                            unsigned long val)
 {
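
The deleted comment only justified not including asm-generic/ptrace.h; presumably that header is going away, so the warning no longer earns its keep. For context, instruction_pointer_set() below stores the task's new program counter, which on MIPS lives in cp0_epc. A stand-in sketch (my_pt_regs is illustrative, not the real layout):

    #include <stdio.h>

    /* Illustrative stand-in for MIPS pt_regs. */
    struct my_pt_regs {
        unsigned long regs[32];
        unsigned long cp0_epc;  /* exception PC: where the task resumes */
    };

    /* Mirrors instruction_pointer_set(): redirect the resume PC. */
    static void my_instruction_pointer_set(struct my_pt_regs *r,
                                           unsigned long val)
    {
        r->cp0_epc = val;
    }

    int main(void)
    {
        struct my_pt_regs r = { { 0 }, 0 };

        my_instruction_pointer_set(&r, 0xdeadbeef);
        printf("epc=%lx\n", r.cp0_epc);
        return 0;
    }
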
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 0f813bb753c6..09cbe9042828 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
  *
  * We're not calling set_cpus_allowed() here, because we have no need to
  * force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@ do { \
             test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&     \
             (!(KSTK_STATUS(prev) & ST0_CU1))) {                 \
                 clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);  \
-                prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+                prev->cpus_mask = prev->thread.user_cpus_allowed; \
         }                                                       \
         next->thread.emulated_fp = 0;                           \
 } while(0)
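
Both hunks track the scheduler-side rename of task_struct's cpus_allowed field to cpus_mask; the logic is unchanged. The pattern itself, lazily widening a task's affinity at context switch without forcing a migration, is easy to model in isolation (my_task is a userspace stand-in, not the kernel structure):

    #include <stdbool.h>
    #include <stdio.h>

    /* cpus_mask (formerly cpus_allowed) is the live affinity;
     * user_cpus_allowed remembers what userspace requested. */
    struct my_task {
        unsigned long cpus_mask;
        unsigned long user_cpus_allowed;
        bool fpu_bound;   /* analogue of TIF_FPUBOUND  */
        bool used_fpu;    /* analogue of CU1 being set */
    };

    /* Analogue of the switch_to logic above: if the task was pinned to
     * FPU-capable CPUs but no longer touches the FPU, restore its full
     * mask. No forced migration; the scheduler catches up later. */
    static void my_fpaff_switch_from(struct my_task *prev)
    {
        if (prev->fpu_bound && !prev->used_fpu) {
            prev->fpu_bound = false;
            prev->cpus_mask = prev->user_cpus_allowed;
        }
    }

    int main(void)
    {
        struct my_task t = { 0x1, 0xf, true, false };

        my_fpaff_switch_from(&t);
        printf("mask=%lx\n", t.cpus_mask);  /* 0xf: restriction undone */
        return 0;
    }
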