author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-13 19:12:23 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-13 19:12:23 -0500
commit		463020ce428e2f00d4f33a383d6f39c7453a6854 (patch)
tree		c82d90c19e83c32b01c9748b4671640a670324e6 /arch/mips
parent		58a3bb59973e33a428d72fa530a3d1d81feb0e8f (diff)
parent		431dc8040354db65e4f8d4d4e21ae4fab41f5bc3 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Fix sigset_t endianness swapping issues in 32-bit compat code.
[MIPS] Fix uniprocessor Sibyte builds.
[MIPS] Make entry.S a little more readable.
[MIPS] Remove stray instruction from __get_user_asm_ll32.
[MIPS] 32-bit: Fix warning about cast for fetching pointer from userspace.
[MIPS] DECstation: Fix irq handling
[MIPS] signals: make common _BLOCKABLE macro
[MIPS] signal: Move sigframe definition for native O32/N64 into signal.c
[MIPS] signal: Move {restore,setup}_sigcontext prototypes to their user
[MIPS] signal: Fix warnings in o32 compat code.
[MIPS] IP27: Enable N32 support in defconfig.
Revert "[MIPS] Fix warning in get_user when fetching pointer object from userspace."
[MIPS] Don't claim we support dma_declare_coherent_memory - we don't.
[MIPS] Unify dma-{coherent,noncoherent.ip27,ip32}
[MIPS] Improve branch prediction in ll/sc atomic operations.
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                                                |   5
-rw-r--r--  arch/mips/configs/ip27_defconfig                                 |   2
-rw-r--r--  arch/mips/dec/int-handler.S                                      |   4
-rw-r--r--  arch/mips/kernel/entry.S                                         |  19
-rw-r--r--  arch/mips/kernel/linux32.c                                       |  47
-rw-r--r--  arch/mips/kernel/scall64-64.S                                    |   2
-rw-r--r--  arch/mips/kernel/scall64-n32.S                                   |   2
-rw-r--r--  arch/mips/kernel/signal-common.h                                 |  32
-rw-r--r--  arch/mips/kernel/signal.c                                        |  22
-rw-r--r--  arch/mips/kernel/signal32.c                                      |  39
-rw-r--r--  arch/mips/kernel/signal_n32.c                                    |  11
-rw-r--r--  arch/mips/mm/Makefile                                            |  14
-rw-r--r--  arch/mips/mm/c-sb1.c                                             |   6
-rw-r--r--  arch/mips/mm/dma-coherent.c                                      | 254
-rw-r--r--  arch/mips/mm/dma-default.c (renamed from arch/mips/mm/dma-noncoherent.c) | 209
-rw-r--r--  arch/mips/mm/dma-ip27.c                                          | 257
-rw-r--r--  arch/mips/mm/dma-ip32.c                                          | 383
-rw-r--r--  arch/mips/pci/Makefile                                           |   2
-rw-r--r--  arch/mips/pci/pci-dac.c                                          |  79
19 files changed, 305 insertions(+), 1084 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5fe195a41a80..a92ce6bd7cf1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -598,8 +598,6 @@ config SGI_IP32
 	select ARC
 	select ARC32
 	select BOOT_ELF32
-	select OWN_DMA
-	select DMA_IP32
 	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select R5000_CPU_SCACHE
@@ -883,9 +881,6 @@ config DMA_NONCOHERENT
 config DMA_NEED_PCI_MAP_STATE
 	bool
 
-config OWN_DMA
-	bool
-
 config EARLY_PRINTK
 	bool
 
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 96090f28373b..f21186c12d81 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -264,7 +264,7 @@ CONFIG_BINFMT_ELF=y
 CONFIG_MIPS32_COMPAT=y
 CONFIG_COMPAT=y
 CONFIG_MIPS32_O32=y
-# CONFIG_MIPS32_N32 is not set
+CONFIG_MIPS32_N32=y
 CONFIG_BINFMT_ELF32=y
 
 #
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index b251ef864c33..00cecdcc75f2 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -264,9 +264,6 @@
 		srlv	t3,t1,t2
 
 handle_it:
-		LONG_L	s0, TI_REGS($28)
-		LONG_S	sp, TI_REGS($28)
-		PTR_LA	ra, ret_from_irq
 		j	dec_irq_dispatch
 		 nop
 
@@ -277,7 +274,6 @@ fpu:
 #endif
 
 spurious:
-		PTR_LA	ra, _ret_from_irq
 		j	spurious_interrupt
 		 nop
 		END(plat_irq_dispatch)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index f10b6a19f8bf..0b78fcbf044a 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -21,24 +21,21 @@
 #endif
 
 #ifndef CONFIG_PREEMPT
-.macro	preempt_stop
-	local_irq_disable
-.endm
 #define resume_kernel	restore_all
+#else
+#define __ret_from_irq	ret_from_exception
 #endif
 
 	.text
 	.align	5
-FEXPORT(ret_from_irq)
-	LONG_S	s0, TI_REGS($28)
-#ifdef CONFIG_PREEMPT
-FEXPORT(ret_from_exception)
-#else
-	b	_ret_from_irq
+#ifndef CONFIG_PREEMPT
 FEXPORT(ret_from_exception)
-	preempt_stop
+	local_irq_disable			# preempt stop
+	b	__ret_from_irq
 #endif
-FEXPORT(_ret_from_irq)
+FEXPORT(ret_from_irq)
+	LONG_S	s0, TI_REGS($28)
+FEXPORT(__ret_from_irq)
 	LONG_L	t0, PT_STATUS(sp)		# returning to kernel mode?
 	andi	t0, t0, KU_USER
 	beqz	t0, resume_kernel
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index ca7ad78f4def..fc4dd6c9dd80 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -39,6 +39,7 @@
 #include <net/sock.h>
 #include <net/scm.h>
 
+#include <asm/compat-signal.h>
 #include <asm/ipc.h>
 #include <asm/sim.h>
 #include <asm/uaccess.h>
@@ -736,3 +737,49 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs)
 	return do_fork(clone_flags, newsp, &regs, 0,
 	               parent_tidptr, child_tidptr);
 }
+
+/*
+ * Implement the event wait interface for the eventpoll file. It is the kernel
+ * part of the user space epoll_pwait(2).
+ */
+asmlinkage long compat_sys_epoll_pwait(int epfd,
+	struct epoll_event __user *events, int maxevents, int timeout,
+	const compat_sigset_t __user *sigmask, size_t sigsetsize)
+{
+	int error;
+	sigset_t ksigmask, sigsaved;
+
+	/*
+	 * If the caller wants a certain signal mask to be set during the wait,
+	 * we apply it here.
+	 */
+	if (sigmask) {
+		if (sigsetsize != sizeof(sigset_t))
+			return -EINVAL;
+		if (!access_ok(VERIFY_READ, sigmask, sizeof(ksigmask)))
+			return -EFAULT;
+		if (__copy_conv_sigset_from_user(&ksigmask, sigmask))
+			return -EFAULT;
+		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
+		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+	}
+
+	error = sys_epoll_wait(epfd, events, maxevents, timeout);
+
+	/*
+	 * If we changed the signal mask, we need to restore the original one.
+	 * In case we've got a signal while waiting, we do not restore the
+	 * signal mask yet, and we allow do_signal() to deliver the signal on
+	 * the way back to userspace, before the signal mask is restored.
+	 */
+	if (sigmask) {
+		if (error == -EINTR) {
+			memcpy(&current->saved_sigmask, &sigsaved,
+			       sizeof(sigsaved));
+			set_thread_flag(TIF_RESTORE_SIGMASK);
+		} else
+			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	}
+
+	return error;
+}
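The __copy_conv_sigset_from_user() helper used above comes from the new <asm/compat-signal.h> header, which sits outside arch/mips and so does not appear in this diffstat. The point of the conversion: a 32-bit process lays its signal mask out as an array of 32-bit words, so each 64-bit kernel sigset word must be reassembled from two compat words explicitly; a plain byte copy puts the halves in the wrong order on big-endian MIPS. A minimal sketch of the from-user direction, assuming 128 signals (two 64-bit kernel words, four 32-bit compat words); the actual header contents may differ:

/* Sketch only: rebuild a 64-bit kernel sigset from the 32-bit word
 * order that o32/n32 userland uses, independent of endianness. */
static inline int __copy_conv_sigset_from_user(sigset_t *d,
	const compat_sigset_t __user *s)
{
	int err = 0;
	unsigned int lo, hi;

	/* compat words 0 and 1 form kernel word 0, low half first */
	err |= __get_user(lo, &s->sig[0]);
	err |= __get_user(hi, &s->sig[1]);
	d->sig[0] = ((unsigned long) hi << 32) | lo;

	/* compat words 2 and 3 form kernel word 1 */
	err |= __get_user(lo, &s->sig[2]);
	err |= __get_user(hi, &s->sig[3]);
	d->sig[1] = ((unsigned long) hi << 32) | lo;

	return err;
}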
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index e569b846e9a3..10e9a18630aa 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -470,4 +470,4 @@ sys_call_table:
 	PTR	sys_get_robust_list
 	PTR	sys_kexec_load			/* 5270 */
 	PTR	sys_getcpu
-	PTR	sys_epoll_pwait
+	PTR	compat_sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ee8802b59758..2ceda4644a4d 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -396,4 +396,4 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_get_robust_list
 	PTR	compat_sys_kexec_load
 	PTR	sys_getcpu
-	PTR	sys_epoll_pwait
+	PTR	compat_sys_epoll_pwait
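With both tables pointing at the compat wrapper, o32 and n32 callers get their sigmask converted rather than bit-copied. For context, this is the userland interface the wrapper serves; a hedged sketch where the epoll descriptor setup is elided and wait_for_events() is a hypothetical name:

#include <sys/epoll.h>
#include <signal.h>

int wait_for_events(int epfd, struct epoll_event *events, int maxevents)
{
	sigset_t mask;

	sigfillset(&mask);		/* block every signal during the wait... */
	sigdelset(&mask, SIGINT);	/* ...except SIGINT */

	/*
	 * The kernel installs the mask, sleeps, and restores the old mask
	 * on return - atomically, unlike a sigprocmask()/epoll_wait() pair,
	 * which leaves a window where SIGINT can be lost.
	 */
	return epoll_pwait(epfd, events, maxevents, -1, &mask);
}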
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 9a8abd67ec5c..fdbdbdc65b54 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -19,37 +19,7 @@
 # define DEBUGP(fmt, args...)
 #endif
 
-/*
- * Horribly complicated - with the bloody RM9000 workarounds enabled
- * the signal trampolines is moving to the end of the structure so we can
- * increase the alignment without breaking software compatibility.
- */
-#if ICACHE_REFILLS_WORKAROUND_WAR == 0
-
-struct sigframe {
-	u32 sf_ass[4];		/* argument save space for o32 */
-	u32 sf_code[2];		/* signal trampoline */
-	struct sigcontext sf_sc;
-	sigset_t sf_mask;
-};
-
-#else /* ICACHE_REFILLS_WORKAROUND_WAR */
-
-struct sigframe {
-	u32 sf_ass[4];		/* argument save space for o32 */
-	u32 sf_pad[2];
-	struct sigcontext sf_sc;	/* hw context */
-	sigset_t sf_mask;
-	u32 sf_code[8] ____cacheline_aligned;	/* signal trampoline */
-};
-
-#endif /* !ICACHE_REFILLS_WORKAROUND_WAR */
-
-/*
- * handle hardware context
- */
-extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
-extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 /*
  * Determine which stack to use..
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 54398af2371f..b2e9ab1bb101 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -34,10 +34,20 @@
 
 #include "signal-common.h"
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
+/*
+ * Horribly complicated - with the bloody RM9000 workarounds enabled
+ * the signal trampolines is moving to the end of the structure so we can
+ * increase the alignment without breaking software compatibility.
+ */
 #if ICACHE_REFILLS_WORKAROUND_WAR == 0
 
+struct sigframe {
+	u32 sf_ass[4];		/* argument save space for o32 */
+	u32 sf_code[2];		/* signal trampoline */
+	struct sigcontext sf_sc;
+	sigset_t sf_mask;
+};
+
 struct rt_sigframe {
 	u32 rs_ass[4];		/* argument save space for o32 */
 	u32 rs_code[2];		/* signal trampoline */
@@ -47,6 +57,14 @@ struct rt_sigframe {
 
 #else
 
+struct sigframe {
+	u32 sf_ass[4];		/* argument save space for o32 */
+	u32 sf_pad[2];
+	struct sigcontext sf_sc;	/* hw context */
+	sigset_t sf_mask;
+	u32 sf_code[8] ____cacheline_aligned;	/* signal trampoline */
+};
+
 struct rt_sigframe {
 	u32 rs_ass[4];		/* argument save space for o32 */
 	u32 rs_pad[2];
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 183fc7e55f34..c28cb21514c8 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -8,6 +8,7 @@
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
 #include <linux/cache.h>
+#include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -24,6 +25,7 @@
 
 #include <asm/abi.h>
 #include <asm/asm.h>
+#include <asm/compat-signal.h>
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/sim.h>
@@ -104,8 +106,6 @@ typedef struct compat_siginfo {
 #define __NR_O32_rt_sigreturn		4193
 #define __NR_O32_restart_syscall	4253
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
 /* 32-bit compatibility types */
 
 #define _NSIG_BPW32	32
@@ -139,8 +139,20 @@ struct ucontext32 {
 	sigset_t32          uc_sigmask;   /* mask last for extensibility */
 };
 
+/*
+ * Horribly complicated - with the bloody RM9000 workarounds enabled
+ * the signal trampolines is moving to the end of the structure so we can
+ * increase the alignment without breaking software compatibility.
+ */
 #if ICACHE_REFILLS_WORKAROUND_WAR == 0
 
+struct sigframe32 {
+	u32 sf_ass[4];		/* argument save space for o32 */
+	u32 sf_code[2];		/* signal trampoline */
+	struct sigcontext32 sf_sc;
+	sigset_t sf_mask;
+};
+
 struct rt_sigframe32 {
 	u32 rs_ass[4];		/* argument save space for o32 */
 	u32 rs_code[2];		/* signal trampoline */
@@ -150,6 +162,14 @@ struct rt_sigframe32 {
 
 #else /* ICACHE_REFILLS_WORKAROUND_WAR */
 
+struct sigframe32 {
+	u32 sf_ass[4];		/* argument save space for o32 */
+	u32 sf_pad[2];
+	struct sigcontext32 sf_sc;	/* hw context */
+	sigset_t sf_mask;
+	u32 sf_code[8] ____cacheline_aligned;	/* signal trampoline */
+};
+
 struct rt_sigframe32 {
 	u32 rs_ass[4];		/* argument save space for o32 */
 	u32 rs_pad[2];
@@ -493,13 +513,13 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 
 asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
 {
-	struct sigframe __user *frame;
+	struct sigframe32 __user *frame;
 	sigset_t blocked;
 
-	frame = (struct sigframe __user *) regs.regs[29];
+	frame = (struct sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
-	if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
+	if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
 		goto badframe;
 
 	sigdelsetmask(&blocked, ~_BLOCKABLE);
@@ -536,7 +556,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	frame = (struct rt_sigframe32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
-	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+	if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
@@ -581,7 +601,7 @@ badframe:
 int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
 	int signr, sigset_t *set)
 {
-	struct sigframe __user *frame;
+	struct sigframe32 __user *frame;
 	int err = 0;
 
 	frame = get_sigframe(ka, regs, sizeof(*frame));
@@ -591,7 +611,8 @@ int setup_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
 	err |= install_sigtramp(frame->sf_code, __NR_O32_sigreturn);
 
 	err |= setup_sigcontext32(regs, &frame->sf_sc);
-	err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
+	err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
 	if (err)
 		goto give_sigsegv;
 
@@ -650,7 +671,7 @@ int setup_rt_frame_32(struct k_sigaction * ka, struct pt_regs *regs,
 	err |= __put_user(current->sas_ss_size,
 	                  &frame->rs_uc.uc_stack.ss_size);
 	err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
-	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+	err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
 
 	if (err)
 		goto give_sigsegv;
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 57456e6a0c62..7ca2a078841f 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -31,6 +31,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheflush.h>
+#include <asm/compat-signal.h>
 #include <asm/sim.h>
 #include <asm/uaccess.h>
 #include <asm/ucontext.h>
@@ -47,7 +48,9 @@
 #define __NR_N32_rt_sigreturn		6211
 #define __NR_N32_restart_syscall	6214
 
-#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
+extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
+
 
 /* IRIX compatible stack_t  */
 typedef struct sigaltstack32 {
@@ -61,7 +64,7 @@ struct ucontextn32 {
 	s32         uc_link;
 	stack32_t   uc_stack;
 	struct sigcontext uc_mcontext;
-	sigset_t    uc_sigmask;   /* mask last for extensibility */
+	compat_sigset_t uc_sigmask;   /* mask last for extensibility */
 };
 
 #if ICACHE_REFILLS_WORKAROUND_WAR == 0
@@ -127,7 +130,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
 	frame = (struct rt_sigframe_n32 __user *) regs.regs[29];
 	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
 		goto badframe;
-	if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+	if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
 		goto badframe;
 
 	sigdelsetmask(&set, ~_BLOCKABLE);
@@ -193,7 +196,7 @@ int setup_rt_frame_n32(struct k_sigaction * ka,
 	err |= __put_user(current->sas_ss_size,
 	                  &frame->rs_uc.uc_stack.ss_size);
 	err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
-	err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
+	err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
 
 	if (err)
 		goto give_sigsegv;
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 19e41fd186c4..de5727385bc6 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -2,8 +2,8 @@
 # Makefile for the Linux/MIPS-specific parts of the memory manager.
 #
 
-obj-y				+= cache.o extable.o fault.o init.o pgtable.o \
-				   tlbex.o tlbex-fault.o
+obj-y				+= cache.o dma-default.o extable.o fault.o \
+				   init.o pgtable.o tlbex.o tlbex-fault.o
 
 obj-$(CONFIG_32BIT)		+= ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)		+= pgtable-64.o
@@ -32,14 +32,4 @@ obj-$(CONFIG_R5000_CPU_SCACHE)	+= sc-r5k.o
 obj-$(CONFIG_RM7000_CPU_SCACHE)	+= sc-rm7k.o
 obj-$(CONFIG_MIPS_CPU_SCACHE)	+= sc-mips.o
 
-#
-# Choose one DMA coherency model
-#
-ifndef CONFIG_OWN_DMA
-obj-$(CONFIG_DMA_COHERENT)	+= dma-coherent.o
-obj-$(CONFIG_DMA_NONCOHERENT)	+= dma-noncoherent.o
-endif
-obj-$(CONFIG_DMA_IP27)		+= dma-ip27.o
-obj-$(CONFIG_DMA_IP32)		+= dma-ip32.o
-
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
index 3a8afd47feaa..9ea460b16bda 100644
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -259,6 +259,12 @@ static void sb1_flush_cache_data_page(unsigned long addr)
 	on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
 }
 #else
+
+static void local_sb1_flush_cache_data_page(unsigned long addr)
+{
+	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
+}
+
 void sb1_flush_cache_data_page(unsigned long)
 	__attribute__((alias("local_sb1_flush_cache_data_page")));
 #endif
diff --git a/arch/mips/mm/dma-coherent.c b/arch/mips/mm/dma-coherent.c
deleted file mode 100644
index 5697c6e250a3..000000000000
--- a/arch/mips/mm/dma-coherent.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
- * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
- */
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <asm/cache.h>
-#include <asm/io.h>
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
-{
-	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
-	ret = (void *) __get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
-
-	return ret;
-}
-
-EXPORT_SYMBOL(dma_alloc_noncoherent);
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
-	__attribute__((alias("dma_alloc_noncoherent")));
-
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
-{
-	unsigned long addr = (unsigned long) vaddr;
-
-	free_pages(addr, get_order(size));
-}
-
-EXPORT_SYMBOL(dma_free_noncoherent);
-
-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent")));
-
-EXPORT_SYMBOL(dma_free_coherent);
-
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	return __pa(ptr);
-}
-
-EXPORT_SYMBOL(dma_map_single);
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_single);
-
-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-	enum dma_data_direction direction)
-{
-	int i;
-
-	BUG_ON(direction == DMA_NONE);
-
-	for (i = 0; i < nents; i++, sg++) {
-		sg->dma_address = (dma_addr_t)page_to_phys(sg->page) + sg->offset;
-	}
-
-	return nents;
-}
-
-EXPORT_SYMBOL(dma_map_sg);
-
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-
-	return page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(dma_map_page);
-
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_page);
-
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_unmap_sg);
-
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-	size_t size, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_for_device);
-
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-
-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-	unsigned long offset, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-
-int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-EXPORT_SYMBOL(dma_mapping_error);
-
-int dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < 0x00ffffff)
-		return 0;
-
-	return 1;
-}
-
-EXPORT_SYMBOL(dma_supported);
-
-int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
-{
-	return 1;
-}
-
-EXPORT_SYMBOL(dma_is_consistent);
-
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-	enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-
-EXPORT_SYMBOL(dma_cache_sync);
-
-/* The DAC routines are a PCIism.. */
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
-	struct page *page, unsigned long offset, int direction)
-{
-	return (dma64_addr_t)page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-
-struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
-	dma64_addr_t dma_addr)
-{
-	return mem_map + (dma_addr >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-
-unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
-	dma64_addr_t dma_addr)
-{
-	return dma_addr & ~PAGE_MASK;
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-
-void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
-	dma64_addr_t dma_addr, size_t len, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
-
-void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
-	dma64_addr_t dma_addr, size_t len, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
-
-#endif /* CONFIG_PCI */
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-default.c
index 8cecef0957c3..4a32e939698f 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,28 +4,39 @@
  * for more details.
  *
  * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  */
+
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/string.h>
-#include <linux/dma-mapping.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
 
+#include <dma-coherence.h>
+
 /*
  * Warning on the terminology - Linux calls an uncached area coherent;
  * MIPS terminology calls memory areas with hardware maintained coherency
  * coherent.
  */
 
+static inline int cpu_is_noncoherent_r10000(struct device *dev)
+{
+	return !plat_device_is_coherent(dev) &&
+	       (current_cpu_data.cputype == CPU_R10000 ||
+		current_cpu_data.cputype == CPU_R12000);
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
+
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
@@ -35,7 +46,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	if (ret != NULL) {
 		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
+		*dma_handle = plat_map_dma_mem(dev, ret, size);
 	}
 
 	return ret;
@@ -48,10 +59,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+		gfp |= GFP_DMA;
+	ret = (void *) __get_free_pages(gfp, get_order(size));
+
 	if (ret) {
-		dma_cache_wback_inv((unsigned long) ret, size);
-		ret = UNCAC_ADDR(ret);
+		memset(ret, 0, size);
+		*dma_handle = plat_map_dma_mem(dev, ret, size);
+
+		if (!plat_device_is_coherent(dev)) {
+			dma_cache_wback_inv((unsigned long) ret, size);
+			ret = UNCAC_ADDR(ret);
+		}
 	}
 
 	return ret;
@@ -72,7 +94,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	addr = CAC_ADDR(addr);
+	if (!plat_device_is_coherent(dev))
+		addr = CAC_ADDR(addr);
+
 	free_pages(addr, get_order(size));
 }
 
@@ -104,9 +128,10 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 {
 	unsigned long addr = (unsigned long) ptr;
 
-	__dma_sync(addr, size, direction);
+	if (!plat_device_is_coherent(dev))
+		__dma_sync(addr, size, direction);
 
-	return virt_to_phys(ptr);
+	return plat_map_dma_mem(dev, ptr, size);
 }
 
 EXPORT_SYMBOL(dma_map_single);
@@ -114,10 +139,11 @@ EXPORT_SYMBOL(dma_map_single);
 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	enum dma_data_direction direction)
 {
-	unsigned long addr;
-	addr = dma_addr + PAGE_OFFSET;
+	if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(plat_dma_addr_to_phys(dma_addr) + PAGE_OFFSET, size,
+			direction);
 
-	//__dma_sync(addr, size, direction);
+	plat_unmap_dma_mem(dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -133,11 +159,10 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		unsigned long addr;
 
 		addr = (unsigned long) page_address(sg->page);
-		if (addr) {
+		if (!plat_device_is_coherent(dev) && addr)
 			__dma_sync(addr + sg->offset, sg->length, direction);
-			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
-				+ sg->offset;
-		}
+		sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
+			sg->offset;
 	}
 
 	return nents;
@@ -148,14 +173,16 @@ EXPORT_SYMBOL(dma_map_sg);
 dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-	unsigned long addr;
-
 	BUG_ON(direction == DMA_NONE);
 
-	addr = (unsigned long) page_address(page) + offset;
-	dma_cache_wback_inv(addr, size);
+	if (!plat_device_is_coherent(dev)) {
+		unsigned long addr;
+
+		addr = (unsigned long) page_address(page) + offset;
+		dma_cache_wback_inv(addr, size);
+	}
 
-	return page_to_phys(page) + offset;
+	return plat_map_dma_mem_page(dev, page) + offset;
 }
 
 EXPORT_SYMBOL(dma_map_page);
@@ -165,12 +192,14 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
-	if (direction != DMA_TO_DEVICE) {
+	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
 		unsigned long addr;
 
-		addr = dma_address + PAGE_OFFSET;
+		addr = plat_dma_addr_to_phys(dma_address);
 		dma_cache_wback_inv(addr, size);
 	}
+
+	plat_unmap_dma_mem(dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -183,13 +212,15 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 
 	BUG_ON(direction == DMA_NONE);
 
-	if (direction == DMA_TO_DEVICE)
-		return;
-
 	for (i = 0; i < nhwentries; i++, sg++) {
-		addr = (unsigned long) page_address(sg->page);
-		if (addr)
-			__dma_sync(addr + sg->offset, sg->length, direction);
+		if (!plat_device_is_coherent(dev) &&
+		    direction != DMA_TO_DEVICE) {
+			addr = (unsigned long) page_address(sg->page);
+			if (addr)
+				__dma_sync(addr + sg->offset, sg->length,
+					direction);
+		}
+		plat_unmap_dma_mem(sg->dma_address);
 	}
 }
 
@@ -198,12 +229,14 @@ EXPORT_SYMBOL(dma_unmap_sg);
 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	size_t size, enum dma_data_direction direction)
 {
-	unsigned long addr;
-
 	BUG_ON(direction == DMA_NONE);
 
-	addr = dma_handle + PAGE_OFFSET;
-	__dma_sync(addr, size, direction);
+	if (cpu_is_noncoherent_r10000(dev)) {
+		unsigned long addr;
+
+		addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+		__dma_sync(addr, size, direction);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_single_for_cpu);
@@ -211,12 +244,14 @@ EXPORT_SYMBOL(dma_sync_single_for_cpu);
 void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	size_t size, enum dma_data_direction direction)
 {
-	unsigned long addr;
-
 	BUG_ON(direction == DMA_NONE);
 
-	addr = dma_handle + PAGE_OFFSET;
-	__dma_sync(addr, size, direction);
+	if (cpu_is_noncoherent_r10000(dev)) {
+		unsigned long addr;
+
+		addr = plat_dma_addr_to_phys(dma_handle);
+		__dma_sync(addr, size, direction);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_single_for_device);
@@ -224,12 +259,14 @@ EXPORT_SYMBOL(dma_sync_single_for_device);
 void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-	unsigned long addr;
-
 	BUG_ON(direction == DMA_NONE);
 
-	addr = dma_handle + offset + PAGE_OFFSET;
-	__dma_sync(addr, size, direction);
+	if (cpu_is_noncoherent_r10000(dev)) {
+		unsigned long addr;
+
+		addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+		__dma_sync(addr + offset, size, direction);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
@@ -237,12 +274,14 @@ EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
 void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 	unsigned long offset, size_t size, enum dma_data_direction direction)
 {
-	unsigned long addr;
-
 	BUG_ON(direction == DMA_NONE);
 
-	addr = dma_handle + offset + PAGE_OFFSET;
-	__dma_sync(addr, size, direction);
+	if (cpu_is_noncoherent_r10000(dev)) {
+		unsigned long addr;
+
+		addr = PAGE_OFFSET + plat_dma_addr_to_phys(dma_handle);
+		__dma_sync(addr + offset, size, direction);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_single_range_for_device);
@@ -255,9 +294,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 	BUG_ON(direction == DMA_NONE);
 
 	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++)
-		__dma_sync((unsigned long)page_address(sg->page),
-			sg->length, direction);
+	for (i = 0; i < nelems; i++, sg++) {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync((unsigned long)page_address(sg->page),
+				sg->length, direction);
+		plat_unmap_dma_mem(sg->dma_address);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -270,9 +312,12 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 	BUG_ON(direction == DMA_NONE);
 
 	/* Make sure that gcc doesn't leave the empty loop body.  */
-	for (i = 0; i < nelems; i++, sg++)
-		__dma_sync((unsigned long)page_address(sg->page),
-			sg->length, direction);
+	for (i = 0; i < nelems; i++, sg++) {
+		if (!plat_device_is_coherent(dev))
+			__dma_sync((unsigned long)page_address(sg->page),
+				sg->length, direction);
+		plat_unmap_dma_mem(sg->dma_address);
+	}
 }
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
@@ -301,70 +346,18 @@ EXPORT_SYMBOL(dma_supported);
 
 int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 {
-	return 1;
+	return plat_device_is_coherent(dev);
 }
 
 EXPORT_SYMBOL(dma_is_consistent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
-	if (direction == DMA_NONE)
-		return;
+	BUG_ON(direction == DMA_NONE);
 
-	dma_cache_wback_inv((unsigned long)vaddr, size);
+	if (!plat_device_is_coherent(dev))
+		dma_cache_wback_inv((unsigned long)vaddr, size);
 }
 
 EXPORT_SYMBOL(dma_cache_sync);
-
-/* The DAC routines are a PCIism.. */
-
-#ifdef CONFIG_PCI
-
-#include <linux/pci.h>
-
-dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
-	struct page *page, unsigned long offset, int direction)
-{
-	return (dma64_addr_t)page_to_phys(page) + offset;
-}
-
-EXPORT_SYMBOL(pci_dac_page_to_dma);
-
-struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
-	dma64_addr_t dma_addr)
-{
-	return mem_map + (dma_addr >> PAGE_SHIFT);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_page);
-
-unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
-	dma64_addr_t dma_addr)
-{
-	return dma_addr & ~PAGE_MASK;
-}
-
-EXPORT_SYMBOL(pci_dac_dma_to_offset);
-
-void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
-	dma64_addr_t dma_addr, size_t len, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);
-
-void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
-	dma64_addr_t dma_addr, size_t len, int direction)
-{
-	BUG_ON(direction == PCI_DMA_NONE);
-
-	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
-}
-
-EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);
-
-#endif /* CONFIG_PCI */
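Every platform difference in the unified dma-default.c is delegated to the hooks pulled in through <dma-coherence.h> near the top of the file; coherent machines, noncoherent machines and bridge-based systems like IP27 override these small functions instead of each carrying a full private copy of the DMA API. The generic header is outside this diff, so the following identity implementations are only a sketch of the expected shape:

/* Sketch of generic <dma-coherence.h> hooks (assumed, not part of
 * this diff): DMA addresses equal physical addresses, nothing to
 * undo on unmap, and devices default to noncoherent. */
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
	size_t size)
{
	return virt_to_phys(addr);
}

static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
	struct page *page)
{
	return page_to_phys(page);
}

static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
{
	return dma_addr;
}

static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
{
}

static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;
}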
diff --git a/arch/mips/mm/dma-ip27.c b/arch/mips/mm/dma-ip27.c deleted file mode 100644 index f088344db465..000000000000 --- a/arch/mips/mm/dma-ip27.c +++ /dev/null | |||
@@ -1,257 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | ||
7 | * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | ||
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/mm.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/pci.h> | ||
15 | |||
16 | #include <asm/cache.h> | ||
17 | #include <asm/pci/bridge.h> | ||
18 | |||
19 | #define pdev_to_baddr(pdev, addr) \ | ||
20 | (BRIDGE_CONTROLLER(pdev->bus)->baddr + (addr)) | ||
21 | #define dev_to_baddr(dev, addr) \ | ||
22 | pdev_to_baddr(to_pci_dev(dev), (addr)) | ||
23 | |||
24 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
25 | dma_addr_t * dma_handle, gfp_t gfp) | ||
26 | { | ||
27 | void *ret; | ||
28 | |||
29 | /* ignore region specifiers */ | ||
30 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | ||
31 | |||
32 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | ||
33 | gfp |= GFP_DMA; | ||
34 | ret = (void *) __get_free_pages(gfp, get_order(size)); | ||
35 | |||
36 | if (ret != NULL) { | ||
37 | memset(ret, 0, size); | ||
38 | *dma_handle = dev_to_baddr(dev, virt_to_phys(ret)); | ||
39 | } | ||
40 | |||
41 | return ret; | ||
42 | } | ||
43 | |||
44 | EXPORT_SYMBOL(dma_alloc_noncoherent); | ||
45 | |||
46 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
47 | dma_addr_t * dma_handle, gfp_t gfp) | ||
48 | __attribute__((alias("dma_alloc_noncoherent"))); | ||
49 | |||
50 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
51 | |||
52 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
53 | dma_addr_t dma_handle) | ||
54 | { | ||
55 | unsigned long addr = (unsigned long) vaddr; | ||
56 | |||
57 | free_pages(addr, get_order(size)); | ||
58 | } | ||
59 | |||
60 | EXPORT_SYMBOL(dma_free_noncoherent); | ||
61 | |||
62 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
63 | dma_addr_t dma_handle) __attribute__((alias("dma_free_noncoherent"))); | ||
64 | |||
65 | EXPORT_SYMBOL(dma_free_coherent); | ||
66 | |||
67 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
68 | enum dma_data_direction direction) | ||
69 | { | ||
70 | BUG_ON(direction == DMA_NONE); | ||
71 | |||
72 | return dev_to_baddr(dev, __pa(ptr)); | ||
73 | } | ||
74 | |||
75 | EXPORT_SYMBOL(dma_map_single); | ||
76 | |||
77 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
78 | enum dma_data_direction direction) | ||
79 | { | ||
80 | BUG_ON(direction == DMA_NONE); | ||
81 | } | ||
82 | |||
83 | EXPORT_SYMBOL(dma_unmap_single); | ||
84 | |||
85 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
86 | enum dma_data_direction direction) | ||
87 | { | ||
88 | int i; | ||
89 | |||
90 | BUG_ON(direction == DMA_NONE); | ||
91 | |||
92 | for (i = 0; i < nents; i++, sg++) { | ||
93 | sg->dma_address = (dma_addr_t) dev_to_baddr(dev, | ||
94 | page_to_phys(sg->page) + sg->offset); | ||
95 | } | ||
96 | |||
97 | return nents; | ||
98 | } | ||
99 | |||
100 | EXPORT_SYMBOL(dma_map_sg); | ||
101 | |||
102 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
103 | unsigned long offset, size_t size, enum dma_data_direction direction) | ||
104 | { | ||
105 | BUG_ON(direction == DMA_NONE); | ||
106 | |||
107 | return dev_to_baddr(dev, page_to_phys(page) + offset); | ||
108 | } | ||
109 | |||
110 | EXPORT_SYMBOL(dma_map_page); | ||
111 | |||
112 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
113 | enum dma_data_direction direction) | ||
114 | { | ||
115 | BUG_ON(direction == DMA_NONE); | ||
116 | } | ||
117 | |||
118 | EXPORT_SYMBOL(dma_unmap_page); | ||
119 | |||
120 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
121 | enum dma_data_direction direction) | ||
122 | { | ||
123 | BUG_ON(direction == DMA_NONE); | ||
124 | } | ||
125 | |||
126 | EXPORT_SYMBOL(dma_unmap_sg); | ||
127 | |||
128 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
129 | enum dma_data_direction direction) | ||
130 | { | ||
131 | BUG_ON(direction == DMA_NONE); | ||
132 | } | ||
133 | |||
134 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
135 | |||
136 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, | ||
137 | enum dma_data_direction direction) | ||
138 | { | ||
139 | BUG_ON(direction == DMA_NONE); | ||
140 | } | ||
141 | |||
142 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
143 | |||
144 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
145 | unsigned long offset, size_t size, | ||
146 | enum dma_data_direction direction) | ||
147 | { | ||
148 | BUG_ON(direction == DMA_NONE); | ||
149 | } | ||
150 | |||
151 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
152 | |||
153 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
154 | unsigned long offset, size_t size, | ||
155 | enum dma_data_direction direction) | ||
156 | { | ||
157 | BUG_ON(direction == DMA_NONE); | ||
158 | } | ||
159 | |||
160 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
161 | |||
162 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
163 | enum dma_data_direction direction) | ||
164 | { | ||
165 | BUG_ON(direction == DMA_NONE); | ||
166 | } | ||
167 | |||
168 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
169 | |||
170 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
171 | enum dma_data_direction direction) | ||
172 | { | ||
173 | BUG_ON(direction == DMA_NONE); | ||
174 | } | ||
175 | |||
176 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
177 | |||
178 | int dma_mapping_error(dma_addr_t dma_addr) | ||
179 | { | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | EXPORT_SYMBOL(dma_mapping_error); | ||
184 | |||
185 | int dma_supported(struct device *dev, u64 mask) | ||
186 | { | ||
187 | /* | ||
188 | * We fall back to GFP_DMA when the mask isn't all 1s, | ||
189 | * so we can't guarantee allocations that must fall | ||
190 | * within a tighter range than GFP_DMA. | ||
191 | */ | ||
192 | if (mask < 0x00ffffff) | ||
193 | return 0; | ||
194 | |||
195 | return 1; | ||
196 | } | ||
197 | |||
198 | EXPORT_SYMBOL(dma_supported); | ||
199 | |||
200 | int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | ||
201 | { | ||
202 | return 1; | ||
203 | } | ||
204 | |||
205 | EXPORT_SYMBOL(dma_is_consistent); | ||
206 | |||
207 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
208 | enum dma_data_direction direction) | ||
209 | { | ||
210 | BUG_ON(direction == DMA_NONE); | ||
211 | } | ||
212 | |||
213 | EXPORT_SYMBOL(dma_cache_sync); | ||
214 | |||
215 | dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | ||
216 | struct page *page, unsigned long offset, int direction) | ||
217 | { | ||
218 | dma64_addr_t addr = page_to_phys(page) + offset; | ||
219 | |||
220 | return (dma64_addr_t) pdev_to_baddr(pdev, addr); | ||
221 | } | ||
222 | |||
223 | EXPORT_SYMBOL(pci_dac_page_to_dma); | ||
224 | |||
225 | struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | ||
226 | dma64_addr_t dma_addr) | ||
227 | { | ||
228 | struct bridge_controller *bc = BRIDGE_CONTROLLER(pdev->bus); | ||
229 | |||
230 | return pfn_to_page((dma_addr - bc->baddr) >> PAGE_SHIFT); | ||
231 | } | ||
232 | |||
233 | EXPORT_SYMBOL(pci_dac_dma_to_page); | ||
234 | |||
235 | unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | ||
236 | dma64_addr_t dma_addr) | ||
237 | { | ||
238 | return dma_addr & ~PAGE_MASK; | ||
239 | } | ||
240 | |||
241 | EXPORT_SYMBOL(pci_dac_dma_to_offset); | ||
242 | |||
243 | void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | ||
244 | dma64_addr_t dma_addr, size_t len, int direction) | ||
245 | { | ||
246 | BUG_ON(direction == PCI_DMA_NONE); | ||
247 | } | ||
248 | |||
249 | EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | ||
250 | |||
251 | void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | ||
252 | dma64_addr_t dma_addr, size_t len, int direction) | ||
253 | { | ||
254 | BUG_ON(direction == PCI_DMA_NONE); | ||
255 | } | ||
256 | |||
257 | EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); | ||
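Because IP27 maintains DMA coherency in hardware, the deleted file above never touches the caches: dma_map_single() reduces to the dev_to_baddr() address translation, and dma_free_coherent() is declared as a GCC alias of dma_free_noncoherent() rather than as a wrapper. A minimal userspace sketch of that alias mechanism (the function names here are hypothetical, not part of the kernel):

    #include <stdio.h>

    void real_free(void *p)
    {
            printf("freeing %p\n", p);
    }

    /* The alias attribute makes 'alias_free' a second name for the same
     * code, so no wrapper call is emitted -- the same trick used for
     * dma_free_coherent() above. */
    void alias_free(void *p) __attribute__((alias("real_free")));

    int main(void)
    {
            int x;
            alias_free(&x);
            return 0;
    }

Compiled with GCC, both symbols resolve to the same address.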
diff --git a/arch/mips/mm/dma-ip32.c b/arch/mips/mm/dma-ip32.c deleted file mode 100644 index b42b6f7456e6..000000000000 --- a/arch/mips/mm/dma-ip32.c +++ /dev/null | |||
@@ -1,383 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | ||
7 | * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org> | ||
8 | * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com> | ||
9 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | ||
10 | * IP32 changes by Ilya. | ||
11 | */ | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/string.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | |||
18 | #include <asm/cache.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/ip32/crime.h> | ||
21 | |||
22 | /* | ||
23 | * A warning on terminology: Linux calls an uncached area "coherent", | ||
24 | * while MIPS terminology reserves "coherent" for memory areas whose | ||
25 | * coherency is maintained by hardware. | ||
26 | */ | ||
27 | |||
28 | /* | ||
29 | * A few notes: | ||
30 | * 1. The CPU sees memory as two chunks: 0-256M @ 0x0, and the rest @ 0x40000000 + 256M. | ||
31 | * 2. PCI sees memory as one big chunk @ 0x0 (or we could use 0x40000000 for native-endian). | ||
32 | * 3. All other devices see memory as one big chunk at 0x40000000. | ||
33 | * 4. Non-PCI devices will pass NULL as their struct device *. | ||
34 | * Thus we translate differently, depending on the device. | ||
35 | */ | ||
36 | |||
37 | #define RAM_OFFSET_MASK 0x3fffffff | ||
38 | |||
39 | void *dma_alloc_noncoherent(struct device *dev, size_t size, | ||
40 | dma_addr_t *dma_handle, gfp_t gfp) | ||
41 | { | ||
42 | void *ret; | ||
43 | /* ignore region specifiers */ | ||
44 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); | ||
45 | |||
46 | if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) | ||
47 | gfp |= GFP_DMA; | ||
48 | ret = (void *) __get_free_pages(gfp, get_order(size)); | ||
49 | |||
50 | if (ret != NULL) { | ||
51 | unsigned long addr = virt_to_phys(ret) & RAM_OFFSET_MASK; | ||
52 | memset(ret, 0, size); | ||
53 | if (dev == NULL) | ||
54 | addr += CRIME_HI_MEM_BASE; | ||
55 | *dma_handle = addr; | ||
56 | } | ||
57 | |||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | EXPORT_SYMBOL(dma_alloc_noncoherent); | ||
62 | |||
63 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
64 | dma_addr_t *dma_handle, gfp_t gfp) | ||
65 | { | ||
66 | void *ret; | ||
67 | |||
68 | ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp); | ||
69 | if (ret) { | ||
70 | dma_cache_wback_inv((unsigned long) ret, size); | ||
71 | ret = UNCAC_ADDR(ret); | ||
72 | } | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
78 | |||
79 | void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, | ||
80 | dma_addr_t dma_handle) | ||
81 | { | ||
82 | free_pages((unsigned long) vaddr, get_order(size)); | ||
83 | } | ||
84 | |||
85 | EXPORT_SYMBOL(dma_free_noncoherent); | ||
86 | |||
87 | void dma_free_coherent(struct device *dev, size_t size, void *vaddr, | ||
88 | dma_addr_t dma_handle) | ||
89 | { | ||
90 | unsigned long addr = (unsigned long) vaddr; | ||
91 | |||
92 | addr = CAC_ADDR(addr); | ||
93 | free_pages(addr, get_order(size)); | ||
94 | } | ||
95 | |||
96 | EXPORT_SYMBOL(dma_free_coherent); | ||
97 | |||
98 | static inline void __dma_sync(unsigned long addr, size_t size, | ||
99 | enum dma_data_direction direction) | ||
100 | { | ||
101 | switch (direction) { | ||
102 | case DMA_TO_DEVICE: | ||
103 | dma_cache_wback(addr, size); | ||
104 | break; | ||
105 | |||
106 | case DMA_FROM_DEVICE: | ||
107 | dma_cache_inv(addr, size); | ||
108 | break; | ||
109 | |||
110 | case DMA_BIDIRECTIONAL: | ||
111 | dma_cache_wback_inv(addr, size); | ||
112 | break; | ||
113 | |||
114 | default: | ||
115 | BUG(); | ||
116 | } | ||
117 | } | ||
118 | |||
119 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
120 | enum dma_data_direction direction) | ||
121 | { | ||
122 | unsigned long addr = (unsigned long) ptr; | ||
123 | |||
124 | switch (direction) { | ||
125 | case DMA_TO_DEVICE: | ||
126 | dma_cache_wback(addr, size); | ||
127 | break; | ||
128 | |||
129 | case DMA_FROM_DEVICE: | ||
130 | dma_cache_inv(addr, size); | ||
131 | break; | ||
132 | |||
133 | case DMA_BIDIRECTIONAL: | ||
134 | dma_cache_wback_inv(addr, size); | ||
135 | break; | ||
136 | |||
137 | default: | ||
138 | BUG(); | ||
139 | } | ||
140 | |||
141 | addr = virt_to_phys(ptr) & RAM_OFFSET_MASK; | ||
142 | if (dev == NULL) | ||
143 | addr += CRIME_HI_MEM_BASE; | ||
144 | return (dma_addr_t) addr; | ||
145 | } | ||
146 | |||
147 | EXPORT_SYMBOL(dma_map_single); | ||
148 | |||
149 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
150 | enum dma_data_direction direction) | ||
151 | { | ||
152 | switch (direction) { | ||
153 | case DMA_TO_DEVICE: | ||
154 | break; | ||
155 | |||
156 | case DMA_FROM_DEVICE: | ||
157 | break; | ||
158 | |||
159 | case DMA_BIDIRECTIONAL: | ||
160 | break; | ||
161 | |||
162 | default: | ||
163 | BUG(); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | EXPORT_SYMBOL(dma_unmap_single); | ||
168 | |||
169 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
170 | enum dma_data_direction direction) | ||
171 | { | ||
172 | int i; | ||
173 | |||
174 | BUG_ON(direction == DMA_NONE); | ||
175 | |||
176 | for (i = 0; i < nents; i++, sg++) { | ||
177 | unsigned long addr; | ||
178 | |||
179 | addr = (unsigned long) page_address(sg->page) + sg->offset; | ||
180 | if (addr) | ||
181 | __dma_sync(addr, sg->length, direction); | ||
182 | addr = __pa(addr) & RAM_OFFSET_MASK; | ||
183 | if (dev == NULL) | ||
184 | addr += CRIME_HI_MEM_BASE; | ||
185 | sg->dma_address = (dma_addr_t) addr; | ||
186 | } | ||
187 | |||
188 | return nents; | ||
189 | } | ||
190 | |||
191 | EXPORT_SYMBOL(dma_map_sg); | ||
192 | |||
193 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
194 | unsigned long offset, size_t size, enum dma_data_direction direction) | ||
195 | { | ||
196 | unsigned long addr; | ||
197 | |||
198 | BUG_ON(direction == DMA_NONE); | ||
199 | |||
200 | addr = (unsigned long) page_address(page) + offset; | ||
201 | dma_cache_wback_inv(addr, size); | ||
202 | addr = __pa(addr) & RAM_OFFSET_MASK; | ||
203 | if (dev == NULL) | ||
204 | addr += CRIME_HI_MEM_BASE; | ||
205 | |||
206 | return (dma_addr_t) addr; | ||
207 | } | ||
208 | |||
209 | EXPORT_SYMBOL(dma_map_page); | ||
210 | |||
211 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
212 | enum dma_data_direction direction) | ||
213 | { | ||
214 | BUG_ON(direction == DMA_NONE); | ||
215 | |||
216 | if (direction != DMA_TO_DEVICE) { | ||
217 | unsigned long addr; | ||
218 | |||
219 | dma_address &= RAM_OFFSET_MASK; | ||
220 | addr = dma_address + PAGE_OFFSET; | ||
221 | if (dma_address >= 256*1024*1024) | ||
222 | addr += CRIME_HI_MEM_BASE; | ||
223 | dma_cache_wback_inv(addr, size); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | EXPORT_SYMBOL(dma_unmap_page); | ||
228 | |||
229 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
230 | enum dma_data_direction direction) | ||
231 | { | ||
232 | unsigned long addr; | ||
233 | int i; | ||
234 | |||
235 | BUG_ON(direction == DMA_NONE); | ||
236 | |||
237 | if (direction == DMA_TO_DEVICE) | ||
238 | return; | ||
239 | |||
240 | for (i = 0; i < nhwentries; i++, sg++) { | ||
241 | addr = (unsigned long) page_address(sg->page); | ||
242 | if (!addr) | ||
243 | continue; | ||
244 | dma_cache_wback_inv(addr + sg->offset, sg->length); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | EXPORT_SYMBOL(dma_unmap_sg); | ||
249 | |||
250 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
251 | size_t size, enum dma_data_direction direction) | ||
252 | { | ||
253 | unsigned long addr; | ||
254 | |||
255 | BUG_ON(direction == DMA_NONE); | ||
256 | |||
257 | dma_handle &= RAM_OFFSET_MASK; | ||
258 | addr = dma_handle + PAGE_OFFSET; | ||
259 | if (dma_handle >= 256*1024*1024) | ||
260 | addr += CRIME_HI_MEM_BASE; | ||
261 | __dma_sync(addr, size, direction); | ||
262 | } | ||
263 | |||
264 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
265 | |||
266 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
267 | size_t size, enum dma_data_direction direction) | ||
268 | { | ||
269 | unsigned long addr; | ||
270 | |||
271 | BUG_ON(direction == DMA_NONE); | ||
272 | |||
273 | dma_handle &= RAM_OFFSET_MASK; | ||
274 | addr = dma_handle + PAGE_OFFSET; | ||
275 | if (dma_handle >= 256*1024*1024) | ||
276 | addr += CRIME_HI_MEM_BASE; | ||
277 | __dma_sync(addr, size, direction); | ||
278 | } | ||
279 | |||
280 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
281 | |||
282 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
283 | unsigned long offset, size_t size, enum dma_data_direction direction) | ||
284 | { | ||
285 | unsigned long addr; | ||
286 | |||
287 | BUG_ON(direction == DMA_NONE); | ||
288 | |||
289 | dma_handle &= RAM_OFFSET_MASK; | ||
290 | addr = dma_handle + offset + PAGE_OFFSET; | ||
291 | if (dma_handle >= 256*1024*1024) | ||
292 | addr += CRIME_HI_MEM_BASE; | ||
293 | __dma_sync(addr, size, direction); | ||
294 | } | ||
295 | |||
296 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
297 | |||
298 | void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, | ||
299 | unsigned long offset, size_t size, enum dma_data_direction direction) | ||
300 | { | ||
301 | unsigned long addr; | ||
302 | |||
303 | BUG_ON(direction == DMA_NONE); | ||
304 | |||
305 | dma_handle &= RAM_OFFSET_MASK; | ||
306 | addr = dma_handle + offset + PAGE_OFFSET; | ||
307 | if (dma_handle >= 256*1024*1024) | ||
308 | addr += CRIME_HI_MEM_BASE; | ||
309 | __dma_sync(addr, size, direction); | ||
310 | } | ||
311 | |||
312 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
313 | |||
314 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
315 | enum dma_data_direction direction) | ||
316 | { | ||
317 | int i; | ||
318 | |||
319 | BUG_ON(direction == DMA_NONE); | ||
320 | |||
321 | /* Sync each entry so the CPU sees the device's writes. */ | ||
322 | for (i = 0; i < nelems; i++, sg++) | ||
323 | __dma_sync((unsigned long) page_address(sg->page) + sg->offset, | ||
324 | sg->length, direction); | ||
325 | } | ||
326 | |||
327 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
328 | |||
329 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, | ||
330 | enum dma_data_direction direction) | ||
331 | { | ||
332 | int i; | ||
333 | |||
334 | BUG_ON(direction == DMA_NONE); | ||
335 | |||
336 | /* Sync each entry so the device sees the CPU's writes. */ | ||
337 | for (i = 0; i < nelems; i++, sg++) | ||
338 | __dma_sync((unsigned long) page_address(sg->page) + sg->offset, | ||
339 | sg->length, direction); | ||
340 | } | ||
341 | |||
342 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
343 | |||
344 | int dma_mapping_error(dma_addr_t dma_addr) | ||
345 | { | ||
346 | return 0; | ||
347 | } | ||
348 | |||
349 | EXPORT_SYMBOL(dma_mapping_error); | ||
350 | |||
351 | int dma_supported(struct device *dev, u64 mask) | ||
352 | { | ||
353 | /* | ||
354 | * We fall back to GFP_DMA when the mask isn't all 1s, | ||
355 | * so we can't guarantee allocations that must fall | ||
356 | * within a tighter range than GFP_DMA. | ||
357 | */ | ||
358 | if (mask < 0x00ffffff) | ||
359 | return 0; | ||
360 | |||
361 | return 1; | ||
362 | } | ||
363 | |||
364 | EXPORT_SYMBOL(dma_supported); | ||
365 | |||
366 | int dma_is_consistent(struct device *dev, dma_addr_t dma_addr) | ||
367 | { | ||
368 | return 1; | ||
369 | } | ||
370 | |||
371 | EXPORT_SYMBOL(dma_is_consistent); | ||
372 | |||
373 | void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | ||
374 | enum dma_data_direction direction) | ||
375 | { | ||
376 | if (direction == DMA_NONE) | ||
377 | return; | ||
378 | |||
379 | dma_cache_wback_inv((unsigned long)vaddr, size); | ||
380 | } | ||
381 | |||
382 | EXPORT_SYMBOL(dma_cache_sync); | ||
383 | |||
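The address translation that the "few notes" comment above describes recurs throughout the deleted file as the same two-step computation: mask the physical address down to its offset within RAM, then relocate it to the CRIME high-memory window when the requester is not a PCI device (dev == NULL). A standalone sketch of that rule, with the CRIME_HI_MEM_BASE value assumed to be 0x40000000 for illustration and a hypothetical helper name:

    #include <stdio.h>

    #define RAM_OFFSET_MASK   0x3fffffffUL
    #define CRIME_HI_MEM_BASE 0x40000000UL  /* assumed value */

    /* dev == NULL models a non-PCI device, as in the code above. */
    static unsigned long ip32_phys_to_dma(const void *dev, unsigned long phys)
    {
            unsigned long addr = phys & RAM_OFFSET_MASK;

            if (dev == NULL)
                    addr += CRIME_HI_MEM_BASE;
            return addr;
    }

    int main(void)
    {
            /* A physical address in the high chunk maps back into the
             * 0x40000000+ window for a non-PCI device. */
            printf("%#lx\n", ip32_phys_to_dma(NULL, 0x40001000UL));
            return 0;
    }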
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile index 82b20c28bef8..bf85995ca042 100644 --- a/arch/mips/pci/Makefile +++ b/arch/mips/pci/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the PCI specific kernel interface routines under Linux. | 2 | # Makefile for the PCI specific kernel interface routines under Linux. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += pci.o | 5 | obj-y += pci.o pci-dac.o |
6 | 6 | ||
7 | # | 7 | # |
8 | # PCI bus host bridge specific code | 8 | # PCI bus host bridge specific code |
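The hunk above links pci-dac.o into every PCI-capable MIPS kernel via obj-y. For comparison, kbuild's usual pattern for making such an object conditional on a config symbol would look like the following (CONFIG_PCI_DAC is a hypothetical symbol; this commit deliberately builds the file unconditionally):

    # Hypothetical conditional form -- not what the commit does.
    obj-$(CONFIG_PCI_DAC) += pci-dac.o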
diff --git a/arch/mips/pci/pci-dac.c b/arch/mips/pci/pci-dac.c new file mode 100644 index 000000000000..0f0ea1b7d4dd --- /dev/null +++ b/arch/mips/pci/pci-dac.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com> | ||
7 | * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org> | ||
8 | * swiped from i386, and cloned for MIPS by Geert, polished by Ralf. | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/string.h> | ||
16 | |||
17 | #include <asm/cache.h> | ||
18 | #include <asm/io.h> | ||
19 | |||
20 | #include <linux/pci.h> | ||
21 | |||
22 | #include <dma-coherence.h> | ||
23 | |||
24 | dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev, | ||
25 | struct page *page, unsigned long offset, int direction) | ||
26 | { | ||
27 | struct device *dev = &pdev->dev; | ||
28 | |||
29 | BUG_ON(direction == DMA_NONE); | ||
30 | |||
31 | if (!plat_device_is_coherent(dev)) { | ||
32 | unsigned long addr; | ||
33 | |||
34 | addr = (unsigned long) page_address(page) + offset; | ||
35 | dma_cache_wback_inv(addr, PAGE_SIZE); | ||
36 | } | ||
37 | |||
38 | return plat_map_dma_mem_page(dev, page) + offset; | ||
39 | } | ||
40 | |||
41 | EXPORT_SYMBOL(pci_dac_page_to_dma); | ||
42 | |||
43 | struct page *pci_dac_dma_to_page(struct pci_dev *pdev, | ||
44 | dma64_addr_t dma_addr) | ||
45 | { | ||
46 | return pfn_to_page(plat_dma_addr_to_phys(dma_addr) >> PAGE_SHIFT); | ||
47 | } | ||
48 | |||
49 | EXPORT_SYMBOL(pci_dac_dma_to_page); | ||
50 | |||
51 | unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev, | ||
52 | dma64_addr_t dma_addr) | ||
53 | { | ||
54 | return dma_addr & ~PAGE_MASK; | ||
55 | } | ||
56 | |||
57 | EXPORT_SYMBOL(pci_dac_dma_to_offset); | ||
58 | |||
59 | void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, | ||
60 | dma64_addr_t dma_addr, size_t len, int direction) | ||
61 | { | ||
62 | BUG_ON(direction == PCI_DMA_NONE); | ||
63 | |||
64 | if (!plat_device_is_coherent(&pdev->dev)) | ||
65 | dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); | ||
66 | } | ||
67 | |||
68 | EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu); | ||
69 | |||
70 | void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, | ||
71 | dma64_addr_t dma_addr, size_t len, int direction) | ||
72 | { | ||
73 | BUG_ON(direction == PCI_DMA_NONE); | ||
74 | |||
75 | if (!plat_device_is_coherent(&pdev->dev)) | ||
76 | dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len); | ||
77 | } | ||
78 | |||
79 | EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device); | ||
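To see how the new entry points fit together, here is a hypothetical driver fragment (the device-programming step is elided, and every name other than the pci_dac_* calls is invented for illustration):

    #include <linux/pci.h>

    static void example_dac_map(struct pci_dev *pdev, struct page *page)
    {
            dma64_addr_t bus;

            /* Map one page for device access; on a noncoherent platform
             * this writes the page back out of the CPU caches first. */
            bus = pci_dac_page_to_dma(pdev, page, 0, PCI_DMA_BIDIRECTIONAL);

            /* ... program 'bus' into the device and let it run ... */

            /* Hand ownership back to the CPU before reading the data. */
            pci_dac_dma_sync_single_for_cpu(pdev, bus, PAGE_SIZE,
                                            PCI_DMA_BIDIRECTIONAL);
    }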